From 4cda42ad8fe833bb15f7a1081547e2b344f04e22 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Wed, 20 Sep 2017 15:59:35 -0500 Subject: [PATCH 001/329] MVP of working Nomad Secret Backend --- builtin/logical/nomad/backend.go | 37 ++++++ builtin/logical/nomad/client.go | 28 +++++ builtin/logical/nomad/path_config.go | 104 ++++++++++++++++ builtin/logical/nomad/path_roles.go | 163 ++++++++++++++++++++++++++ builtin/logical/nomad/path_token.go | 80 +++++++++++++ builtin/logical/nomad/secret_token.go | 59 ++++++++++ cli/commands.go | 2 + 7 files changed, 473 insertions(+) create mode 100644 builtin/logical/nomad/backend.go create mode 100644 builtin/logical/nomad/client.go create mode 100644 builtin/logical/nomad/path_config.go create mode 100644 builtin/logical/nomad/path_roles.go create mode 100644 builtin/logical/nomad/path_token.go create mode 100644 builtin/logical/nomad/secret_token.go diff --git a/builtin/logical/nomad/backend.go b/builtin/logical/nomad/backend.go new file mode 100644 index 0000000000..99386d095b --- /dev/null +++ b/builtin/logical/nomad/backend.go @@ -0,0 +1,37 @@ +package nomad + +import ( + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/logical/framework" +) + +func Factory(conf *logical.BackendConfig) (logical.Backend, error) { + b := Backend() + if err := b.Setup(conf); err != nil { + return nil, err + } + return b, nil +} + +func Backend() *backend { + var b backend + b.Backend = &framework.Backend{ + Paths: []*framework.Path{ + pathConfigAccess(), + pathListRoles(&b), + pathRoles(), + pathToken(&b), + }, + + Secrets: []*framework.Secret{ + secretToken(&b), + }, + BackendType: logical.TypeLogical, + } + + return &b +} + +type backend struct { + *framework.Backend +} diff --git a/builtin/logical/nomad/client.go b/builtin/logical/nomad/client.go new file mode 100644 index 0000000000..2101d31dc8 --- /dev/null +++ b/builtin/logical/nomad/client.go @@ -0,0 +1,28 @@ +package nomad + +import ( + "fmt" + + "github.com/hashicorp/nomad/api" + "github.com/hashicorp/vault/logical" +) + +func client(s logical.Storage) (*api.Client, error, error) { + conf, userErr, intErr := readConfigAccess(s) + if intErr != nil { + return nil, nil, intErr + } + if userErr != nil { + return nil, userErr, nil + } + if conf == nil { + return nil, nil, fmt.Errorf("no error received but no configuration found") + } + + nomadConf := api.DefaultConfig() + nomadConf.Address = conf.Address + nomadConf.SecretID = conf.Token + + client, err := api.NewClient(nomadConf) + return client, nil, err +} diff --git a/builtin/logical/nomad/path_config.go b/builtin/logical/nomad/path_config.go new file mode 100644 index 0000000000..d9e6dc1280 --- /dev/null +++ b/builtin/logical/nomad/path_config.go @@ -0,0 +1,104 @@ +package nomad + +import ( + "fmt" + + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/logical/framework" +) + +func pathConfigAccess() *framework.Path { + return &framework.Path{ + Pattern: "config/access", + Fields: map[string]*framework.FieldSchema{ + "address": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Nomad server address", + }, + + "scheme": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "URI scheme for the Nomad address", + + // https would be a better default but Consul on its own + // defaults to HTTP access, and when HTTPS is enabled it + // disables HTTP, so there isn't really any harm done here. 
+ Default: "http", + }, + + "token": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Token for API calls", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: pathConfigAccessRead, + logical.UpdateOperation: pathConfigAccessWrite, + }, + } +} + +func readConfigAccess(storage logical.Storage) (*accessConfig, error, error) { + entry, err := storage.Get("config/access") + if err != nil { + return nil, nil, err + } + if entry == nil { + return nil, fmt.Errorf( + "Access credentials for the backend itself haven't been configured. Please configure them at the '/config/access' endpoint"), + nil + } + + conf := &accessConfig{} + if err := entry.DecodeJSON(conf); err != nil { + return nil, nil, fmt.Errorf("error reading nomad access configuration: %s", err) + } + + return conf, nil, nil +} + +func pathConfigAccessRead( + req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + conf, userErr, intErr := readConfigAccess(req.Storage) + if intErr != nil { + return nil, intErr + } + if userErr != nil { + return logical.ErrorResponse(userErr.Error()), nil + } + if conf == nil { + return nil, fmt.Errorf("no user error reported but nomad access configuration not found") + } + + return &logical.Response{ + Data: map[string]interface{}{ + "address": conf.Address, + "scheme": conf.Scheme, + }, + }, nil +} + +func pathConfigAccessWrite( + req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + entry, err := logical.StorageEntryJSON("config/access", accessConfig{ + Address: data.Get("address").(string), + Scheme: data.Get("scheme").(string), + Token: data.Get("token").(string), + }) + if err != nil { + return nil, err + } + + if err := req.Storage.Put(entry); err != nil { + return nil, err + } + + return nil, nil +} + +type accessConfig struct { + Address string `json:"address"` + Scheme string `json:"scheme"` + Token string `json:"token"` +} diff --git a/builtin/logical/nomad/path_roles.go b/builtin/logical/nomad/path_roles.go new file mode 100644 index 0000000000..69846fb735 --- /dev/null +++ b/builtin/logical/nomad/path_roles.go @@ -0,0 +1,163 @@ +package nomad + +import ( + "fmt" + "time" + + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/logical/framework" +) + +func pathListRoles(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "roles/?$", + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: b.pathRoleList, + }, + } +} + +func pathRoles() *framework.Path { + return &framework.Path{ + Pattern: "roles/" + framework.GenericNameRegex("name"), + Fields: map[string]*framework.FieldSchema{ + "name": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Name of the role", + }, + + "policy": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Policy name as previously created in Nomad. Required", + }, + + "token_type": &framework.FieldSchema{ + Type: framework.TypeString, + Default: "client", + Description: `Which type of token to create: 'client' +or 'management'. If a 'management' token, +the "policy" parameter is not required. 
+Defaults to 'client'.`, + }, + + "lease": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Lease time of the role.", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: pathRolesRead, + logical.UpdateOperation: pathRolesWrite, + logical.DeleteOperation: pathRolesDelete, + }, + } +} + +func (b *backend) pathRoleList( + req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + entries, err := req.Storage.List("policy/") + if err != nil { + return nil, err + } + + return logical.ListResponse(entries), nil +} + +func pathRolesRead( + req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + name := d.Get("name").(string) + + entry, err := req.Storage.Get("policy/" + name) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var result roleConfig + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + + if result.TokenType == "" { + result.TokenType = "client" + } + + // Generate the response + resp := &logical.Response{ + Data: map[string]interface{}{ + "lease": result.Lease.String(), + "token_type": result.TokenType, + }, + } + if result.Policy != "" { + resp.Data["policy"] = result.Policy + } + return resp, nil +} + +func pathRolesWrite( + req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + tokenType := d.Get("token_type").(string) + + switch tokenType { + case "client": + case "management": + default: + return logical.ErrorResponse( + "token_type must be \"client\" or \"management\""), nil + } + + name := d.Get("name").(string) + policy := d.Get("policy").(string) + var err error + if tokenType != "management" { + if policy == "" { + return logical.ErrorResponse( + "policy cannot be empty when not using management tokens"), nil + } + } + + var lease time.Duration + leaseParam := d.Get("lease").(string) + if leaseParam != "" { + lease, err = time.ParseDuration(leaseParam) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf( + "error parsing given lease of %s: %s", leaseParam, err)), nil + } + } + + entry, err := logical.StorageEntryJSON("policy/"+name, roleConfig{ + Policy: policy, + Lease: lease, + TokenType: tokenType, + }) + if err != nil { + return nil, err + } + + if err := req.Storage.Put(entry); err != nil { + return nil, err + } + + return nil, nil +} + +func pathRolesDelete( + req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + name := d.Get("name").(string) + if err := req.Storage.Delete("policy/" + name); err != nil { + return nil, err + } + return nil, nil +} + +type roleConfig struct { + Policy string `json:"policy"` + Lease time.Duration `json:"lease"` + TokenType string `json:"token_type"` +} diff --git a/builtin/logical/nomad/path_token.go b/builtin/logical/nomad/path_token.go new file mode 100644 index 0000000000..e83d700b2e --- /dev/null +++ b/builtin/logical/nomad/path_token.go @@ -0,0 +1,80 @@ +package nomad + +import ( + "fmt" + "time" + + "github.com/hashicorp/nomad/api" + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/logical/framework" +) + +func pathToken(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "creds/" + framework.GenericNameRegex("name"), + Fields: map[string]*framework.FieldSchema{ + "name": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Name of the role", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathTokenRead, + }, + } +} + 
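+// pathTokenRead handles reads of creds/<role>: it loads the role definition
+// stored under "policy/<role>", asks Nomad to create an ACL token of the
+// role's token type with the role's policy attached, and returns the result
+// as a leased secret whose TTL is the role's lease.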
+func (b *backend) pathTokenRead( + req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + name := d.Get("name").(string) + + entry, err := req.Storage.Get("policy/" + name) + if err != nil { + return nil, fmt.Errorf("error retrieving role: %s", err) + } + if entry == nil { + return logical.ErrorResponse(fmt.Sprintf("Role '%s' not found", name)), nil + } + + var result roleConfig + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + + if result.TokenType == "" { + result.TokenType = "client" + } + + // Get the nomad client + c, userErr, intErr := client(req.Storage) + if intErr != nil { + return nil, intErr + } + if userErr != nil { + return logical.ErrorResponse(userErr.Error()), nil + } + + // Generate a name for the token + tokenName := fmt.Sprintf("Vault %s %s %d", name, req.DisplayName, time.Now().UnixNano()) + + // Create it + token, _, err := c.ACLTokens().Create(&api.ACLToken{ + Name: tokenName, + Type: result.TokenType, + Policies: []string{result.Policy}, + }, nil) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + // Use the helper to create the secret + s := b.Secret(SecretTokenType).Response(map[string]interface{}{ + "token": token, + }, map[string]interface{}{ + "token": token, + }) + s.Secret.TTL = result.Lease + + return s, nil +} diff --git a/builtin/logical/nomad/secret_token.go b/builtin/logical/nomad/secret_token.go new file mode 100644 index 0000000000..4bca290518 --- /dev/null +++ b/builtin/logical/nomad/secret_token.go @@ -0,0 +1,59 @@ +package nomad + +import ( + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/logical/framework" +) + +const ( + SecretTokenType = "token" +) + +func secretToken(b *backend) *framework.Secret { + return &framework.Secret{ + Type: SecretTokenType, + Fields: map[string]*framework.FieldSchema{ + "token": &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Request token", + }, + }, + + Renew: b.secretTokenRenew, + Revoke: secretTokenRevoke, + } +} + +func (b *backend) secretTokenRenew( + req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + + return framework.LeaseExtend(0, 0, b.System())(req, d) +} + +func secretTokenRevoke( + req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + c, userErr, intErr := client(req.Storage) + if intErr != nil { + return nil, intErr + } + if userErr != nil { + // Returning logical.ErrorResponse from revocation function is risky + return nil, userErr + } + + tokenRaw, ok := req.Secret.InternalData["token"] + if !ok { + // We return nil here because this is a pre-0.5.3 problem and there is + // nothing we can do about it. We already can't revoke the lease + // properly if it has been renewed and this is documented pre-0.5.3 + // behavior with a security bulletin about it. 
+ return nil, nil + } + + _, err := c.ACLTokens().Delete(tokenRaw.(string), nil) + if err != nil { + return nil, err + } + + return nil, nil +} diff --git a/cli/commands.go b/cli/commands.go index 22c8640a97..83e50b6ed2 100644 --- a/cli/commands.go +++ b/cli/commands.go @@ -45,6 +45,7 @@ import ( "github.com/hashicorp/vault/builtin/logical/mongodb" "github.com/hashicorp/vault/builtin/logical/mssql" "github.com/hashicorp/vault/builtin/logical/mysql" + "github.com/hashicorp/vault/builtin/logical/nomad" "github.com/hashicorp/vault/builtin/logical/pki" "github.com/hashicorp/vault/builtin/logical/postgresql" "github.com/hashicorp/vault/builtin/logical/rabbitmq" @@ -107,6 +108,7 @@ func Commands(metaPtr *meta.Meta) map[string]cli.CommandFactory { LogicalBackends: map[string]logical.Factory{ "aws": aws.Factory, "consul": consul.Factory, + "nomad": nomad.Factory, "postgresql": postgresql.Factory, "cassandra": cassandra.Factory, "pki": pki.Factory, From 393e7bf7e8549d68f06428953b6946eed0916e14 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Wed, 20 Sep 2017 17:14:35 -0500 Subject: [PATCH 002/329] Fixing data model --- builtin/logical/nomad/path_roles.go | 10 +++++----- builtin/logical/nomad/path_token.go | 8 +++++--- builtin/logical/nomad/secret_token.go | 2 +- 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/builtin/logical/nomad/path_roles.go b/builtin/logical/nomad/path_roles.go index 69846fb735..79836c0ac2 100644 --- a/builtin/logical/nomad/path_roles.go +++ b/builtin/logical/nomad/path_roles.go @@ -28,7 +28,7 @@ func pathRoles() *framework.Path { }, "policy": &framework.FieldSchema{ - Type: framework.TypeString, + Type: framework.TypeCommaStringSlice, Description: "Policy name as previously created in Nomad. Required", }, @@ -93,7 +93,7 @@ func pathRolesRead( "token_type": result.TokenType, }, } - if result.Policy != "" { + if len(result.Policy) != 0 { resp.Data["policy"] = result.Policy } return resp, nil @@ -112,10 +112,10 @@ func pathRolesWrite( } name := d.Get("name").(string) - policy := d.Get("policy").(string) + policy := d.Get("policy").([]string) var err error if tokenType != "management" { - if policy == "" { + if len(policy) == 0 { return logical.ErrorResponse( "policy cannot be empty when not using management tokens"), nil } @@ -157,7 +157,7 @@ func pathRolesDelete( } type roleConfig struct { - Policy string `json:"policy"` + Policy []string `json:"policy"` Lease time.Duration `json:"lease"` TokenType string `json:"token_type"` } diff --git a/builtin/logical/nomad/path_token.go b/builtin/logical/nomad/path_token.go index e83d700b2e..f837df412b 100644 --- a/builtin/logical/nomad/path_token.go +++ b/builtin/logical/nomad/path_token.go @@ -62,7 +62,7 @@ func (b *backend) pathTokenRead( token, _, err := c.ACLTokens().Create(&api.ACLToken{ Name: tokenName, Type: result.TokenType, - Policies: []string{result.Policy}, + Policies: result.Policy, }, nil) if err != nil { return logical.ErrorResponse(err.Error()), nil @@ -70,9 +70,11 @@ func (b *backend) pathTokenRead( // Use the helper to create the secret s := b.Secret(SecretTokenType).Response(map[string]interface{}{ - "token": token, + "secret_id": token.SecretID, + "accessor_id": token.AccessorID, }, map[string]interface{}{ - "token": token, + "secret_id": token.SecretID, + "accessor_id": token.AccessorID, }) s.Secret.TTL = result.Lease diff --git a/builtin/logical/nomad/secret_token.go b/builtin/logical/nomad/secret_token.go index 4bca290518..ad02b16fee 100644 --- a/builtin/logical/nomad/secret_token.go +++ 
b/builtin/logical/nomad/secret_token.go
@@ -41,7 +41,7 @@ func secretTokenRevoke(
 		return nil, userErr
 	}
 
-	tokenRaw, ok := req.Secret.InternalData["token"]
+	tokenRaw, ok := req.Secret.InternalData["accessor_id"]
 	if !ok {
 		// We return nil here because this is a pre-0.5.3 problem and there is
 		// nothing we can do about it. We already can't revoke the lease

From bcd147711ae97a4aa33fe3e572213da3096addb5 Mon Sep 17 00:00:00 2001
From: Nicolas Corrarello
Date: Wed, 20 Sep 2017 17:31:28 -0500
Subject: [PATCH 003/329] Adding Nomad secret backend documentation

---
 .../source/docs/secrets/nomad/index.html.md | 109 ++++++++++++++++++
 1 file changed, 109 insertions(+)
 create mode 100644 website/source/docs/secrets/nomad/index.html.md

diff --git a/website/source/docs/secrets/nomad/index.html.md b/website/source/docs/secrets/nomad/index.html.md
new file mode 100644
index 0000000000..d5b87d107b
--- /dev/null
+++ b/website/source/docs/secrets/nomad/index.html.md
@@ -0,0 +1,109 @@
+---
+layout: "docs"
+page_title: "Nomad Secret Backend"
+sidebar_current: "docs-secrets-nomad"
+description: |-
+  The Nomad secret backend for Vault generates tokens for Nomad dynamically.
+---
+
+# Nomad Secret Backend
+
+Name: `Nomad`
+
+The Nomad secret backend for Vault generates
+[Nomad](https://www.nomadproject.io)
+API tokens dynamically based on pre-existing Nomad ACL policies.
+
+This page will show a quick start for this backend. For detailed documentation
+on every path, use `vault path-help` after mounting the backend.
+
+~> **Version information** ACLs are only available on Nomad 0.7.0 and above,
+which is currently in beta.
+
+## Quick Start
+
+The first step to using the `nomad` backend is to mount it.
+Unlike the `generic` backend, the `nomad` backend is not mounted by default.
+
+```
+$ vault mount nomad
+Successfully mounted 'nomad' at 'nomad'!
+```
+For a quick start, you can use the SecretID token provided by the [Nomad ACL bootstrap
+process](https://www.nomadproject.io/guides/acl.html#generate-the-initial-token), although this
+is discouraged for production deployments.
+```
+$ nomad acl bootstrap
+Accessor ID  = 95a0ee55-eaa6-2c0a-a900-ed94c156754e
+Secret ID    = c25b6ca0-ea4e-000f-807a-fd03fcab6e3c
+Name         = Bootstrap Token
+Type         = management
+Global       = true
+Policies     = n/a
+Create Time  = 2017-09-20 19:40:36.527512364 +0000 UTC
+Create Index = 7
+Modify Index = 7
+```
+The suggested pattern is to generate a token specifically for Vault, following the
+[Nomad ACL guide](https://www.nomadproject.io/guides/acl.html).
+
+Next, we must configure Vault to know how to contact Nomad.
+This is done by writing the access information:
+
+```
+$ vault write nomad/config/access \
+    address=http://127.0.0.1:4646 \
+    token=adf4238a-882b-9ddc-4a9d-5b6758e4159e
+Success! Data written to: nomad/config/access
+```
+
+In this case, we've configured Vault to connect to Nomad
+on the default port with the loopback address. We've also provided
+an ACL token to use with the `token` parameter. Vault must have a management
+type token so that it can create and revoke ACL tokens.
+
+The next step is to configure a role. A role is a logical name that maps
+to a set of policy names used to generate those credentials. For example, let's create
+a "monitoring" role that maps to a "readonly" policy:
+
+```
+$ vault write nomad/roles/monitoring policy=readonly
+Success! Data written to: nomad/roles/monitoring
+```
+
+The backend expects either a single policy name or a comma-separated list of policy names.
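+
+For example, a role can map to several policies at once; the `ci` role name
+and the `readonly` and `deploy` policy names below are purely illustrative
+and assume those policies have already been created in Nomad:
+
+```
+$ vault write nomad/roles/ci policy=readonly,deploy
+Success! Data written to: nomad/roles/ci
+```
+
+The examples below continue to use the single-policy "monitoring" role.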
+ +To generate a new Nomad ACL token, we simply read from that role: + +``` +$ vault read nomad/creds/monitoring +Key Value +--- ----- +lease_id nomad/creds/monitoring/78ec3ef3-c806-1022-4aa8-1dbae39c760c +lease_duration 768h0m0s +lease_renewable true +accessor_id a715994d-f5fd-1194-73df-ae9dad616307 +secret_id b31fb56c-0936-5428-8c5f-ed010431aba9 +``` + +Here we can see that Vault has generated a new Nomad ACL token for us. +We can test this token out, by reading it in Nomad (by it's accesor): + +``` +$ nomad acl token info a715994d-f5fd-1194-73df-ae9dad616307 +Accessor ID = a715994d-f5fd-1194-73df-ae9dad616307 +Secret ID = b31fb56c-0936-5428-8c5f-ed010431aba9 +Name = Vault example root 1505945527022465593 +Type = client +Global = false +Policies = [readonly] +Create Time = 2017-09-20 22:12:07.023455379 +0000 UTC +Create Index = 138 +Modify Index = 138 +``` + +## API + +The Nomad secret backend has a full HTTP API. Please see the +[Nomad secret backend API](/api/secret/nomad/index.html) for more +details. From bc1ea9af5340da5d94978255da41cc29fc7e53b2 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Thu, 21 Sep 2017 09:18:35 -0500 Subject: [PATCH 004/329] Adding Nomad Secret Backend API documentation --- website/source/api/secret/nomad/index.html.md | 239 ++++++++++++++++++ 1 file changed, 239 insertions(+) create mode 100644 website/source/api/secret/nomad/index.html.md diff --git a/website/source/api/secret/nomad/index.html.md b/website/source/api/secret/nomad/index.html.md new file mode 100644 index 0000000000..236e3e7e83 --- /dev/null +++ b/website/source/api/secret/nomad/index.html.md @@ -0,0 +1,239 @@ +--- +layout: "api" +page_title: "Nomad Secret Backend - HTTP API" +sidebar_current: "docs-http-secret-nomad" +description: |- + This is the API documentation for the Vault Nomad secret backend. +--- + +# Nomad Secret Backend HTTP API + +This is the API documentation for the Vault Nomad secret backend. For general +information about the usage and operation of the Nomad backend, please see the +[Vault Nomad backend documentation](/docs/secrets/nomad/index.html). + +This documentation assumes the Nomad backend is mounted at the `/nomad` path +in Vault. Since it is possible to mount secret backends at any location, please +update your API calls accordingly. + +## Configure Access + +This endpoint configures the access information for Nomad. This access +information is used so that Vault can communicate with Nomad and generate +Nomad tokens. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/nomad/config/access` | `204 (empty body)` | + +### Parameters + +- `address` `(string: )` – Specifies the address of the Nomad + instance, provided as `"protocol://host:port"` like `"http://127.0.0.1:4646"`. + +- `token` `(string: )` – Specifies the Nomad Management token to use. + +### Sample Payload + +```json +{ + "address": "http://127.0.0.1:4646", + "token": "adha..." +} +``` + +### Sample Request + +``` +$ curl \ + --request POST \ + --header "X-Vault-Token: ..." \ + --data @payload.json \ + https://vault.rocks/v1/nomad/config/access +``` + +## Create/Update Role + +This endpoint creates or updates the Nomad role definition in Vault. If the role does not exist, it will be created. If the role already exists, it will receive +updated attributes. 
+ +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/nomad/roles/:name` | `204 (empty body)` | + +### Parameters + +- `name` `(string: )` – Specifies the name of an existing role against + which to create this Nomad tokens. This is part of the request URL. + +- `lease` `(string: "")` – Specifies the lease for this role. This is provided + as a string duration with a time suffix like `"30s"` or `"1h"`. If not + provided, the default Vault lease is used. + +- `policy` `(string: )` – Comma separated list of Nomad policies the token is going to be created against. These need to be created beforehand in Nomad. + +- `token_type` `(string: "client")` - Specifies the type of token to create when + using this role. Valid values are `"client"` or `"management"`. + +### Sample Payload + +To create a client token with a custom policy: + +```json +{ + "policy": "readonly" +} +``` + +### Sample Request + +``` +$ curl \ + --request POST \ + --header "X-Vault-Token: ..." \ + --data @payload.json \ + https://vault.rocks/v1/nomad/roles/monitoring +``` + +## Read Role + +This endpoint queries for information about a Nomad role with the given name. +If no role exists with that name, a 404 is returned. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/nomad/roles/:name` | `200 application/json` | + +### Parameters + +- `name` `(string: )` – Specifies the name of the role to query. This + is part of the request URL. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + https://vault.rocks/v1/nomad/roles/monitoring +``` + +### Sample Response + +```json +{ + "auth": null, + "data": { + "lease": "0s", + "policy": [ + "example" + ], + "token_type": "client" + }, + "lease_duration": 0, + "lease_id": "", + "renewable": false, + "request_id": "f4c7ee18-72aa-3b20-a910-93b6274a9dc0", + "warnings": null, + "wrap_info": null +} +``` + +## List Roles + +This endpoint lists all existing roles in the backend. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `LIST` | `/nomad/roles` | `200 application/json` | +| `GET` | `/nomad/roles?list=true` | `200 application/json` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request LIST \ + https://vault.rocks/v1/nomad/roles +``` + +### Sample Response + +```json +{ + "auth": null, + "data": { + "keys": [ + "example" + ] + }, + "lease_duration": 0, + "lease_id": "", + "renewable": false, + "request_id": "d7bb167b-81c5-9606-c214-b34fcda45634", + "warnings": null, + "wrap_info": null +} +``` + +## Delete Role + +This endpoint deletes a Nomad role with the given name. Even if the role does +not exist, this endpoint will still return a successful response. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `DELETE` | `/nomad/roles/:name` | `204 (empty body)` | + +### Parameters + +- `name` `(string: )` – Specifies the name of the role to delete. This + is part of the request URL. + +### Sample Request + +``` +$ curl \ + --request DELETE \ + --header "X-Vault-Token: ..." \ + https://vault.rocks/v1/nomad/roles/example-role +``` + +## Generate Credential + +This endpoint generates a dynamic Nomad token based on the given role +definition. 
+ +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/nomad/creds/:name` | `200 application/json` | + +### Parameters + +- `name` `(string: )` – Specifies the name of an existing role against + which to create this Nomad token. This is part of the request URL. + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + https://vault.rocks/v1/nomad/creds/example +``` + +### Sample Response + +```json +{ + "auth": null, + "data": { + "accessor_id": "c834ba40-8d84-b0c1-c084-3a31d3383c03", + "secret_id": "65af6f07-7f57-bb24-cdae-a27f86a894ce" + }, + "lease_duration": 2764800, + "lease_id": "nomad/creds/example/c2686da3-2431-b6d6-7bbf-c5b9496dd6d7", + "renewable": true, + "request_id": "37a06ca1-8a1d-7f17-bda8-4661289c392b", + "warnings": null, + "wrap_info": null +} +``` From 9338277c67f8fb0e59effa7513613c5eca5afd90 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Thu, 28 Sep 2017 21:44:30 +0100 Subject: [PATCH 005/329] Added tests --- builtin/logical/nomad/backend_test.go | 225 ++++++++++++++++++++++++++ 1 file changed, 225 insertions(+) create mode 100644 builtin/logical/nomad/backend_test.go diff --git a/builtin/logical/nomad/backend_test.go b/builtin/logical/nomad/backend_test.go new file mode 100644 index 0000000000..a234b3b39a --- /dev/null +++ b/builtin/logical/nomad/backend_test.go @@ -0,0 +1,225 @@ +package nomad + +import ( + "fmt" + "log" + "os" + "reflect" + "testing" + "time" + + nomadapi "github.com/hashicorp/nomad/api" + "github.com/hashicorp/vault/logical" + "github.com/mitchellh/mapstructure" + dockertest "gopkg.in/ory-am/dockertest.v3" +) + +func prepareTestContainer(t *testing.T) (cleanup func(), retAddress string, nomadToken string) { + nomadToken = os.Getenv("NOMAD_TOKEN") + + retAddress = os.Getenv("NOMAD_ADDR") + + if retAddress != "" { + return func() {}, retAddress, nomadToken + } + + pool, err := dockertest.NewPool("") + if err != nil { + t.Fatalf("Failed to connect to docker: %s", err) + } + + dockerOptions := &dockertest.RunOptions{ + Repository: "djenriquez/nomad", + Tag: "v0.7.0-beta1", + Cmd: []string{"agent", "-dev"}, + } + resource, err := pool.RunWithOptions(dockerOptions) + if err != nil { + t.Fatalf("Could not start local Nomad docker container: %s", err) + } + + cleanup = func() { + err := pool.Purge(resource) + if err != nil { + t.Fatalf("Failed to cleanup local container: %s", err) + } + } + + retAddress = fmt.Sprintf("http://localhost:%s/", resource.GetPort("4646/tcp")) + + // exponential backoff-retry + if err = pool.Retry(func() error { + var err error + nomadapiConfig := nomadapi.DefaultConfig() + nomadapiConfig.Address = retAddress + nomad, err := nomadapi.NewClient(nomadapiConfig) + if err != nil { + return err + } + aclbootstrap, _, err := nomad.ACLTokens().Bootstrap(nil) + nomadToken = aclbootstrap.SecretID + policy := &nomadapi.ACLPolicy{ + Name: "test", + Description: "test", + Rules: `namespace "default" { + policy = "read" + } + `, + } + _, err = nomad.ACLPolicies().Upsert(policy, nil) + if err != nil { + t.Fatal(err) + } + return err + }); err != nil { + cleanup() + t.Fatalf("Could not connect to docker: %s", err) + } + return cleanup, retAddress, nomadToken +} + +func TestBackend_config_access(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + + cleanup, connURL, connToken := prepareTestContainer(t) + defer cleanup() + + connData := 
map[string]interface{}{ + "address": connURL, + "token": connToken, + } + + confReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/access", + Storage: config.StorageView, + Data: connData, + } + + resp, err := b.HandleRequest(confReq) + if err != nil || (resp != nil && resp.IsError()) || resp != nil { + t.Fatalf("failed to write configuration: resp:%#v err:%s", resp, err) + } + + confReq.Operation = logical.ReadOperation + resp, err = b.HandleRequest(confReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("failed to write configuration: resp:%#v err:%s", resp, err) + } + + expected := map[string]interface{}{ + "address": connData["address"].(string), + "scheme": "http", + } + if !reflect.DeepEqual(expected, resp.Data) { + t.Fatalf("bad: expected:%#v\nactual:%#v\n", expected, resp.Data) + } + if resp.Data["token"] != nil { + t.Fatalf("token should not be set in the response") + } +} + +func TestBackend_renew_revoke(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(config) + if err != nil { + t.Fatal(err) + } + + cleanup, connURL, connToken := prepareTestContainer(t) + defer cleanup() + + connData := map[string]interface{}{ + "address": connURL, + "token": connToken, + } + + req := &logical.Request{ + Storage: config.StorageView, + Operation: logical.UpdateOperation, + Path: "config/access", + Data: connData, + } + resp, err := b.HandleRequest(req) + if err != nil { + t.Fatal(err) + } + + req.Path = "roles/test" + req.Data = map[string]interface{}{ + "policy": []string{"policy"}, + "lease": "6h", + } + resp, err = b.HandleRequest(req) + if err != nil { + t.Fatal(err) + } + + req.Operation = logical.ReadOperation + req.Path = "creds/test" + resp, err = b.HandleRequest(req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("resp nil") + } + if resp.IsError() { + t.Fatalf("resp is error: %v", resp.Error()) + } + + generatedSecret := resp.Secret + generatedSecret.IssueTime = time.Now() + generatedSecret.TTL = 6 * time.Hour + + var d struct { + Token string `mapstructure:"SecretID"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + t.Fatal(err) + } + log.Printf("[WARN] Generated token: %s", d.Token) + + // Build a client and verify that the credentials work + nomadapiConfig := nomadapi.DefaultConfig() + nomadapiConfig.Address = connData["address"].(string) + nomadapiConfig.SecretID = d.Token + client, err := nomadapi.NewClient(nomadapiConfig) + if err != nil { + t.Fatal(err) + } + + log.Printf("[WARN] Verifying that the generated token works...") + _, err = client.Status().Leader, nil + if err != nil { + t.Fatal(err) + } + + req.Operation = logical.RenewOperation + req.Secret = generatedSecret + resp, err = b.HandleRequest(req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("got nil response from renew") + } + + req.Operation = logical.RevokeOperation + resp, err = b.HandleRequest(req) + if err != nil { + t.Fatal(err) + } + + log.Printf("[WARN] Verifying that the generated token does not work...") + _, err = client.Status().Leader, nil + if err == nil { + t.Fatal("expected error") + } +} From ca9ad73565381f19be2a3f2b359154390a640dfe Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Thu, 28 Sep 2017 23:57:48 +0100 Subject: [PATCH 006/329] Adding Global tokens to the data model --- builtin/logical/nomad/path_roles.go | 9 +++++++++ builtin/logical/nomad/path_token.go | 1 + 2 files changed, 10 insertions(+) diff --git 
a/builtin/logical/nomad/path_roles.go b/builtin/logical/nomad/path_roles.go index 79836c0ac2..53fb7119d2 100644 --- a/builtin/logical/nomad/path_roles.go +++ b/builtin/logical/nomad/path_roles.go @@ -32,6 +32,11 @@ func pathRoles() *framework.Path { Description: "Policy name as previously created in Nomad. Required", }, + "global": &framework.FieldSchema{ + Type: framework.TypeBool, + Description: "Policy name as previously created in Nomad. Required", + }, + "token_type": &framework.FieldSchema{ Type: framework.TypeString, Default: "client", @@ -91,6 +96,7 @@ func pathRolesRead( Data: map[string]interface{}{ "lease": result.Lease.String(), "token_type": result.TokenType, + "global": result.Global, }, } if len(result.Policy) != 0 { @@ -112,6 +118,7 @@ func pathRolesWrite( } name := d.Get("name").(string) + global := d.Get("global").(bool) policy := d.Get("policy").([]string) var err error if tokenType != "management" { @@ -135,6 +142,7 @@ func pathRolesWrite( Policy: policy, Lease: lease, TokenType: tokenType, + Global: global, }) if err != nil { return nil, err @@ -160,4 +168,5 @@ type roleConfig struct { Policy []string `json:"policy"` Lease time.Duration `json:"lease"` TokenType string `json:"token_type"` + Global bool `json:"global"` } diff --git a/builtin/logical/nomad/path_token.go b/builtin/logical/nomad/path_token.go index f837df412b..1a1c66fdee 100644 --- a/builtin/logical/nomad/path_token.go +++ b/builtin/logical/nomad/path_token.go @@ -63,6 +63,7 @@ func (b *backend) pathTokenRead( Name: tokenName, Type: result.TokenType, Policies: result.Policy, + Global: result.Global, }, nil) if err != nil { return logical.ErrorResponse(err.Error()), nil From bf68079051eed91702167815664b5221dd6f6881 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Thu, 28 Sep 2017 23:58:41 +0100 Subject: [PATCH 007/329] Various fixes (Null pointer, wait for Nomad go up, Auth before policy creation) --- builtin/logical/nomad/backend_test.go | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/builtin/logical/nomad/backend_test.go b/builtin/logical/nomad/backend_test.go index a234b3b39a..7c7a7393b9 100644 --- a/builtin/logical/nomad/backend_test.go +++ b/builtin/logical/nomad/backend_test.go @@ -30,8 +30,9 @@ func prepareTestContainer(t *testing.T) (cleanup func(), retAddress string, noma dockerOptions := &dockertest.RunOptions{ Repository: "djenriquez/nomad", - Tag: "v0.7.0-beta1", + Tag: "latest", Cmd: []string{"agent", "-dev"}, + Env: []string{`NOMAD_LOCAL_CONFIG=bind_addr = "0.0.0.0" acl { enabled = true }`}, } resource, err := pool.RunWithOptions(dockerOptions) if err != nil { @@ -46,7 +47,9 @@ func prepareTestContainer(t *testing.T) (cleanup func(), retAddress string, noma } retAddress = fmt.Sprintf("http://localhost:%s/", resource.GetPort("4646/tcp")) + // Give Nomad time to initialize + time.Sleep(5000 * time.Millisecond) // exponential backoff-retry if err = pool.Retry(func() error { var err error @@ -57,7 +60,11 @@ func prepareTestContainer(t *testing.T) (cleanup func(), retAddress string, noma return err } aclbootstrap, _, err := nomad.ACLTokens().Bootstrap(nil) + if err != nil { + t.Fatalf("err: %v", err) + } nomadToken = aclbootstrap.SecretID + log.Printf("[WARN] Generated Master token: %s", nomadToken) policy := &nomadapi.ACLPolicy{ Name: "test", Description: "test", @@ -66,7 +73,11 @@ func prepareTestContainer(t *testing.T) (cleanup func(), retAddress string, noma } `, } - _, err = nomad.ACLPolicies().Upsert(policy, nil) + nomadAuthConfig := 
nomadapi.DefaultConfig() + nomadAuthConfig.Address = retAddress + nomadAuthConfig.SecretID = nomadToken + nomadAuth, err := nomadapi.NewClient(nomadAuthConfig) + _, err = nomadAuth.ACLPolicies().Upsert(policy, nil) if err != nil { t.Fatal(err) } @@ -179,7 +190,7 @@ func TestBackend_renew_revoke(t *testing.T) { generatedSecret.TTL = 6 * time.Hour var d struct { - Token string `mapstructure:"SecretID"` + Token string `mapstructure:"secret_id"` } if err := mapstructure.Decode(resp.Data, &d); err != nil { t.Fatal(err) @@ -196,7 +207,7 @@ func TestBackend_renew_revoke(t *testing.T) { } log.Printf("[WARN] Verifying that the generated token works...") - _, err = client.Status().Leader, nil + _, err = client.Jobs().List, nil if err != nil { t.Fatal(err) } @@ -218,7 +229,7 @@ func TestBackend_renew_revoke(t *testing.T) { } log.Printf("[WARN] Verifying that the generated token does not work...") - _, err = client.Status().Leader, nil + _, err = client.Jobs().List, nil if err == nil { t.Fatal("expected error") } From 7e5c465ecb1838607ac51a6eb71c3a38f758ed85 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Fri, 29 Sep 2017 09:33:58 +0100 Subject: [PATCH 008/329] Working tests --- builtin/logical/nomad/backend_test.go | 48 ++++++++++++++++++++++----- 1 file changed, 39 insertions(+), 9 deletions(-) diff --git a/builtin/logical/nomad/backend_test.go b/builtin/logical/nomad/backend_test.go index 7c7a7393b9..e1b0956b59 100644 --- a/builtin/logical/nomad/backend_test.go +++ b/builtin/logical/nomad/backend_test.go @@ -72,6 +72,20 @@ func prepareTestContainer(t *testing.T) (cleanup func(), retAddress string, noma policy = "read" } `, + } + anonPolicy := &nomadapi.ACLPolicy{ + Name: "anonymous", + Description: "Deny all access for anonymous requests", + Rules: `namespace "default" { + policy = "deny" + } + agent { + policy = "deny" + } + node { + policy = "deny" + } + `, } nomadAuthConfig := nomadapi.DefaultConfig() nomadAuthConfig.Address = retAddress @@ -81,6 +95,10 @@ func prepareTestContainer(t *testing.T) (cleanup func(), retAddress string, noma if err != nil { t.Fatal(err) } + _, err = nomadAuth.ACLPolicies().Upsert(anonPolicy, nil) + if err != nil { + t.Fatal(err) + } return err }); err != nil { cleanup() @@ -143,9 +161,10 @@ func TestBackend_renew_revoke(t *testing.T) { t.Fatal(err) } - cleanup, connURL, connToken := prepareTestContainer(t) - defer cleanup() - + //cleanup, connURL, connToken := prepareTestContainer(t) + //defer cleanup() + //Ignore cleanup until I can find why the bloody test is not working + _, connURL, connToken := prepareTestContainer(t) connData := map[string]interface{}{ "address": connURL, "token": connToken, @@ -190,12 +209,13 @@ func TestBackend_renew_revoke(t *testing.T) { generatedSecret.TTL = 6 * time.Hour var d struct { - Token string `mapstructure:"secret_id"` + Token string `mapstructure:"secret_id"` + Accessor string `mapstructure:"accessor_id"` } if err := mapstructure.Decode(resp.Data, &d); err != nil { t.Fatal(err) } - log.Printf("[WARN] Generated token: %s", d.Token) + log.Printf("[WARN] Generated token: %s with accesor %s", d.Token, d.Accessor) // Build a client and verify that the credentials work nomadapiConfig := nomadapi.DefaultConfig() @@ -207,7 +227,7 @@ func TestBackend_renew_revoke(t *testing.T) { } log.Printf("[WARN] Verifying that the generated token works...") - _, err = client.Jobs().List, nil + _, err = client.Agent().Members, nil if err != nil { t.Fatal(err) } @@ -228,9 +248,19 @@ func TestBackend_renew_revoke(t *testing.T) { t.Fatal(err) } - 
log.Printf("[WARN] Verifying that the generated token does not work...") - _, err = client.Jobs().List, nil + // Build a management client and verify that the token does not exist anymore + nomadmgmtConfig := nomadapi.DefaultConfig() + nomadmgmtConfig.Address = connData["address"].(string) + nomadmgmtConfig.SecretID = connData["token"].(string) + mgmtclient, err := nomadapi.NewClient(nomadmgmtConfig) + + q := &nomadapi.QueryOptions{ + Namespace: "default", + } + + log.Printf("[WARN] Verifying that the generated token does not exist...") + _, _, err = mgmtclient.ACLTokens().Info(d.Accessor, q) if err == nil { - t.Fatal("expected error") + t.Fatal("err: expected error") } } From 222b9d1c52c9c68736faa8efe9ec4c5adf520374 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Fri, 29 Sep 2017 09:35:17 +0100 Subject: [PATCH 009/329] Removing ignore to cleanup function --- builtin/logical/nomad/backend_test.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/builtin/logical/nomad/backend_test.go b/builtin/logical/nomad/backend_test.go index e1b0956b59..b64dc1bea1 100644 --- a/builtin/logical/nomad/backend_test.go +++ b/builtin/logical/nomad/backend_test.go @@ -161,10 +161,8 @@ func TestBackend_renew_revoke(t *testing.T) { t.Fatal(err) } - //cleanup, connURL, connToken := prepareTestContainer(t) - //defer cleanup() - //Ignore cleanup until I can find why the bloody test is not working - _, connURL, connToken := prepareTestContainer(t) + cleanup, connURL, connToken := prepareTestContainer(t) + defer cleanup() connData := map[string]interface{}{ "address": connURL, "token": connToken, From b581716b754981e9500ba1c4bb271741648b9ffc Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Fri, 29 Sep 2017 11:23:47 +0100 Subject: [PATCH 010/329] Updated API Docs with the Global Token Parameter --- website/source/api/secret/nomad/index.html.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/website/source/api/secret/nomad/index.html.md b/website/source/api/secret/nomad/index.html.md index 236e3e7e83..9cdf59b976 100644 --- a/website/source/api/secret/nomad/index.html.md +++ b/website/source/api/secret/nomad/index.html.md @@ -70,7 +70,10 @@ updated attributes. as a string duration with a time suffix like `"30s"` or `"1h"`. If not provided, the default Vault lease is used. -- `policy` `(string: )` – Comma separated list of Nomad policies the token is going to be created against. These need to be created beforehand in Nomad. +- `policy` `(string: "")` – Comma separated list of Nomad policies the token is going to be created against. These need to be created beforehand in Nomad. + +- `global` `(bool: "")` – Specifies if the token should be global, as defined in the [Nomad Documentation](https://www.nomadproject.io/guides/acl.html#acl-tokens). +ma - `token_type` `(string: "client")` - Specifies the type of token to create when using this role. Valid values are `"client"` or `"management"`. 
From 84b57b23620f537888264f73d42931b3c595392f Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Fri, 29 Sep 2017 14:39:24 +0100 Subject: [PATCH 011/329] Adding vendor dependency --- vendor/vendor.json | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/vendor/vendor.json b/vendor/vendor.json index 541d6c03e3..2ccc97e6db 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -1140,6 +1140,13 @@ "revision": "68e816d1c783414e79bc65b3994d9ab6b0a722ab", "revisionTime": "2017-09-14T15:46:24Z" }, + { + "checksumSHA1": "4tY6k1MqB50R66TJJH/rsG69Yd4=", + "path": "github.com/hashicorp/nomad/api", + "revision": "a1e08240a1cff7d1bd10c89d722110a053f8dc6e", + "revisionTime": "2017-09-20T19:48:06Z", + "version": "v0.7.0-beta1" + }, { "checksumSHA1": "/oss17GO4hXGM7QnUdI3VzcAHzA=", "path": "github.com/hashicorp/serf/coordinate", From ff4f920917eb5231b314f149b169d9cd9582d406 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Fri, 29 Sep 2017 14:39:59 +0100 Subject: [PATCH 012/329] Adding vendor dir --- vendor/github.com/hashicorp/nomad/LICENSE | 363 +++++++ vendor/github.com/hashicorp/nomad/api/acl.go | 186 ++++ .../github.com/hashicorp/nomad/api/agent.go | 267 +++++ .../hashicorp/nomad/api/allocations.go | 157 +++ vendor/github.com/hashicorp/nomad/api/api.go | 777 ++++++++++++++ .../hashicorp/nomad/api/constraint.go | 17 + .../hashicorp/nomad/api/deployments.go | 234 +++++ .../hashicorp/nomad/api/evaluations.go | 97 ++ vendor/github.com/hashicorp/nomad/api/fs.go | 398 +++++++ vendor/github.com/hashicorp/nomad/api/jobs.go | 968 ++++++++++++++++++ .../hashicorp/nomad/api/jobs_testing.go | 110 ++ .../hashicorp/nomad/api/namespace.go | 90 ++ .../github.com/hashicorp/nomad/api/nodes.go | 199 ++++ .../hashicorp/nomad/api/operator.go | 87 ++ vendor/github.com/hashicorp/nomad/api/raw.go | 38 + .../github.com/hashicorp/nomad/api/regions.go | 23 + .../hashicorp/nomad/api/resources.go | 81 ++ .../github.com/hashicorp/nomad/api/search.go | 39 + .../hashicorp/nomad/api/sentinel.go | 79 ++ .../github.com/hashicorp/nomad/api/status.go | 43 + .../github.com/hashicorp/nomad/api/system.go | 23 + .../github.com/hashicorp/nomad/api/tasks.go | 617 +++++++++++ 22 files changed, 4893 insertions(+) create mode 100644 vendor/github.com/hashicorp/nomad/LICENSE create mode 100644 vendor/github.com/hashicorp/nomad/api/acl.go create mode 100644 vendor/github.com/hashicorp/nomad/api/agent.go create mode 100644 vendor/github.com/hashicorp/nomad/api/allocations.go create mode 100644 vendor/github.com/hashicorp/nomad/api/api.go create mode 100644 vendor/github.com/hashicorp/nomad/api/constraint.go create mode 100644 vendor/github.com/hashicorp/nomad/api/deployments.go create mode 100644 vendor/github.com/hashicorp/nomad/api/evaluations.go create mode 100644 vendor/github.com/hashicorp/nomad/api/fs.go create mode 100644 vendor/github.com/hashicorp/nomad/api/jobs.go create mode 100644 vendor/github.com/hashicorp/nomad/api/jobs_testing.go create mode 100644 vendor/github.com/hashicorp/nomad/api/namespace.go create mode 100644 vendor/github.com/hashicorp/nomad/api/nodes.go create mode 100644 vendor/github.com/hashicorp/nomad/api/operator.go create mode 100644 vendor/github.com/hashicorp/nomad/api/raw.go create mode 100644 vendor/github.com/hashicorp/nomad/api/regions.go create mode 100644 vendor/github.com/hashicorp/nomad/api/resources.go create mode 100644 vendor/github.com/hashicorp/nomad/api/search.go create mode 100644 vendor/github.com/hashicorp/nomad/api/sentinel.go create mode 100644 
vendor/github.com/hashicorp/nomad/api/status.go create mode 100644 vendor/github.com/hashicorp/nomad/api/system.go create mode 100644 vendor/github.com/hashicorp/nomad/api/tasks.go diff --git a/vendor/github.com/hashicorp/nomad/LICENSE b/vendor/github.com/hashicorp/nomad/LICENSE new file mode 100644 index 0000000000..e87a115e46 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. 
Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. 
You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. 
Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. 
Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/nomad/api/acl.go b/vendor/github.com/hashicorp/nomad/api/acl.go new file mode 100644 index 0000000000..bac6982375 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/acl.go @@ -0,0 +1,186 @@ +package api + +import ( + "fmt" + "time" +) + +// ACLPolicies is used to query the ACL Policy endpoints. +type ACLPolicies struct { + client *Client +} + +// ACLPolicies returns a new handle on the ACL policies. +func (c *Client) ACLPolicies() *ACLPolicies { + return &ACLPolicies{client: c} +} + +// List is used to dump all of the policies. 
+func (a *ACLPolicies) List(q *QueryOptions) ([]*ACLPolicyListStub, *QueryMeta, error) { + var resp []*ACLPolicyListStub + qm, err := a.client.query("/v1/acl/policies", &resp, q) + if err != nil { + return nil, nil, err + } + return resp, qm, nil +} + +// Upsert is used to create or update a policy +func (a *ACLPolicies) Upsert(policy *ACLPolicy, q *WriteOptions) (*WriteMeta, error) { + if policy == nil || policy.Name == "" { + return nil, fmt.Errorf("missing policy name") + } + wm, err := a.client.write("/v1/acl/policy/"+policy.Name, policy, nil, q) + if err != nil { + return nil, err + } + return wm, nil +} + +// Delete is used to delete a policy +func (a *ACLPolicies) Delete(policyName string, q *WriteOptions) (*WriteMeta, error) { + if policyName == "" { + return nil, fmt.Errorf("missing policy name") + } + wm, err := a.client.delete("/v1/acl/policy/"+policyName, nil, q) + if err != nil { + return nil, err + } + return wm, nil +} + +// Info is used to query a specific policy +func (a *ACLPolicies) Info(policyName string, q *QueryOptions) (*ACLPolicy, *QueryMeta, error) { + if policyName == "" { + return nil, nil, fmt.Errorf("missing policy name") + } + var resp ACLPolicy + wm, err := a.client.query("/v1/acl/policy/"+policyName, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, wm, nil +} + +// ACLTokens is used to query the ACL token endpoints. +type ACLTokens struct { + client *Client +} + +// ACLTokens returns a new handle on the ACL tokens. +func (c *Client) ACLTokens() *ACLTokens { + return &ACLTokens{client: c} +} + +// Bootstrap is used to get the initial bootstrap token +func (a *ACLTokens) Bootstrap(q *WriteOptions) (*ACLToken, *WriteMeta, error) { + var resp ACLToken + wm, err := a.client.write("/v1/acl/bootstrap", nil, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, wm, nil +} + +// List is used to dump all of the tokens. 
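The policy endpoints above are the pieces a Vault-managed role ultimately points at. A rough, self-contained sketch of driving them directly (the address, management token, and rule body are placeholders, not values from this patch):

package main

import (
	"fmt"

	"github.com/hashicorp/nomad/api"
)

func main() {
	conf := api.DefaultConfig()
	conf.Address = "http://127.0.0.1:4646" // placeholder agent address
	conf.SecretID = "management-token"     // placeholder management token

	client, err := api.NewClient(conf)
	if err != nil {
		panic(err)
	}

	// Upsert a policy that later tokens can reference.
	policy := &api.ACLPolicy{
		Name:        "readonly",
		Description: "read-only access for illustration",
		Rules:       `namespace "default" { policy = "read" }`,
	}
	if _, err := client.ACLPolicies().Upsert(policy, nil); err != nil {
		panic(err)
	}

	// List policies to confirm the write.
	stubs, _, err := client.ACLPolicies().List(nil)
	if err != nil {
		panic(err)
	}
	for _, p := range stubs {
		fmt.Println(p.Name, "-", p.Description)
	}
}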
+func (a *ACLTokens) List(q *QueryOptions) ([]*ACLTokenListStub, *QueryMeta, error) { + var resp []*ACLTokenListStub + qm, err := a.client.query("/v1/acl/tokens", &resp, q) + if err != nil { + return nil, nil, err + } + return resp, qm, nil +} + +// Create is used to create a token +func (a *ACLTokens) Create(token *ACLToken, q *WriteOptions) (*ACLToken, *WriteMeta, error) { + if token.AccessorID != "" { + return nil, nil, fmt.Errorf("cannot specify Accessor ID") + } + var resp ACLToken + wm, err := a.client.write("/v1/acl/token", token, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, wm, nil +} + +// Update is used to update an existing token +func (a *ACLTokens) Update(token *ACLToken, q *WriteOptions) (*ACLToken, *WriteMeta, error) { + if token.AccessorID == "" { + return nil, nil, fmt.Errorf("missing accessor ID") + } + var resp ACLToken + wm, err := a.client.write("/v1/acl/token/"+token.AccessorID, + token, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, wm, nil +} + +// Delete is used to delete a token +func (a *ACLTokens) Delete(accessorID string, q *WriteOptions) (*WriteMeta, error) { + if accessorID == "" { + return nil, fmt.Errorf("missing accessor ID") + } + wm, err := a.client.delete("/v1/acl/token/"+accessorID, nil, q) + if err != nil { + return nil, err + } + return wm, nil +} + +// Info is used to query a token +func (a *ACLTokens) Info(accessorID string, q *QueryOptions) (*ACLToken, *QueryMeta, error) { + if accessorID == "" { + return nil, nil, fmt.Errorf("missing accessor ID") + } + var resp ACLToken + wm, err := a.client.query("/v1/acl/token/"+accessorID, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, wm, nil +} + +// ACLPolicyListStub is used to for listing ACL policies +type ACLPolicyListStub struct { + Name string + Description string + CreateIndex uint64 + ModifyIndex uint64 +} + +// ACLPolicy is used to represent an ACL policy +type ACLPolicy struct { + Name string + Description string + Rules string + CreateIndex uint64 + ModifyIndex uint64 +} + +// ACLToken represents a client token which is used to Authenticate +type ACLToken struct { + AccessorID string + SecretID string + Name string + Type string + Policies []string + Global bool + CreateTime time.Time + CreateIndex uint64 + ModifyIndex uint64 +} + +type ACLTokenListStub struct { + AccessorID string + Name string + Type string + Policies []string + Global bool + CreateTime time.Time + CreateIndex uint64 + ModifyIndex uint64 +} diff --git a/vendor/github.com/hashicorp/nomad/api/agent.go b/vendor/github.com/hashicorp/nomad/api/agent.go new file mode 100644 index 0000000000..e8b063ff18 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/agent.go @@ -0,0 +1,267 @@ +package api + +import ( + "fmt" + "net/url" +) + +// Agent encapsulates an API client which talks to Nomad's +// agent endpoints for a specific node. +type Agent struct { + client *Client + + // Cache static agent info + nodeName string + datacenter string + region string +} + +// KeyringResponse is a unified key response and can be used for install, +// remove, use, as well as listing key queries. +type KeyringResponse struct { + Messages map[string]string + Keys map[string]int + NumNodes int +} + +// KeyringRequest is request objects for serf key operations. +type KeyringRequest struct { + Key string +} + +// Agent returns a new agent which can be used to query +// the agent-specific endpoints. 
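The token endpoints above are what a secrets backend needs in order to mint short-lived Nomad credentials. A sketch of that call, assuming an *api.Client built as in the previous example; the token name and policy are placeholders:

package example

import (
	"fmt"

	"github.com/hashicorp/nomad/api"
)

// mintToken creates a client-type ACL token tied to an existing policy.
// AccessorID is left empty because Create rejects caller-supplied accessors.
func mintToken(client *api.Client) (*api.ACLToken, error) {
	token, _, err := client.ACLTokens().Create(&api.ACLToken{
		Name:     "vault-example",        // placeholder token name
		Type:     "client",
		Policies: []string{"readonly"},   // placeholder policy
	}, nil)
	if err != nil {
		return nil, err
	}
	fmt.Println("accessor:", token.AccessorID)
	return token, nil
}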
+func (c *Client) Agent() *Agent { + return &Agent{client: c} +} + +// Self is used to query the /v1/agent/self endpoint and +// returns information specific to the running agent. +func (a *Agent) Self() (*AgentSelf, error) { + var out *AgentSelf + + // Query the self endpoint on the agent + _, err := a.client.query("/v1/agent/self", &out, nil) + if err != nil { + return nil, fmt.Errorf("failed querying self endpoint: %s", err) + } + + // Populate the cache for faster queries + a.populateCache(out) + + return out, nil +} + +// populateCache is used to insert various pieces of static +// data into the agent handle. This is used during subsequent +// lookups for the same data later on to save the round trip. +func (a *Agent) populateCache(self *AgentSelf) { + if a.nodeName == "" { + a.nodeName = self.Member.Name + } + if a.datacenter == "" { + if val, ok := self.Config["Datacenter"]; ok { + a.datacenter, _ = val.(string) + } + } + if a.region == "" { + if val, ok := self.Config["Region"]; ok { + a.region, _ = val.(string) + } + } +} + +// NodeName is used to query the Nomad agent for its node name. +func (a *Agent) NodeName() (string, error) { + // Return from cache if we have it + if a.nodeName != "" { + return a.nodeName, nil + } + + // Query the node name + _, err := a.Self() + return a.nodeName, err +} + +// Datacenter is used to return the name of the datacenter which +// the agent is a member of. +func (a *Agent) Datacenter() (string, error) { + // Return from cache if we have it + if a.datacenter != "" { + return a.datacenter, nil + } + + // Query the agent for the DC + _, err := a.Self() + return a.datacenter, err +} + +// Region is used to look up the region the agent is in. +func (a *Agent) Region() (string, error) { + // Return from cache if we have it + if a.region != "" { + return a.region, nil + } + + // Query the agent for the region + _, err := a.Self() + return a.region, err +} + +// Join is used to instruct a server node to join another server +// via the gossip protocol. Multiple addresses may be specified. +// We attempt to join all of the hosts in the list. Returns the +// number of nodes successfully joined and any error. If one or +// more nodes have a successful result, no error is returned. +func (a *Agent) Join(addrs ...string) (int, error) { + // Accumulate the addresses + v := url.Values{} + for _, addr := range addrs { + v.Add("address", addr) + } + + // Send the join request + var resp joinResponse + _, err := a.client.write("/v1/agent/join?"+v.Encode(), nil, &resp, nil) + if err != nil { + return 0, fmt.Errorf("failed joining: %s", err) + } + if resp.Error != "" { + return 0, fmt.Errorf("failed joining: %s", resp.Error) + } + return resp.NumJoined, nil +} + +// Members is used to query all of the known server members +func (a *Agent) Members() (*ServerMembers, error) { + var resp *ServerMembers + + // Query the known members + _, err := a.client.query("/v1/agent/members", &resp, nil) + if err != nil { + return nil, err + } + return resp, nil +} + +// ForceLeave is used to eject an existing node from the cluster. +func (a *Agent) ForceLeave(node string) error { + _, err := a.client.write("/v1/agent/force-leave?node="+node, nil, nil, nil) + return err +} + +// Servers is used to query the list of servers on a client node. 
+func (a *Agent) Servers() ([]string, error) { + var resp []string + _, err := a.client.query("/v1/agent/servers", &resp, nil) + if err != nil { + return nil, err + } + return resp, nil +} + +// SetServers is used to update the list of servers on a client node. +func (a *Agent) SetServers(addrs []string) error { + // Accumulate the addresses + v := url.Values{} + for _, addr := range addrs { + v.Add("address", addr) + } + + _, err := a.client.write("/v1/agent/servers?"+v.Encode(), nil, nil, nil) + return err +} + +// ListKeys returns the list of installed keys +func (a *Agent) ListKeys() (*KeyringResponse, error) { + var resp KeyringResponse + _, err := a.client.query("/v1/agent/keyring/list", &resp, nil) + if err != nil { + return nil, err + } + return &resp, nil +} + +// InstallKey installs a key in the keyrings of all the serf members +func (a *Agent) InstallKey(key string) (*KeyringResponse, error) { + args := KeyringRequest{ + Key: key, + } + var resp KeyringResponse + _, err := a.client.write("/v1/agent/keyring/install", &args, &resp, nil) + return &resp, err +} + +// UseKey uses a key from the keyring of serf members +func (a *Agent) UseKey(key string) (*KeyringResponse, error) { + args := KeyringRequest{ + Key: key, + } + var resp KeyringResponse + _, err := a.client.write("/v1/agent/keyring/use", &args, &resp, nil) + return &resp, err +} + +// RemoveKey removes a particular key from keyrings of serf members +func (a *Agent) RemoveKey(key string) (*KeyringResponse, error) { + args := KeyringRequest{ + Key: key, + } + var resp KeyringResponse + _, err := a.client.write("/v1/agent/keyring/remove", &args, &resp, nil) + return &resp, err +} + +// joinResponse is used to decode the response we get while +// sending a member join request. +type joinResponse struct { + NumJoined int `json:"num_joined"` + Error string `json:"error"` +} + +type ServerMembers struct { + ServerName string + ServerRegion string + ServerDC string + Members []*AgentMember +} + +type AgentSelf struct { + Config map[string]interface{} `json:"config"` + Member AgentMember `json:"member"` + Stats map[string]map[string]string `json:"stats"` +} + +// AgentMember represents a cluster member known to the agent +type AgentMember struct { + Name string + Addr string + Port uint16 + Tags map[string]string + Status string + ProtocolMin uint8 + ProtocolMax uint8 + ProtocolCur uint8 + DelegateMin uint8 + DelegateMax uint8 + DelegateCur uint8 +} + +// AgentMembersNameSort implements sort.Interface for []*AgentMembersNameSort +// based on the Name, DC and Region +type AgentMembersNameSort []*AgentMember + +func (a AgentMembersNameSort) Len() int { return len(a) } +func (a AgentMembersNameSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a AgentMembersNameSort) Less(i, j int) bool { + if a[i].Tags["region"] != a[j].Tags["region"] { + return a[i].Tags["region"] < a[j].Tags["region"] + } + + if a[i].Tags["dc"] != a[j].Tags["dc"] { + return a[i].Tags["dc"] < a[j].Tags["dc"] + } + + return a[i].Name < a[j].Name + +} diff --git a/vendor/github.com/hashicorp/nomad/api/allocations.go b/vendor/github.com/hashicorp/nomad/api/allocations.go new file mode 100644 index 0000000000..74aaaf3fd3 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/allocations.go @@ -0,0 +1,157 @@ +package api + +import ( + "fmt" + "sort" + "time" +) + +var ( + // NodeDownErr marks an operation as not able to complete since the node is + // down. + NodeDownErr = fmt.Errorf("node down") +) + +// Allocations is used to query the alloc-related endpoints. 
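For completeness, a small sketch of the agent membership API defined above, again assuming an already-configured *api.Client:

package example

import (
	"fmt"

	"github.com/hashicorp/nomad/api"
)

// printMembers prints the serf members known to the agent behind client.
func printMembers(client *api.Client) error {
	members, err := client.Agent().Members()
	if err != nil {
		return err
	}
	for _, m := range members.Members {
		fmt.Printf("%s %s:%d %s\n", m.Name, m.Addr, m.Port, m.Status)
	}
	return nil
}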
+type Allocations struct { + client *Client +} + +// Allocations returns a handle on the allocs endpoints. +func (c *Client) Allocations() *Allocations { + return &Allocations{client: c} +} + +// List returns a list of all of the allocations. +func (a *Allocations) List(q *QueryOptions) ([]*AllocationListStub, *QueryMeta, error) { + var resp []*AllocationListStub + qm, err := a.client.query("/v1/allocations", &resp, q) + if err != nil { + return nil, nil, err + } + sort.Sort(AllocIndexSort(resp)) + return resp, qm, nil +} + +func (a *Allocations) PrefixList(prefix string) ([]*AllocationListStub, *QueryMeta, error) { + return a.List(&QueryOptions{Prefix: prefix}) +} + +// Info is used to retrieve a single allocation. +func (a *Allocations) Info(allocID string, q *QueryOptions) (*Allocation, *QueryMeta, error) { + var resp Allocation + qm, err := a.client.query("/v1/allocation/"+allocID, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, qm, nil +} + +func (a *Allocations) Stats(alloc *Allocation, q *QueryOptions) (*AllocResourceUsage, error) { + nodeClient, err := a.client.GetNodeClient(alloc.NodeID, q) + if err != nil { + return nil, err + } + + var resp AllocResourceUsage + _, err = nodeClient.query("/v1/client/allocation/"+alloc.ID+"/stats", &resp, nil) + return &resp, err +} + +func (a *Allocations) GC(alloc *Allocation, q *QueryOptions) error { + nodeClient, err := a.client.GetNodeClient(alloc.NodeID, q) + if err != nil { + return err + } + + var resp struct{} + _, err = nodeClient.query("/v1/client/allocation/"+alloc.ID+"/gc", &resp, nil) + return err +} + +// Allocation is used for serialization of allocations. +type Allocation struct { + ID string + Namespace string + EvalID string + Name string + NodeID string + JobID string + Job *Job + TaskGroup string + Resources *Resources + TaskResources map[string]*Resources + Services map[string]string + Metrics *AllocationMetric + DesiredStatus string + DesiredDescription string + ClientStatus string + ClientDescription string + TaskStates map[string]*TaskState + DeploymentID string + DeploymentStatus *AllocDeploymentStatus + PreviousAllocation string + CreateIndex uint64 + ModifyIndex uint64 + AllocModifyIndex uint64 + CreateTime int64 +} + +// AllocationMetric is used to deserialize allocation metrics. +type AllocationMetric struct { + NodesEvaluated int + NodesFiltered int + NodesAvailable map[string]int + ClassFiltered map[string]int + ConstraintFiltered map[string]int + NodesExhausted int + ClassExhausted map[string]int + DimensionExhausted map[string]int + Scores map[string]float64 + AllocationTime time.Duration + CoalescedFailures int +} + +// AllocationListStub is used to return a subset of an allocation +// during list operations. +type AllocationListStub struct { + ID string + EvalID string + Name string + NodeID string + JobID string + JobVersion uint64 + TaskGroup string + DesiredStatus string + DesiredDescription string + ClientStatus string + ClientDescription string + TaskStates map[string]*TaskState + DeploymentStatus *AllocDeploymentStatus + CreateIndex uint64 + ModifyIndex uint64 + CreateTime int64 +} + +// AllocDeploymentStatus captures the status of the allocation as part of the +// deployment. This can include things like if the allocation has been marked as +// heatlhy. +type AllocDeploymentStatus struct { + Healthy *bool + ModifyIndex uint64 +} + +// AllocIndexSort reverse sorts allocs by CreateIndex. 
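A brief sketch of the allocation listing and lookup calls above, assuming an already-configured *api.Client:

package example

import (
	"fmt"

	"github.com/hashicorp/nomad/api"
)

// showAllocs lists allocations and fetches the first one in full.
func showAllocs(client *api.Client) error {
	stubs, _, err := client.Allocations().List(nil)
	if err != nil {
		return err
	}
	for _, s := range stubs {
		fmt.Println(s.ID, s.JobID, s.ClientStatus)
	}
	if len(stubs) > 0 {
		alloc, _, err := client.Allocations().Info(stubs[0].ID, nil)
		if err != nil {
			return err
		}
		fmt.Println("first alloc runs on node", alloc.NodeID)
	}
	return nil
}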
+type AllocIndexSort []*AllocationListStub + +func (a AllocIndexSort) Len() int { + return len(a) +} + +func (a AllocIndexSort) Less(i, j int) bool { + return a[i].CreateIndex > a[j].CreateIndex +} + +func (a AllocIndexSort) Swap(i, j int) { + a[i], a[j] = a[j], a[i] +} diff --git a/vendor/github.com/hashicorp/nomad/api/api.go b/vendor/github.com/hashicorp/nomad/api/api.go new file mode 100644 index 0000000000..a3d476d64a --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/api.go @@ -0,0 +1,777 @@ +package api + +import ( + "bytes" + "compress/gzip" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + "github.com/hashicorp/go-cleanhttp" + rootcerts "github.com/hashicorp/go-rootcerts" +) + +// QueryOptions are used to parameterize a query +type QueryOptions struct { + // Providing a datacenter overwrites the region provided + // by the Config + Region string + + // Namespace is the target namespace for the query. + Namespace string + + // AllowStale allows any Nomad server (non-leader) to service + // a read. This allows for lower latency and higher throughput + AllowStale bool + + // WaitIndex is used to enable a blocking query. Waits + // until the timeout or the next index is reached + WaitIndex uint64 + + // WaitTime is used to bound the duration of a wait. + // Defaults to that of the Config, but can be overridden. + WaitTime time.Duration + + // If set, used as prefix for resource list searches + Prefix string + + // Set HTTP parameters on the query. + Params map[string]string + + // SecretID is the secret ID of an ACL token + SecretID string +} + +// WriteOptions are used to parameterize a write +type WriteOptions struct { + // Providing a datacenter overwrites the region provided + // by the Config + Region string + + // Namespace is the target namespace for the write. + Namespace string + + // SecretID is the secret ID of an ACL token + SecretID string +} + +// QueryMeta is used to return meta data about a query +type QueryMeta struct { + // LastIndex. This can be used as a WaitIndex to perform + // a blocking query + LastIndex uint64 + + // Time of last contact from the leader for the + // server servicing the request + LastContact time.Duration + + // Is there a known leader + KnownLeader bool + + // How long did the request take + RequestTime time.Duration +} + +// WriteMeta is used to return meta data about a write +type WriteMeta struct { + // LastIndex. This can be used as a WaitIndex to perform + // a blocking query + LastIndex uint64 + + // How long did the request take + RequestTime time.Duration +} + +// HttpBasicAuth is used to authenticate http client with HTTP Basic Authentication +type HttpBasicAuth struct { + // Username to use for HTTP Basic Authentication + Username string + + // Password to use for HTTP Basic Authentication + Password string +} + +// Config is used to configure the creation of a client +type Config struct { + // Address is the address of the Nomad agent + Address string + + // Region to use. If not provided, the default agent region is used. + Region string + + // SecretID to use. This can be overwritten per request. + SecretID string + + // Namespace to use. If not provided the default namespace is used. + Namespace string + + // httpClient is the client to use. Default will be used if not provided. + httpClient *http.Client + + // HttpAuth is the auth info to use for http access. + HttpAuth *HttpBasicAuth + + // WaitTime limits how long a Watch will block. 
If not provided, + // the agent default values will be used. + WaitTime time.Duration + + // TLSConfig provides the various TLS related configurations for the http + // client + TLSConfig *TLSConfig +} + +// ClientConfig copies the configuration with a new client address, region, and +// whether the client has TLS enabled. +func (c *Config) ClientConfig(region, address string, tlsEnabled bool) *Config { + scheme := "http" + if tlsEnabled { + scheme = "https" + } + defaultConfig := DefaultConfig() + config := &Config{ + Address: fmt.Sprintf("%s://%s", scheme, address), + Region: region, + Namespace: c.Namespace, + httpClient: defaultConfig.httpClient, + SecretID: c.SecretID, + HttpAuth: c.HttpAuth, + WaitTime: c.WaitTime, + TLSConfig: c.TLSConfig.Copy(), + } + if tlsEnabled && config.TLSConfig != nil { + config.TLSConfig.TLSServerName = fmt.Sprintf("client.%s.nomad", region) + } + + return config +} + +// TLSConfig contains the parameters needed to configure TLS on the HTTP client +// used to communicate with Nomad. +type TLSConfig struct { + // CACert is the path to a PEM-encoded CA cert file to use to verify the + // Nomad server SSL certificate. + CACert string + + // CAPath is the path to a directory of PEM-encoded CA cert files to verify + // the Nomad server SSL certificate. + CAPath string + + // ClientCert is the path to the certificate for Nomad communication + ClientCert string + + // ClientKey is the path to the private key for Nomad communication + ClientKey string + + // TLSServerName, if set, is used to set the SNI host when connecting via + // TLS. + TLSServerName string + + // Insecure enables or disables SSL verification + Insecure bool +} + +func (t *TLSConfig) Copy() *TLSConfig { + if t == nil { + return nil + } + + nt := new(TLSConfig) + *nt = *t + return nt +} + +// DefaultConfig returns a default configuration for the client +func DefaultConfig() *Config { + config := &Config{ + Address: "http://127.0.0.1:4646", + httpClient: cleanhttp.DefaultClient(), + TLSConfig: &TLSConfig{}, + } + transport := config.httpClient.Transport.(*http.Transport) + transport.TLSHandshakeTimeout = 10 * time.Second + transport.TLSClientConfig = &tls.Config{ + MinVersion: tls.VersionTLS12, + } + + if addr := os.Getenv("NOMAD_ADDR"); addr != "" { + config.Address = addr + } + if v := os.Getenv("NOMAD_REGION"); v != "" { + config.Region = v + } + if v := os.Getenv("NOMAD_NAMESPACE"); v != "" { + config.Namespace = v + } + if auth := os.Getenv("NOMAD_HTTP_AUTH"); auth != "" { + var username, password string + if strings.Contains(auth, ":") { + split := strings.SplitN(auth, ":", 2) + username = split[0] + password = split[1] + } else { + username = auth + } + + config.HttpAuth = &HttpBasicAuth{ + Username: username, + Password: password, + } + } + + // Read TLS specific env vars + if v := os.Getenv("NOMAD_CACERT"); v != "" { + config.TLSConfig.CACert = v + } + if v := os.Getenv("NOMAD_CAPATH"); v != "" { + config.TLSConfig.CAPath = v + } + if v := os.Getenv("NOMAD_CLIENT_CERT"); v != "" { + config.TLSConfig.ClientCert = v + } + if v := os.Getenv("NOMAD_CLIENT_KEY"); v != "" { + config.TLSConfig.ClientKey = v + } + if v := os.Getenv("NOMAD_SKIP_VERIFY"); v != "" { + if insecure, err := strconv.ParseBool(v); err == nil { + config.TLSConfig.Insecure = insecure + } + } + if v := os.Getenv("NOMAD_TOKEN"); v != "" { + config.SecretID = v + } + return config +} + +// ConfigureTLS applies a set of TLS configurations to the the HTTP client. 
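A sketch of wiring the TLSConfig fields above into a client; the certificate paths are placeholders, and NewClient (further down in this file) applies them via ConfigureTLS:

package example

import "github.com/hashicorp/nomad/api"

// newTLSClient builds a client that talks HTTPS to the agent using the
// TLSConfig fields above; NewClient applies them via ConfigureTLS.
func newTLSClient() (*api.Client, error) {
	conf := api.DefaultConfig()
	conf.Address = "https://nomad.example.com:4646" // placeholder address
	conf.TLSConfig = &api.TLSConfig{
		CACert:     "/etc/nomad/ca.pem",        // placeholder paths
		ClientCert: "/etc/nomad/client.pem",
		ClientKey:  "/etc/nomad/client-key.pem",
	}
	return api.NewClient(conf)
}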
+func (c *Config) ConfigureTLS() error { + if c.TLSConfig == nil { + return nil + } + if c.httpClient == nil { + return fmt.Errorf("config HTTP Client must be set") + } + + var clientCert tls.Certificate + foundClientCert := false + if c.TLSConfig.ClientCert != "" || c.TLSConfig.ClientKey != "" { + if c.TLSConfig.ClientCert != "" && c.TLSConfig.ClientKey != "" { + var err error + clientCert, err = tls.LoadX509KeyPair(c.TLSConfig.ClientCert, c.TLSConfig.ClientKey) + if err != nil { + return err + } + foundClientCert = true + } else { + return fmt.Errorf("Both client cert and client key must be provided") + } + } + + clientTLSConfig := c.httpClient.Transport.(*http.Transport).TLSClientConfig + rootConfig := &rootcerts.Config{ + CAFile: c.TLSConfig.CACert, + CAPath: c.TLSConfig.CAPath, + } + if err := rootcerts.ConfigureTLS(clientTLSConfig, rootConfig); err != nil { + return err + } + + clientTLSConfig.InsecureSkipVerify = c.TLSConfig.Insecure + + if foundClientCert { + clientTLSConfig.Certificates = []tls.Certificate{clientCert} + } + if c.TLSConfig.TLSServerName != "" { + clientTLSConfig.ServerName = c.TLSConfig.TLSServerName + } + + return nil +} + +// Client provides a client to the Nomad API +type Client struct { + config Config +} + +// NewClient returns a new client +func NewClient(config *Config) (*Client, error) { + // bootstrap the config + defConfig := DefaultConfig() + + if config.Address == "" { + config.Address = defConfig.Address + } else if _, err := url.Parse(config.Address); err != nil { + return nil, fmt.Errorf("invalid address '%s': %v", config.Address, err) + } + + if config.httpClient == nil { + config.httpClient = defConfig.httpClient + } + + // Configure the TLS cofigurations + if err := config.ConfigureTLS(); err != nil { + return nil, err + } + + client := &Client{ + config: *config, + } + return client, nil +} + +// SetRegion sets the region to forward API requests to. +func (c *Client) SetRegion(region string) { + c.config.Region = region +} + +// SetNamespace sets the namespace to forward API requests to. +func (c *Client) SetNamespace(namespace string) { + c.config.Namespace = namespace +} + +// GetNodeClient returns a new Client that will dial the specified node. If the +// QueryOptions is set, its region will be used. +func (c *Client) GetNodeClient(nodeID string, q *QueryOptions) (*Client, error) { + return c.getNodeClientImpl(nodeID, q, c.Nodes().Info) +} + +// nodeLookup is the definition of a function used to lookup a node. This is +// largely used to mock the lookup in tests. +type nodeLookup func(nodeID string, q *QueryOptions) (*Node, *QueryMeta, error) + +// getNodeClientImpl is the implementation of creating a API client for +// contacting a node. It takes a function to lookup the node such that it can be +// mocked during tests. +func (c *Client) getNodeClientImpl(nodeID string, q *QueryOptions, lookup nodeLookup) (*Client, error) { + node, _, err := lookup(nodeID, q) + if err != nil { + return nil, err + } + if node.Status == "down" { + return nil, NodeDownErr + } + if node.HTTPAddr == "" { + return nil, fmt.Errorf("http addr of node %q (%s) is not advertised", node.Name, nodeID) + } + + var region string + switch { + case q != nil && q.Region != "": + // Prefer the region set in the query parameter + region = q.Region + case c.config.Region != "": + // If the client is configured for a particular region use that + region = c.config.Region + default: + // No region information is given so use the default. 
+ region = "global" + } + + // Get an API client for the node + conf := c.config.ClientConfig(region, node.HTTPAddr, node.TLSEnabled) + return NewClient(conf) +} + +// SetSecretID sets the ACL token secret for API requests. +func (c *Client) SetSecretID(secretID string) { + c.config.SecretID = secretID +} + +// request is used to help build up a request +type request struct { + config *Config + method string + url *url.URL + params url.Values + token string + body io.Reader + obj interface{} +} + +// setQueryOptions is used to annotate the request with +// additional query options +func (r *request) setQueryOptions(q *QueryOptions) { + if q == nil { + return + } + if q.Region != "" { + r.params.Set("region", q.Region) + } + if q.Namespace != "" { + r.params.Set("namespace", q.Namespace) + } + if q.SecretID != "" { + r.token = q.SecretID + } + if q.AllowStale { + r.params.Set("stale", "") + } + if q.WaitIndex != 0 { + r.params.Set("index", strconv.FormatUint(q.WaitIndex, 10)) + } + if q.WaitTime != 0 { + r.params.Set("wait", durToMsec(q.WaitTime)) + } + if q.Prefix != "" { + r.params.Set("prefix", q.Prefix) + } + for k, v := range q.Params { + r.params.Set(k, v) + } +} + +// durToMsec converts a duration to a millisecond specified string +func durToMsec(dur time.Duration) string { + return fmt.Sprintf("%dms", dur/time.Millisecond) +} + +// setWriteOptions is used to annotate the request with +// additional write options +func (r *request) setWriteOptions(q *WriteOptions) { + if q == nil { + return + } + if q.Region != "" { + r.params.Set("region", q.Region) + } + if q.Namespace != "" { + r.params.Set("namespace", q.Namespace) + } + if q.SecretID != "" { + r.token = q.SecretID + } +} + +// toHTTP converts the request to an HTTP request +func (r *request) toHTTP() (*http.Request, error) { + // Encode the query parameters + r.url.RawQuery = r.params.Encode() + + // Check if we should encode the body + if r.body == nil && r.obj != nil { + if b, err := encodeBody(r.obj); err != nil { + return nil, err + } else { + r.body = b + } + } + + // Create the HTTP request + req, err := http.NewRequest(r.method, r.url.RequestURI(), r.body) + if err != nil { + return nil, err + } + + // Optionally configure HTTP basic authentication + if r.url.User != nil { + username := r.url.User.Username() + password, _ := r.url.User.Password() + req.SetBasicAuth(username, password) + } else if r.config.HttpAuth != nil { + req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password) + } + + req.Header.Add("Accept-Encoding", "gzip") + if r.token != "" { + req.Header.Set("X-Nomad-Token", r.token) + } + + req.URL.Host = r.url.Host + req.URL.Scheme = r.url.Scheme + req.Host = r.url.Host + return req, nil +} + +// newRequest is used to create a new request +func (c *Client) newRequest(method, path string) (*request, error) { + base, _ := url.Parse(c.config.Address) + u, err := url.Parse(path) + if err != nil { + return nil, err + } + r := &request{ + config: &c.config, + method: method, + url: &url.URL{ + Scheme: base.Scheme, + User: base.User, + Host: base.Host, + Path: u.Path, + }, + params: make(map[string][]string), + } + if c.config.Region != "" { + r.params.Set("region", c.config.Region) + } + if c.config.Namespace != "" { + r.params.Set("namespace", c.config.Namespace) + } + if c.config.WaitTime != 0 { + r.params.Set("wait", durToMsec(r.config.WaitTime)) + } + if c.config.SecretID != "" { + r.token = r.config.SecretID + } + + // Add in the query parameters, if any + for key, values := range u.Query() { + 
for _, value := range values { + r.params.Add(key, value) + } + } + + return r, nil +} + +// multiCloser is to wrap a ReadCloser such that when close is called, multiple +// Closes occur. +type multiCloser struct { + reader io.Reader + inorderClose []io.Closer +} + +func (m *multiCloser) Close() error { + for _, c := range m.inorderClose { + if err := c.Close(); err != nil { + return err + } + } + return nil +} + +func (m *multiCloser) Read(p []byte) (int, error) { + return m.reader.Read(p) +} + +// doRequest runs a request with our client +func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) { + req, err := r.toHTTP() + if err != nil { + return 0, nil, err + } + start := time.Now() + resp, err := c.config.httpClient.Do(req) + diff := time.Now().Sub(start) + + // If the response is compressed, we swap the body's reader. + if resp != nil && resp.Header != nil { + var reader io.ReadCloser + switch resp.Header.Get("Content-Encoding") { + case "gzip": + greader, err := gzip.NewReader(resp.Body) + if err != nil { + return 0, nil, err + } + + // The gzip reader doesn't close the wrapped reader so we use + // multiCloser. + reader = &multiCloser{ + reader: greader, + inorderClose: []io.Closer{greader, resp.Body}, + } + default: + reader = resp.Body + } + resp.Body = reader + } + + return diff, resp, err +} + +// rawQuery makes a GET request to the specified endpoint but returns just the +// response body. +func (c *Client) rawQuery(endpoint string, q *QueryOptions) (io.ReadCloser, error) { + r, err := c.newRequest("GET", endpoint) + if err != nil { + return nil, err + } + r.setQueryOptions(q) + _, resp, err := requireOK(c.doRequest(r)) + if err != nil { + return nil, err + } + + return resp.Body, nil +} + +// query is used to do a GET request against an endpoint +// and deserialize the response into an interface using +// standard Nomad conventions. +func (c *Client) query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) { + r, err := c.newRequest("GET", endpoint) + if err != nil { + return nil, err + } + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if err := decodeBody(resp, out); err != nil { + return nil, err + } + return qm, nil +} + +// putQuery is used to do a PUT request when doing a read against an endpoint +// and deserialize the response into an interface using standard Nomad +// conventions. +func (c *Client) putQuery(endpoint string, in, out interface{}, q *QueryOptions) (*QueryMeta, error) { + r, err := c.newRequest("PUT", endpoint) + if err != nil { + return nil, err + } + r.setQueryOptions(q) + r.obj = in + rtt, resp, err := requireOK(c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if err := decodeBody(resp, out); err != nil { + return nil, err + } + return qm, nil +} + +// write is used to do a PUT request against an endpoint +// and serialize/deserialized using the standard Nomad conventions. 
+func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) { + r, err := c.newRequest("PUT", endpoint) + if err != nil { + return nil, err + } + r.setWriteOptions(q) + r.obj = in + rtt, resp, err := requireOK(c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + parseWriteMeta(resp, wm) + + if out != nil { + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + } + return wm, nil +} + +// delete is used to do a DELETE request against an endpoint +// and serialize/deserialized using the standard Nomad conventions. +func (c *Client) delete(endpoint string, out interface{}, q *WriteOptions) (*WriteMeta, error) { + r, err := c.newRequest("DELETE", endpoint) + if err != nil { + return nil, err + } + r.setWriteOptions(q) + rtt, resp, err := requireOK(c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + parseWriteMeta(resp, wm) + + if out != nil { + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + } + return wm, nil +} + +// parseQueryMeta is used to help parse query meta-data +func parseQueryMeta(resp *http.Response, q *QueryMeta) error { + header := resp.Header + + // Parse the X-Nomad-Index + index, err := strconv.ParseUint(header.Get("X-Nomad-Index"), 10, 64) + if err != nil { + return fmt.Errorf("Failed to parse X-Nomad-Index: %v", err) + } + q.LastIndex = index + + // Parse the X-Nomad-LastContact + last, err := strconv.ParseUint(header.Get("X-Nomad-LastContact"), 10, 64) + if err != nil { + return fmt.Errorf("Failed to parse X-Nomad-LastContact: %v", err) + } + q.LastContact = time.Duration(last) * time.Millisecond + + // Parse the X-Nomad-KnownLeader + switch header.Get("X-Nomad-KnownLeader") { + case "true": + q.KnownLeader = true + default: + q.KnownLeader = false + } + return nil +} + +// parseWriteMeta is used to help parse write meta-data +func parseWriteMeta(resp *http.Response, q *WriteMeta) error { + header := resp.Header + + // Parse the X-Nomad-Index + index, err := strconv.ParseUint(header.Get("X-Nomad-Index"), 10, 64) + if err != nil { + return fmt.Errorf("Failed to parse X-Nomad-Index: %v", err) + } + q.LastIndex = index + return nil +} + +// decodeBody is used to JSON decode a body +func decodeBody(resp *http.Response, out interface{}) error { + dec := json.NewDecoder(resp.Body) + return dec.Decode(out) +} + +// encodeBody is used to encode a request body +func encodeBody(obj interface{}) (io.Reader, error) { + buf := bytes.NewBuffer(nil) + enc := json.NewEncoder(buf) + if err := enc.Encode(obj); err != nil { + return nil, err + } + return buf, nil +} + +// requireOK is used to wrap doRequest and check for a 200 +func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) { + if e != nil { + if resp != nil { + resp.Body.Close() + } + return d, nil, e + } + if resp.StatusCode != 200 { + var buf bytes.Buffer + io.Copy(&buf, resp.Body) + resp.Body.Close() + return d, nil, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes()) + } + return d, resp, nil +} diff --git a/vendor/github.com/hashicorp/nomad/api/constraint.go b/vendor/github.com/hashicorp/nomad/api/constraint.go new file mode 100644 index 0000000000..ec3a37a641 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/constraint.go @@ -0,0 +1,17 @@ +package api + +// Constraint is used to serialize a job placement constraint. 
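A sketch of a blocking (long-poll) read built on the QueryOptions plumbing above, using the ACL policy list as an example endpoint; the client is assumed to be configured already:

package example

import (
	"fmt"
	"time"

	"github.com/hashicorp/nomad/api"
)

// watchPolicies long-polls the ACL policy list: each request blocks until
// the index moves past WaitIndex or WaitTime elapses.
func watchPolicies(client *api.Client) error {
	var lastIndex uint64
	for {
		q := &api.QueryOptions{
			WaitIndex: lastIndex,
			WaitTime:  30 * time.Second,
		}
		policies, meta, err := client.ACLPolicies().List(q)
		if err != nil {
			return err
		}
		if meta.LastIndex != lastIndex {
			fmt.Println("policy count:", len(policies))
			lastIndex = meta.LastIndex
		}
	}
}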
+type Constraint struct { + LTarget string + RTarget string + Operand string +} + +// NewConstraint generates a new job placement constraint. +func NewConstraint(left, operand, right string) *Constraint { + return &Constraint{ + LTarget: left, + RTarget: right, + Operand: operand, + } +} diff --git a/vendor/github.com/hashicorp/nomad/api/deployments.go b/vendor/github.com/hashicorp/nomad/api/deployments.go new file mode 100644 index 0000000000..0b996f73c0 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/deployments.go @@ -0,0 +1,234 @@ +package api + +import ( + "sort" +) + +// Deployments is used to query the deployments endpoints. +type Deployments struct { + client *Client +} + +// Deployments returns a new handle on the deployments. +func (c *Client) Deployments() *Deployments { + return &Deployments{client: c} +} + +// List is used to dump all of the deployments. +func (d *Deployments) List(q *QueryOptions) ([]*Deployment, *QueryMeta, error) { + var resp []*Deployment + qm, err := d.client.query("/v1/deployments", &resp, q) + if err != nil { + return nil, nil, err + } + sort.Sort(DeploymentIndexSort(resp)) + return resp, qm, nil +} + +func (d *Deployments) PrefixList(prefix string) ([]*Deployment, *QueryMeta, error) { + return d.List(&QueryOptions{Prefix: prefix}) +} + +// Info is used to query a single deployment by its ID. +func (d *Deployments) Info(deploymentID string, q *QueryOptions) (*Deployment, *QueryMeta, error) { + var resp Deployment + qm, err := d.client.query("/v1/deployment/"+deploymentID, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, qm, nil +} + +// Allocations is used to retrieve a set of allocations that are part of the +// deployment +func (d *Deployments) Allocations(deploymentID string, q *QueryOptions) ([]*AllocationListStub, *QueryMeta, error) { + var resp []*AllocationListStub + qm, err := d.client.query("/v1/deployment/allocations/"+deploymentID, &resp, q) + if err != nil { + return nil, nil, err + } + sort.Sort(AllocIndexSort(resp)) + return resp, qm, nil +} + +// Fail is used to fail the given deployment. +func (d *Deployments) Fail(deploymentID string, q *WriteOptions) (*DeploymentUpdateResponse, *WriteMeta, error) { + var resp DeploymentUpdateResponse + req := &DeploymentFailRequest{ + DeploymentID: deploymentID, + } + wm, err := d.client.write("/v1/deployment/fail/"+deploymentID, req, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, wm, nil +} + +// Pause is used to pause or unpause the given deployment. 
+func (d *Deployments) Pause(deploymentID string, pause bool, q *WriteOptions) (*DeploymentUpdateResponse, *WriteMeta, error) { + var resp DeploymentUpdateResponse + req := &DeploymentPauseRequest{ + DeploymentID: deploymentID, + Pause: pause, + } + wm, err := d.client.write("/v1/deployment/pause/"+deploymentID, req, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, wm, nil +} + +// PromoteAll is used to promote all canaries in the given deployment +func (d *Deployments) PromoteAll(deploymentID string, q *WriteOptions) (*DeploymentUpdateResponse, *WriteMeta, error) { + var resp DeploymentUpdateResponse + req := &DeploymentPromoteRequest{ + DeploymentID: deploymentID, + All: true, + } + wm, err := d.client.write("/v1/deployment/promote/"+deploymentID, req, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, wm, nil +} + +// PromoteGroups is used to promote canaries in the passed groups in the given deployment +func (d *Deployments) PromoteGroups(deploymentID string, groups []string, q *WriteOptions) (*DeploymentUpdateResponse, *WriteMeta, error) { + var resp DeploymentUpdateResponse + req := &DeploymentPromoteRequest{ + DeploymentID: deploymentID, + Groups: groups, + } + wm, err := d.client.write("/v1/deployment/promote/"+deploymentID, req, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, wm, nil +} + +// SetAllocHealth is used to set allocation health for allocs that are part of +// the given deployment +func (d *Deployments) SetAllocHealth(deploymentID string, healthy, unhealthy []string, q *WriteOptions) (*DeploymentUpdateResponse, *WriteMeta, error) { + var resp DeploymentUpdateResponse + req := &DeploymentAllocHealthRequest{ + DeploymentID: deploymentID, + HealthyAllocationIDs: healthy, + UnhealthyAllocationIDs: unhealthy, + } + wm, err := d.client.write("/v1/deployment/allocation-health/"+deploymentID, req, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, wm, nil +} + +// Deployment is used to serialize an deployment. +type Deployment struct { + ID string + Namespace string + JobID string + JobVersion uint64 + JobModifyIndex uint64 + JobCreateIndex uint64 + TaskGroups map[string]*DeploymentState + Status string + StatusDescription string + CreateIndex uint64 + ModifyIndex uint64 +} + +// DeploymentState tracks the state of a deployment for a given task group. +type DeploymentState struct { + PlacedCanaries []string + AutoRevert bool + Promoted bool + DesiredCanaries int + DesiredTotal int + PlacedAllocs int + HealthyAllocs int + UnhealthyAllocs int +} + +// DeploymentIndexSort is a wrapper to sort deployments by CreateIndex. We +// reverse the test so that we get the highest index first. +type DeploymentIndexSort []*Deployment + +func (d DeploymentIndexSort) Len() int { + return len(d) +} + +func (d DeploymentIndexSort) Less(i, j int) bool { + return d[i].CreateIndex > d[j].CreateIndex +} + +func (d DeploymentIndexSort) Swap(i, j int) { + d[i], d[j] = d[j], d[i] +} + +// DeploymentUpdateResponse is used to respond to a deployment change. The +// response will include the modify index of the deployment as well as details +// of any triggered evaluation. +type DeploymentUpdateResponse struct { + EvalID string + EvalCreateIndex uint64 + DeploymentModifyIndex uint64 + RevertedJobVersion *uint64 + WriteMeta +} + +// DeploymentAllocHealthRequest is used to set the health of a set of +// allocations as part of a deployment. 
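A sketch of the deployment write calls above; the deployment ID is a placeholder and the client is assumed to be configured already:

package example

import (
	"fmt"

	"github.com/hashicorp/nomad/api"
)

// promoteDeployment promotes all canaries in a deployment and reports the
// evaluation that the promotion triggered.
func promoteDeployment(client *api.Client, deploymentID string) error {
	resp, _, err := client.Deployments().PromoteAll(deploymentID, nil)
	if err != nil {
		return err
	}
	fmt.Println("triggered evaluation:", resp.EvalID)
	return nil
}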
+type DeploymentAllocHealthRequest struct { + DeploymentID string + + // Marks these allocations as healthy, allow further allocations + // to be rolled. + HealthyAllocationIDs []string + + // Any unhealthy allocations fail the deployment + UnhealthyAllocationIDs []string + + WriteRequest +} + +// DeploymentPromoteRequest is used to promote task groups in a deployment +type DeploymentPromoteRequest struct { + DeploymentID string + + // All is to promote all task groups + All bool + + // Groups is used to set the promotion status per task group + Groups []string + + WriteRequest +} + +// DeploymentPauseRequest is used to pause a deployment +type DeploymentPauseRequest struct { + DeploymentID string + + // Pause sets the pause status + Pause bool + + WriteRequest +} + +// DeploymentSpecificRequest is used to make a request specific to a particular +// deployment +type DeploymentSpecificRequest struct { + DeploymentID string + QueryOptions +} + +// DeploymentFailRequest is used to fail a particular deployment +type DeploymentFailRequest struct { + DeploymentID string + WriteRequest +} + +// SingleDeploymentResponse is used to respond with a single deployment +type SingleDeploymentResponse struct { + Deployment *Deployment + QueryMeta +} diff --git a/vendor/github.com/hashicorp/nomad/api/evaluations.go b/vendor/github.com/hashicorp/nomad/api/evaluations.go new file mode 100644 index 0000000000..40aee69757 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/evaluations.go @@ -0,0 +1,97 @@ +package api + +import ( + "sort" + "time" +) + +// Evaluations is used to query the evaluation endpoints. +type Evaluations struct { + client *Client +} + +// Evaluations returns a new handle on the evaluations. +func (c *Client) Evaluations() *Evaluations { + return &Evaluations{client: c} +} + +// List is used to dump all of the evaluations. +func (e *Evaluations) List(q *QueryOptions) ([]*Evaluation, *QueryMeta, error) { + var resp []*Evaluation + qm, err := e.client.query("/v1/evaluations", &resp, q) + if err != nil { + return nil, nil, err + } + sort.Sort(EvalIndexSort(resp)) + return resp, qm, nil +} + +func (e *Evaluations) PrefixList(prefix string) ([]*Evaluation, *QueryMeta, error) { + return e.List(&QueryOptions{Prefix: prefix}) +} + +// Info is used to query a single evaluation by its ID. +func (e *Evaluations) Info(evalID string, q *QueryOptions) (*Evaluation, *QueryMeta, error) { + var resp Evaluation + qm, err := e.client.query("/v1/evaluation/"+evalID, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, qm, nil +} + +// Allocations is used to retrieve a set of allocations given +// an evaluation ID. +func (e *Evaluations) Allocations(evalID string, q *QueryOptions) ([]*AllocationListStub, *QueryMeta, error) { + var resp []*AllocationListStub + qm, err := e.client.query("/v1/evaluation/"+evalID+"/allocations", &resp, q) + if err != nil { + return nil, nil, err + } + sort.Sort(AllocIndexSort(resp)) + return resp, qm, nil +} + +// Evaluation is used to serialize an evaluation. 
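A sketch of the evaluation lookups above, assuming a configured *api.Client and a placeholder evaluation ID:

package example

import (
	"fmt"

	"github.com/hashicorp/nomad/api"
)

// evalAllocs prints an evaluation and the allocations it produced.
func evalAllocs(client *api.Client, evalID string) error {
	eval, _, err := client.Evaluations().Info(evalID, nil)
	if err != nil {
		return err
	}
	fmt.Println(eval.ID, eval.Status, eval.TriggeredBy)

	allocs, _, err := client.Evaluations().Allocations(evalID, nil)
	if err != nil {
		return err
	}
	for _, a := range allocs {
		fmt.Println("  ", a.ID, a.ClientStatus)
	}
	return nil
}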
+type Evaluation struct { + ID string + Priority int + Type string + TriggeredBy string + Namespace string + JobID string + JobModifyIndex uint64 + NodeID string + NodeModifyIndex uint64 + DeploymentID string + Status string + StatusDescription string + Wait time.Duration + NextEval string + PreviousEval string + BlockedEval string + FailedTGAllocs map[string]*AllocationMetric + ClassEligibility map[string]bool + EscapedComputedClass bool + AnnotatePlan bool + QueuedAllocations map[string]int + SnapshotIndex uint64 + CreateIndex uint64 + ModifyIndex uint64 +} + +// EvalIndexSort is a wrapper to sort evaluations by CreateIndex. +// We reverse the test so that we get the highest index first. +type EvalIndexSort []*Evaluation + +func (e EvalIndexSort) Len() int { + return len(e) +} + +func (e EvalIndexSort) Less(i, j int) bool { + return e[i].CreateIndex > e[j].CreateIndex +} + +func (e EvalIndexSort) Swap(i, j int) { + e[i], e[j] = e[j], e[i] +} diff --git a/vendor/github.com/hashicorp/nomad/api/fs.go b/vendor/github.com/hashicorp/nomad/api/fs.go new file mode 100644 index 0000000000..c412db5416 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/fs.go @@ -0,0 +1,398 @@ +package api + +import ( + "encoding/json" + "fmt" + "io" + "strconv" + "sync" + "time" +) + +const ( + // OriginStart and OriginEnd are the available parameters for the origin + // argument when streaming a file. They respectively offset from the start + // and end of a file. + OriginStart = "start" + OriginEnd = "end" +) + +// AllocFileInfo holds information about a file inside the AllocDir +type AllocFileInfo struct { + Name string + IsDir bool + Size int64 + FileMode string + ModTime time.Time +} + +// StreamFrame is used to frame data of a file when streaming +type StreamFrame struct { + Offset int64 `json:",omitempty"` + Data []byte `json:",omitempty"` + File string `json:",omitempty"` + FileEvent string `json:",omitempty"` +} + +// IsHeartbeat returns if the frame is a heartbeat frame +func (s *StreamFrame) IsHeartbeat() bool { + return len(s.Data) == 0 && s.FileEvent == "" && s.File == "" && s.Offset == 0 +} + +// AllocFS is used to introspect an allocation directory on a Nomad client +type AllocFS struct { + client *Client +} + +// AllocFS returns an handle to the AllocFS endpoints +func (c *Client) AllocFS() *AllocFS { + return &AllocFS{client: c} +} + +// List is used to list the files at a given path of an allocation directory +func (a *AllocFS) List(alloc *Allocation, path string, q *QueryOptions) ([]*AllocFileInfo, *QueryMeta, error) { + nodeClient, err := a.client.GetNodeClient(alloc.NodeID, q) + if err != nil { + return nil, nil, err + } + + if q == nil { + q = &QueryOptions{} + } + if q.Params == nil { + q.Params = make(map[string]string) + } + + q.Params["path"] = path + + var resp []*AllocFileInfo + qm, err := nodeClient.query(fmt.Sprintf("/v1/client/fs/ls/%s", alloc.ID), &resp, q) + if err != nil { + return nil, nil, err + } + + return resp, qm, nil +} + +// Stat is used to stat a file at a given path of an allocation directory +func (a *AllocFS) Stat(alloc *Allocation, path string, q *QueryOptions) (*AllocFileInfo, *QueryMeta, error) { + nodeClient, err := a.client.GetNodeClient(alloc.NodeID, q) + if err != nil { + return nil, nil, err + } + + if q == nil { + q = &QueryOptions{} + } + if q.Params == nil { + q.Params = make(map[string]string) + } + + q.Params["path"] = path + + var resp AllocFileInfo + qm, err := nodeClient.query(fmt.Sprintf("/v1/client/fs/stat/%s", alloc.ID), &resp, q) + if err != 
nil { + return nil, nil, err + } + return &resp, qm, nil +} + +// ReadAt is used to read bytes at a given offset until limit at the given path +// in an allocation directory. If limit is <= 0, there is no limit. +func (a *AllocFS) ReadAt(alloc *Allocation, path string, offset int64, limit int64, q *QueryOptions) (io.ReadCloser, error) { + nodeClient, err := a.client.GetNodeClient(alloc.NodeID, q) + if err != nil { + return nil, err + } + + if q == nil { + q = &QueryOptions{} + } + if q.Params == nil { + q.Params = make(map[string]string) + } + + q.Params["path"] = path + q.Params["offset"] = strconv.FormatInt(offset, 10) + q.Params["limit"] = strconv.FormatInt(limit, 10) + + r, err := nodeClient.rawQuery(fmt.Sprintf("/v1/client/fs/readat/%s", alloc.ID), q) + if err != nil { + return nil, err + } + return r, nil +} + +// Cat is used to read contents of a file at the given path in an allocation +// directory +func (a *AllocFS) Cat(alloc *Allocation, path string, q *QueryOptions) (io.ReadCloser, error) { + nodeClient, err := a.client.GetNodeClient(alloc.NodeID, q) + if err != nil { + return nil, err + } + + if q == nil { + q = &QueryOptions{} + } + if q.Params == nil { + q.Params = make(map[string]string) + } + + q.Params["path"] = path + + r, err := nodeClient.rawQuery(fmt.Sprintf("/v1/client/fs/cat/%s", alloc.ID), q) + if err != nil { + return nil, err + } + return r, nil +} + +// Stream streams the content of a file blocking on EOF. +// The parameters are: +// * path: path to file to stream. +// * offset: The offset to start streaming data at. +// * origin: Either "start" or "end" and defines from where the offset is applied. +// * cancel: A channel that when closed, streaming will end. +// +// The return value is a channel that will emit StreamFrames as they are read. +func (a *AllocFS) Stream(alloc *Allocation, path, origin string, offset int64, + cancel <-chan struct{}, q *QueryOptions) (<-chan *StreamFrame, <-chan error) { + + errCh := make(chan error, 1) + nodeClient, err := a.client.GetNodeClient(alloc.NodeID, q) + if err != nil { + errCh <- err + return nil, errCh + } + + if q == nil { + q = &QueryOptions{} + } + if q.Params == nil { + q.Params = make(map[string]string) + } + + q.Params["path"] = path + q.Params["offset"] = strconv.FormatInt(offset, 10) + q.Params["origin"] = origin + + r, err := nodeClient.rawQuery(fmt.Sprintf("/v1/client/fs/stream/%s", alloc.ID), q) + if err != nil { + errCh <- err + return nil, errCh + } + + // Create the output channel + frames := make(chan *StreamFrame, 10) + + go func() { + // Close the body + defer r.Close() + + // Create a decoder + dec := json.NewDecoder(r) + + for { + // Check if we have been cancelled + select { + case <-cancel: + return + default: + } + + // Decode the next frame + var frame StreamFrame + if err := dec.Decode(&frame); err != nil { + errCh <- err + close(frames) + return + } + + // Discard heartbeat frames + if frame.IsHeartbeat() { + continue + } + + frames <- &frame + } + }() + + return frames, errCh +} + +// Logs streams the content of a tasks logs blocking on EOF. +// The parameters are: +// * allocation: the allocation to stream from. +// * follow: Whether the logs should be followed. +// * task: the tasks name to stream logs for. +// * logType: Either "stdout" or "stderr" +// * origin: Either "start" or "end" and defines from where the offset is applied. +// * offset: The offset to start streaming data at. +// * cancel: A channel that when closed, streaming will end. 
+// +// The return value is a channel that will emit StreamFrames as they are read. +func (a *AllocFS) Logs(alloc *Allocation, follow bool, task, logType, origin string, + offset int64, cancel <-chan struct{}, q *QueryOptions) (<-chan *StreamFrame, <-chan error) { + + errCh := make(chan error, 1) + nodeClient, err := a.client.GetNodeClient(alloc.NodeID, q) + if err != nil { + errCh <- err + return nil, errCh + } + + if q == nil { + q = &QueryOptions{} + } + if q.Params == nil { + q.Params = make(map[string]string) + } + + q.Params["follow"] = strconv.FormatBool(follow) + q.Params["task"] = task + q.Params["type"] = logType + q.Params["origin"] = origin + q.Params["offset"] = strconv.FormatInt(offset, 10) + + r, err := nodeClient.rawQuery(fmt.Sprintf("/v1/client/fs/logs/%s", alloc.ID), q) + if err != nil { + errCh <- err + return nil, errCh + } + + // Create the output channel + frames := make(chan *StreamFrame, 10) + + go func() { + // Close the body + defer r.Close() + + // Create a decoder + dec := json.NewDecoder(r) + + for { + // Check if we have been cancelled + select { + case <-cancel: + return + default: + } + + // Decode the next frame + var frame StreamFrame + if err := dec.Decode(&frame); err != nil { + errCh <- err + close(frames) + return + } + + // Discard heartbeat frames + if frame.IsHeartbeat() { + continue + } + + frames <- &frame + } + }() + + return frames, errCh +} + +// FrameReader is used to convert a stream of frames into a read closer. +type FrameReader struct { + frames <-chan *StreamFrame + errCh <-chan error + cancelCh chan struct{} + + closedLock sync.Mutex + closed bool + + unblockTime time.Duration + + frame *StreamFrame + frameOffset int + + byteOffset int +} + +// NewFrameReader takes a channel of frames and returns a FrameReader which +// implements io.ReadCloser +func NewFrameReader(frames <-chan *StreamFrame, errCh <-chan error, cancelCh chan struct{}) *FrameReader { + return &FrameReader{ + frames: frames, + errCh: errCh, + cancelCh: cancelCh, + } +} + +// SetUnblockTime sets the time to unblock and return zero bytes read. If the +// duration is unset or is zero or less, the read will block til data is read. +func (f *FrameReader) SetUnblockTime(d time.Duration) { + f.unblockTime = d +} + +// Offset returns the offset into the stream. +func (f *FrameReader) Offset() int { + return f.byteOffset +} + +// Read reads the data of the incoming frames into the bytes buffer. Returns EOF +// when there are no more frames. 
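Read and Close below complete FrameReader's io.ReadCloser contract over the frame channel returned by Stream or Logs. As a rough usage sketch (not part of the vendored file; looking up the *api.Allocation is assumed to happen elsewhere):

package example

import (
    "io"
    "os"

    "github.com/hashicorp/nomad/api"
)

// tailTaskStdout follows a task's stdout and copies it to this process's
// stdout until the stream ends, fails, or the reader is closed.
func tailTaskStdout(client *api.Client, alloc *api.Allocation, task string) error {
    cancel := make(chan struct{})
    frames, errCh := client.AllocFS().Logs(alloc, true, task, "stdout", api.OriginEnd, 0, cancel, nil)

    // FrameReader turns the two channels into an io.ReadCloser; closing it
    // closes the cancel channel, which stops the streaming goroutine.
    r := api.NewFrameReader(frames, errCh, cancel)
    defer r.Close()

    _, err := io.Copy(os.Stdout, r)
    return err
}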
+func (f *FrameReader) Read(p []byte) (n int, err error) { + f.closedLock.Lock() + closed := f.closed + f.closedLock.Unlock() + if closed { + return 0, io.EOF + } + + if f.frame == nil { + var unblock <-chan time.Time + if f.unblockTime.Nanoseconds() > 0 { + unblock = time.After(f.unblockTime) + } + + select { + case frame, ok := <-f.frames: + if !ok { + return 0, io.EOF + } + f.frame = frame + + // Store the total offset into the file + f.byteOffset = int(f.frame.Offset) + case <-unblock: + return 0, nil + case err := <-f.errCh: + return 0, err + case <-f.cancelCh: + return 0, io.EOF + } + } + + // Copy the data out of the frame and update our offset + n = copy(p, f.frame.Data[f.frameOffset:]) + f.frameOffset += n + + // Clear the frame and its offset once we have read everything + if len(f.frame.Data) == f.frameOffset { + f.frame = nil + f.frameOffset = 0 + } + + return n, nil +} + +// Close cancels the stream of frames +func (f *FrameReader) Close() error { + f.closedLock.Lock() + defer f.closedLock.Unlock() + if f.closed { + return nil + } + + close(f.cancelCh) + f.closed = true + return nil +} diff --git a/vendor/github.com/hashicorp/nomad/api/jobs.go b/vendor/github.com/hashicorp/nomad/api/jobs.go new file mode 100644 index 0000000000..4ec71af4ae --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/jobs.go @@ -0,0 +1,968 @@ +package api + +import ( + "fmt" + "net/url" + "sort" + "strconv" + "time" + + "github.com/gorhill/cronexpr" + "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/nomad/structs" +) + +const ( + // JobTypeService indicates a long-running processes + JobTypeService = "service" + + // JobTypeBatch indicates a short-lived process + JobTypeBatch = "batch" + + // PeriodicSpecCron is used for a cron spec. + PeriodicSpecCron = "cron" + + // DefaultNamespace is the default namespace. + DefaultNamespace = "default" +) + +const ( + // RegisterEnforceIndexErrPrefix is the prefix to use in errors caused by + // enforcing the job modify index during registers. + RegisterEnforceIndexErrPrefix = "Enforcing job modify index" +) + +// Jobs is used to access the job-specific endpoints. +type Jobs struct { + client *Client +} + +// Jobs returns a handle on the jobs endpoints. +func (c *Client) Jobs() *Jobs { + return &Jobs{client: c} +} + +func (j *Jobs) Validate(job *Job, q *WriteOptions) (*JobValidateResponse, *WriteMeta, error) { + var resp JobValidateResponse + req := &JobValidateRequest{Job: job} + if q != nil { + req.WriteRequest = WriteRequest{Region: q.Region} + } + wm, err := j.client.write("/v1/validate/job", req, &resp, q) + return &resp, wm, err +} + +// RegisterOptions is used to pass through job registration parameters +type RegisterOptions struct { + EnforceIndex bool + ModifyIndex uint64 + PolicyOverride bool +} + +// Register is used to register a new job. It returns the ID +// of the evaluation, along with any errors encountered. +func (j *Jobs) Register(job *Job, q *WriteOptions) (*JobRegisterResponse, *WriteMeta, error) { + return j.RegisterOpts(job, nil, q) +} + +// EnforceRegister is used to register a job enforcing its job modify index. +func (j *Jobs) EnforceRegister(job *Job, modifyIndex uint64, q *WriteOptions) (*JobRegisterResponse, *WriteMeta, error) { + opts := RegisterOptions{EnforceIndex: true, ModifyIndex: modifyIndex} + return j.RegisterOpts(job, &opts, q) +} + +// Register is used to register a new job. It returns the ID +// of the evaluation, along with any errors encountered. 
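RegisterOpts below is the common path behind Register and EnforceRegister above. A sketch of the check-and-set flow that the enforce-index option enables (illustrative only; the job is assumed to already have its ID set):

package example

import (
    "fmt"
    "strings"

    "github.com/hashicorp/nomad/api"
)

// updateJob re-registers job only if the cluster still holds the version we
// last read, using the job modify index as a check-and-set value.
func updateJob(client *api.Client, job *api.Job) error {
    jobs := client.Jobs()

    // Read the current version to capture its modify index.
    current, _, err := jobs.Info(*job.ID, nil)
    if err != nil {
        return err
    }
    if current.JobModifyIndex == nil {
        return fmt.Errorf("job %q has no modify index", *job.ID)
    }

    // EnforceRegister fails if someone else changed the job in the meantime.
    if _, _, err := jobs.EnforceRegister(job, *current.JobModifyIndex, nil); err != nil {
        if strings.Contains(err.Error(), api.RegisterEnforceIndexErrPrefix) {
            return fmt.Errorf("job %q changed since it was read, re-read and retry: %v", *job.ID, err)
        }
        return err
    }
    return nil
}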
+func (j *Jobs) RegisterOpts(job *Job, opts *RegisterOptions, q *WriteOptions) (*JobRegisterResponse, *WriteMeta, error) { + // Format the request + req := &RegisterJobRequest{ + Job: job, + } + if opts != nil { + if opts.EnforceIndex { + req.EnforceIndex = true + req.JobModifyIndex = opts.ModifyIndex + } + if opts.PolicyOverride { + req.PolicyOverride = true + } + } + + var resp JobRegisterResponse + wm, err := j.client.write("/v1/jobs", req, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, wm, nil +} + +// List is used to list all of the existing jobs. +func (j *Jobs) List(q *QueryOptions) ([]*JobListStub, *QueryMeta, error) { + var resp []*JobListStub + qm, err := j.client.query("/v1/jobs", &resp, q) + if err != nil { + return nil, qm, err + } + sort.Sort(JobIDSort(resp)) + return resp, qm, nil +} + +// PrefixList is used to list all existing jobs that match the prefix. +func (j *Jobs) PrefixList(prefix string) ([]*JobListStub, *QueryMeta, error) { + return j.List(&QueryOptions{Prefix: prefix}) +} + +// Info is used to retrieve information about a particular +// job given its unique ID. +func (j *Jobs) Info(jobID string, q *QueryOptions) (*Job, *QueryMeta, error) { + var resp Job + qm, err := j.client.query("/v1/job/"+jobID, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, qm, nil +} + +// Versions is used to retrieve all versions of a particular job given its +// unique ID. +func (j *Jobs) Versions(jobID string, diffs bool, q *QueryOptions) ([]*Job, []*JobDiff, *QueryMeta, error) { + var resp JobVersionsResponse + qm, err := j.client.query(fmt.Sprintf("/v1/job/%s/versions?diffs=%v", jobID, diffs), &resp, q) + if err != nil { + return nil, nil, nil, err + } + return resp.Versions, resp.Diffs, qm, nil +} + +// Allocations is used to return the allocs for a given job ID. +func (j *Jobs) Allocations(jobID string, allAllocs bool, q *QueryOptions) ([]*AllocationListStub, *QueryMeta, error) { + var resp []*AllocationListStub + u, err := url.Parse("/v1/job/" + jobID + "/allocations") + if err != nil { + return nil, nil, err + } + + v := u.Query() + v.Add("all", strconv.FormatBool(allAllocs)) + u.RawQuery = v.Encode() + + qm, err := j.client.query(u.String(), &resp, q) + if err != nil { + return nil, nil, err + } + sort.Sort(AllocIndexSort(resp)) + return resp, qm, nil +} + +// Deployments is used to query the deployments associated with the given job +// ID. +func (j *Jobs) Deployments(jobID string, q *QueryOptions) ([]*Deployment, *QueryMeta, error) { + var resp []*Deployment + qm, err := j.client.query("/v1/job/"+jobID+"/deployments", &resp, q) + if err != nil { + return nil, nil, err + } + sort.Sort(DeploymentIndexSort(resp)) + return resp, qm, nil +} + +// LatestDeployment is used to query for the latest deployment associated with +// the given job ID. +func (j *Jobs) LatestDeployment(jobID string, q *QueryOptions) (*Deployment, *QueryMeta, error) { + var resp *Deployment + qm, err := j.client.query("/v1/job/"+jobID+"/deployment", &resp, q) + if err != nil { + return nil, nil, err + } + return resp, qm, nil +} + +// Evaluations is used to query the evaluations associated with the given job +// ID. +func (j *Jobs) Evaluations(jobID string, q *QueryOptions) ([]*Evaluation, *QueryMeta, error) { + var resp []*Evaluation + qm, err := j.client.query("/v1/job/"+jobID+"/evaluations", &resp, q) + if err != nil { + return nil, nil, err + } + sort.Sort(EvalIndexSort(resp)) + return resp, qm, nil +} + +// Deregister is used to remove an existing job. 
If purge is set to true, the job +// is deregistered and purged from the system versus still being queryable and +// eventually GC'ed from the system. Most callers should not specify purge. +func (j *Jobs) Deregister(jobID string, purge bool, q *WriteOptions) (string, *WriteMeta, error) { + var resp JobDeregisterResponse + wm, err := j.client.delete(fmt.Sprintf("/v1/job/%v?purge=%t", jobID, purge), &resp, q) + if err != nil { + return "", nil, err + } + return resp.EvalID, wm, nil +} + +// ForceEvaluate is used to force-evaluate an existing job. +func (j *Jobs) ForceEvaluate(jobID string, q *WriteOptions) (string, *WriteMeta, error) { + var resp JobRegisterResponse + wm, err := j.client.write("/v1/job/"+jobID+"/evaluate", nil, &resp, q) + if err != nil { + return "", nil, err + } + return resp.EvalID, wm, nil +} + +// PeriodicForce spawns a new instance of the periodic job and returns the eval ID +func (j *Jobs) PeriodicForce(jobID string, q *WriteOptions) (string, *WriteMeta, error) { + var resp periodicForceResponse + wm, err := j.client.write("/v1/job/"+jobID+"/periodic/force", nil, &resp, q) + if err != nil { + return "", nil, err + } + return resp.EvalID, wm, nil +} + +// PlanOptions is used to pass through job planning parameters +type PlanOptions struct { + Diff bool + PolicyOverride bool +} + +func (j *Jobs) Plan(job *Job, diff bool, q *WriteOptions) (*JobPlanResponse, *WriteMeta, error) { + opts := PlanOptions{Diff: diff} + return j.PlanOpts(job, &opts, q) +} + +func (j *Jobs) PlanOpts(job *Job, opts *PlanOptions, q *WriteOptions) (*JobPlanResponse, *WriteMeta, error) { + if job == nil { + return nil, nil, fmt.Errorf("must pass non-nil job") + } + + // Setup the request + req := &JobPlanRequest{ + Job: job, + } + if opts != nil { + req.Diff = opts.Diff + req.PolicyOverride = opts.PolicyOverride + } + + var resp JobPlanResponse + wm, err := j.client.write("/v1/job/"+*job.ID+"/plan", req, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, wm, nil +} + +func (j *Jobs) Summary(jobID string, q *QueryOptions) (*JobSummary, *QueryMeta, error) { + var resp JobSummary + qm, err := j.client.query("/v1/job/"+jobID+"/summary", &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, qm, nil +} + +func (j *Jobs) Dispatch(jobID string, meta map[string]string, + payload []byte, q *WriteOptions) (*JobDispatchResponse, *WriteMeta, error) { + var resp JobDispatchResponse + req := &JobDispatchRequest{ + JobID: jobID, + Meta: meta, + Payload: payload, + } + wm, err := j.client.write("/v1/job/"+jobID+"/dispatch", req, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, wm, nil +} + +// Revert is used to revert the given job to the passed version. If +// enforceVersion is set, the job is only reverted if the current version is at +// the passed version. +func (j *Jobs) Revert(jobID string, version uint64, enforcePriorVersion *uint64, + q *WriteOptions) (*JobRegisterResponse, *WriteMeta, error) { + + var resp JobRegisterResponse + req := &JobRevertRequest{ + JobID: jobID, + JobVersion: version, + EnforcePriorVersion: enforcePriorVersion, + } + wm, err := j.client.write("/v1/job/"+jobID+"/revert", req, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, wm, nil +} + +// Stable is used to mark a job version's stability. 
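Stable below pairs naturally with Revert above: a release tool can pin a known-good version and later roll back to it. A sketch (version numbers and whether to enforce the current version are up to the caller):

package example

import "github.com/hashicorp/nomad/api"

// pinAndRollBack marks goodVersion of the job as stable, then reverts to it,
// refusing the revert unless the job is still at expectedCurrent.
func pinAndRollBack(client *api.Client, jobID string, goodVersion, expectedCurrent uint64) error {
    jobs := client.Jobs()

    // Record that goodVersion is known to be healthy.
    if _, _, err := jobs.Stable(jobID, goodVersion, true, nil); err != nil {
        return err
    }

    // Roll back, but only if no newer version was registered since
    // expectedCurrent was observed.
    _, _, err := jobs.Revert(jobID, goodVersion, &expectedCurrent, nil)
    return err
}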
+func (j *Jobs) Stable(jobID string, version uint64, stable bool, + q *WriteOptions) (*JobStabilityResponse, *WriteMeta, error) { + + var resp JobStabilityResponse + req := &JobStabilityRequest{ + JobID: jobID, + JobVersion: version, + Stable: stable, + } + wm, err := j.client.write("/v1/job/"+jobID+"/stable", req, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, wm, nil +} + +// periodicForceResponse is used to deserialize a force response +type periodicForceResponse struct { + EvalID string +} + +// UpdateStrategy defines a task groups update strategy. +type UpdateStrategy struct { + Stagger *time.Duration `mapstructure:"stagger"` + MaxParallel *int `mapstructure:"max_parallel"` + HealthCheck *string `mapstructure:"health_check"` + MinHealthyTime *time.Duration `mapstructure:"min_healthy_time"` + HealthyDeadline *time.Duration `mapstructure:"healthy_deadline"` + AutoRevert *bool `mapstructure:"auto_revert"` + Canary *int `mapstructure:"canary"` +} + +func (u *UpdateStrategy) Copy() *UpdateStrategy { + if u == nil { + return nil + } + + copy := new(UpdateStrategy) + + if u.Stagger != nil { + copy.Stagger = helper.TimeToPtr(*u.Stagger) + } + + if u.MaxParallel != nil { + copy.MaxParallel = helper.IntToPtr(*u.MaxParallel) + } + + if u.HealthCheck != nil { + copy.HealthCheck = helper.StringToPtr(*u.HealthCheck) + } + + if u.MinHealthyTime != nil { + copy.MinHealthyTime = helper.TimeToPtr(*u.MinHealthyTime) + } + + if u.HealthyDeadline != nil { + copy.HealthyDeadline = helper.TimeToPtr(*u.HealthyDeadline) + } + + if u.AutoRevert != nil { + copy.AutoRevert = helper.BoolToPtr(*u.AutoRevert) + } + + if u.Canary != nil { + copy.Canary = helper.IntToPtr(*u.Canary) + } + + return copy +} + +func (u *UpdateStrategy) Merge(o *UpdateStrategy) { + if o == nil { + return + } + + if o.Stagger != nil { + u.Stagger = helper.TimeToPtr(*o.Stagger) + } + + if o.MaxParallel != nil { + u.MaxParallel = helper.IntToPtr(*o.MaxParallel) + } + + if o.HealthCheck != nil { + u.HealthCheck = helper.StringToPtr(*o.HealthCheck) + } + + if o.MinHealthyTime != nil { + u.MinHealthyTime = helper.TimeToPtr(*o.MinHealthyTime) + } + + if o.HealthyDeadline != nil { + u.HealthyDeadline = helper.TimeToPtr(*o.HealthyDeadline) + } + + if o.AutoRevert != nil { + u.AutoRevert = helper.BoolToPtr(*o.AutoRevert) + } + + if o.Canary != nil { + u.Canary = helper.IntToPtr(*o.Canary) + } +} + +func (u *UpdateStrategy) Canonicalize() { + d := structs.DefaultUpdateStrategy + + if u.MaxParallel == nil { + u.MaxParallel = helper.IntToPtr(d.MaxParallel) + } + + if u.Stagger == nil { + u.Stagger = helper.TimeToPtr(d.Stagger) + } + + if u.HealthCheck == nil { + u.HealthCheck = helper.StringToPtr(d.HealthCheck) + } + + if u.HealthyDeadline == nil { + u.HealthyDeadline = helper.TimeToPtr(d.HealthyDeadline) + } + + if u.MinHealthyTime == nil { + u.MinHealthyTime = helper.TimeToPtr(d.MinHealthyTime) + } + + if u.AutoRevert == nil { + u.AutoRevert = helper.BoolToPtr(d.AutoRevert) + } + + if u.Canary == nil { + u.Canary = helper.IntToPtr(d.Canary) + } +} + +// Empty returns whether the UpdateStrategy is empty or has user defined values. 
+func (u *UpdateStrategy) Empty() bool { + if u == nil { + return true + } + + if u.Stagger != nil && *u.Stagger != 0 { + return false + } + + if u.MaxParallel != nil && *u.MaxParallel != 0 { + return false + } + + if u.HealthCheck != nil && *u.HealthCheck != "" { + return false + } + + if u.MinHealthyTime != nil && *u.MinHealthyTime != 0 { + return false + } + + if u.HealthyDeadline != nil && *u.HealthyDeadline != 0 { + return false + } + + if u.AutoRevert != nil && *u.AutoRevert { + return false + } + + if u.Canary != nil && *u.Canary != 0 { + return false + } + + return true +} + +// PeriodicConfig is for serializing periodic config for a job. +type PeriodicConfig struct { + Enabled *bool + Spec *string + SpecType *string + ProhibitOverlap *bool `mapstructure:"prohibit_overlap"` + TimeZone *string `mapstructure:"time_zone"` +} + +func (p *PeriodicConfig) Canonicalize() { + if p.Enabled == nil { + p.Enabled = helper.BoolToPtr(true) + } + if p.Spec == nil { + p.Spec = helper.StringToPtr("") + } + if p.SpecType == nil { + p.SpecType = helper.StringToPtr(PeriodicSpecCron) + } + if p.ProhibitOverlap == nil { + p.ProhibitOverlap = helper.BoolToPtr(false) + } + if p.TimeZone == nil || *p.TimeZone == "" { + p.TimeZone = helper.StringToPtr("UTC") + } +} + +// Next returns the closest time instant matching the spec that is after the +// passed time. If no matching instance exists, the zero value of time.Time is +// returned. The `time.Location` of the returned value matches that of the +// passed time. +func (p *PeriodicConfig) Next(fromTime time.Time) time.Time { + if *p.SpecType == PeriodicSpecCron { + if e, err := cronexpr.Parse(*p.Spec); err == nil { + return e.Next(fromTime) + } + } + + return time.Time{} +} + +func (p *PeriodicConfig) GetLocation() (*time.Location, error) { + if p.TimeZone == nil || *p.TimeZone == "" { + return time.UTC, nil + } + + return time.LoadLocation(*p.TimeZone) +} + +// ParameterizedJobConfig is used to configure the parameterized job. +type ParameterizedJobConfig struct { + Payload string + MetaRequired []string `mapstructure:"meta_required"` + MetaOptional []string `mapstructure:"meta_optional"` +} + +// Job is used to serialize a job. +type Job struct { + Stop *bool + Region *string + Namespace *string + ID *string + ParentID *string + Name *string + Type *string + Priority *int + AllAtOnce *bool `mapstructure:"all_at_once"` + Datacenters []string + Constraints []*Constraint + TaskGroups []*TaskGroup + Update *UpdateStrategy + Periodic *PeriodicConfig + ParameterizedJob *ParameterizedJobConfig + Payload []byte + Meta map[string]string + VaultToken *string `mapstructure:"vault_token"` + Status *string + StatusDescription *string + Stable *bool + Version *uint64 + SubmitTime *int64 + CreateIndex *uint64 + ModifyIndex *uint64 + JobModifyIndex *uint64 +} + +// IsPeriodic returns whether a job is periodic. +func (j *Job) IsPeriodic() bool { + return j.Periodic != nil +} + +// IsParameterized returns whether a job is parameterized job. 
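IsParameterized below is the dispatch-side counterpart of IsPeriodic above. For the periodic case, Next and GetLocation are enough to preview upcoming launches client-side; a sketch with a placeholder cron spec:

package example

import (
    "fmt"
    "time"

    "github.com/hashicorp/nomad/api"
    "github.com/hashicorp/nomad/helper"
)

// printNextLaunches prints the next n launch times of a periodic
// configuration, evaluated in its configured time zone.
func printNextLaunches(n int) error {
    p := &api.PeriodicConfig{
        Spec:     helper.StringToPtr("*/30 * * * *"), // placeholder: every 30 minutes
        TimeZone: helper.StringToPtr("UTC"),
    }
    p.Canonicalize() // fills Enabled, SpecType ("cron") and ProhibitOverlap

    loc, err := p.GetLocation()
    if err != nil {
        return err
    }

    t := time.Now().In(loc)
    for i := 0; i < n; i++ {
        t = p.Next(t)
        if t.IsZero() {
            return fmt.Errorf("no further launches match the spec")
        }
        fmt.Println(t)
    }
    return nil
}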
+func (j *Job) IsParameterized() bool { + return j.ParameterizedJob != nil +} + +func (j *Job) Canonicalize() { + if j.ID == nil { + j.ID = helper.StringToPtr("") + } + if j.Name == nil { + j.Name = helper.StringToPtr(*j.ID) + } + if j.ParentID == nil { + j.ParentID = helper.StringToPtr("") + } + if j.Namespace == nil { + j.Namespace = helper.StringToPtr(DefaultNamespace) + } + if j.Priority == nil { + j.Priority = helper.IntToPtr(50) + } + if j.Stop == nil { + j.Stop = helper.BoolToPtr(false) + } + if j.Region == nil { + j.Region = helper.StringToPtr("global") + } + if j.Namespace == nil { + j.Namespace = helper.StringToPtr("default") + } + if j.Type == nil { + j.Type = helper.StringToPtr("service") + } + if j.AllAtOnce == nil { + j.AllAtOnce = helper.BoolToPtr(false) + } + if j.VaultToken == nil { + j.VaultToken = helper.StringToPtr("") + } + if j.Status == nil { + j.Status = helper.StringToPtr("") + } + if j.StatusDescription == nil { + j.StatusDescription = helper.StringToPtr("") + } + if j.Stable == nil { + j.Stable = helper.BoolToPtr(false) + } + if j.Version == nil { + j.Version = helper.Uint64ToPtr(0) + } + if j.CreateIndex == nil { + j.CreateIndex = helper.Uint64ToPtr(0) + } + if j.ModifyIndex == nil { + j.ModifyIndex = helper.Uint64ToPtr(0) + } + if j.JobModifyIndex == nil { + j.JobModifyIndex = helper.Uint64ToPtr(0) + } + if j.Periodic != nil { + j.Periodic.Canonicalize() + } + if j.Update != nil { + j.Update.Canonicalize() + } + + for _, tg := range j.TaskGroups { + tg.Canonicalize(j) + } +} + +// JobSummary summarizes the state of the allocations of a job +type JobSummary struct { + JobID string + Namespace string + Summary map[string]TaskGroupSummary + Children *JobChildrenSummary + + // Raft Indexes + CreateIndex uint64 + ModifyIndex uint64 +} + +// JobChildrenSummary contains the summary of children job status +type JobChildrenSummary struct { + Pending int64 + Running int64 + Dead int64 +} + +func (jc *JobChildrenSummary) Sum() int { + if jc == nil { + return 0 + } + + return int(jc.Pending + jc.Running + jc.Dead) +} + +// TaskGroup summarizes the state of all the allocations of a particular +// TaskGroup +type TaskGroupSummary struct { + Queued int + Complete int + Failed int + Running int + Starting int + Lost int +} + +// JobListStub is used to return a subset of information about +// jobs during list operations. +type JobListStub struct { + ID string + ParentID string + Name string + Type string + Priority int + Periodic bool + ParameterizedJob bool + Stop bool + Status string + StatusDescription string + JobSummary *JobSummary + CreateIndex uint64 + ModifyIndex uint64 + JobModifyIndex uint64 + SubmitTime int64 +} + +// JobIDSort is used to sort jobs by their job ID's. +type JobIDSort []*JobListStub + +func (j JobIDSort) Len() int { + return len(j) +} + +func (j JobIDSort) Less(a, b int) bool { + return j[a].ID < j[b].ID +} + +func (j JobIDSort) Swap(a, b int) { + j[a], j[b] = j[b], j[a] +} + +// NewServiceJob creates and returns a new service-style job +// for long-lived processes using the provided name, ID, and +// relative job priority. +func NewServiceJob(id, name, region string, pri int) *Job { + return newJob(id, name, region, JobTypeService, pri) +} + +// NewBatchJob creates and returns a new batch-style job for +// short-lived processes using the provided name and ID along +// with the relative job priority. 
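NewBatchJob below mirrors NewServiceJob above; together with the TaskGroup and Task builders later in this package, jobs can be assembled programmatically. A sketch using placeholder names and a placeholder image:

package main

import (
    "fmt"
    "log"

    "github.com/hashicorp/nomad/api"
)

func main() {
    client, err := api.NewClient(api.DefaultConfig())
    if err != nil {
        log.Fatal(err)
    }

    // "example", "cache", "redis" and the image are placeholders.
    task := api.NewTask("redis", "docker").SetConfig("image", "redis:3.2")
    group := api.NewTaskGroup("cache", 1).AddTask(task)
    job := api.NewServiceJob("example", "example", "global", 50).
        AddDatacenter("dc1").
        AddTaskGroup(group)

    resp, _, err := client.Jobs().Register(job, nil)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("created evaluation", resp.EvalID)
}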
+func NewBatchJob(id, name, region string, pri int) *Job { + return newJob(id, name, region, JobTypeBatch, pri) +} + +// newJob is used to create a new Job struct. +func newJob(id, name, region, typ string, pri int) *Job { + return &Job{ + Region: ®ion, + ID: &id, + Name: &name, + Type: &typ, + Priority: &pri, + } +} + +// SetMeta is used to set arbitrary k/v pairs of metadata on a job. +func (j *Job) SetMeta(key, val string) *Job { + if j.Meta == nil { + j.Meta = make(map[string]string) + } + j.Meta[key] = val + return j +} + +// AddDatacenter is used to add a datacenter to a job. +func (j *Job) AddDatacenter(dc string) *Job { + j.Datacenters = append(j.Datacenters, dc) + return j +} + +// Constrain is used to add a constraint to a job. +func (j *Job) Constrain(c *Constraint) *Job { + j.Constraints = append(j.Constraints, c) + return j +} + +// AddTaskGroup adds a task group to an existing job. +func (j *Job) AddTaskGroup(grp *TaskGroup) *Job { + j.TaskGroups = append(j.TaskGroups, grp) + return j +} + +// AddPeriodicConfig adds a periodic config to an existing job. +func (j *Job) AddPeriodicConfig(cfg *PeriodicConfig) *Job { + j.Periodic = cfg + return j +} + +type WriteRequest struct { + // The target region for this write + Region string + + // Namespace is the target namespace for this write + Namespace string + + // SecretID is the secret ID of an ACL token + SecretID string +} + +// JobValidateRequest is used to validate a job +type JobValidateRequest struct { + Job *Job + WriteRequest +} + +// JobValidateResponse is the response from validate request +type JobValidateResponse struct { + // DriverConfigValidated indicates whether the agent validated the driver + // config + DriverConfigValidated bool + + // ValidationErrors is a list of validation errors + ValidationErrors []string + + // Error is a string version of any error that may have occurred + Error string + + // Warnings contains any warnings about the given job. These may include + // deprecation warnings. + Warnings string +} + +// JobRevertRequest is used to revert a job to a prior version. +type JobRevertRequest struct { + // JobID is the ID of the job being reverted + JobID string + + // JobVersion the version to revert to. + JobVersion uint64 + + // EnforcePriorVersion if set will enforce that the job is at the given + // version before reverting. + EnforcePriorVersion *uint64 + + WriteRequest +} + +// JobUpdateRequest is used to update a job +type JobRegisterRequest struct { + Job *Job + // If EnforceIndex is set then the job will only be registered if the passed + // JobModifyIndex matches the current Jobs index. If the index is zero, the + // register only occurs if the job is new. + EnforceIndex bool + JobModifyIndex uint64 + PolicyOverride bool + + WriteRequest +} + +// RegisterJobRequest is used to serialize a job registration +type RegisterJobRequest struct { + Job *Job + EnforceIndex bool `json:",omitempty"` + JobModifyIndex uint64 `json:",omitempty"` + PolicyOverride bool `json:",omitempty"` +} + +// JobRegisterResponse is used to respond to a job registration +type JobRegisterResponse struct { + EvalID string + EvalCreateIndex uint64 + JobModifyIndex uint64 + + // Warnings contains any warnings about the given job. These may include + // deprecation warnings. 
+ Warnings string + + QueryMeta +} + +// JobDeregisterResponse is used to respond to a job deregistration +type JobDeregisterResponse struct { + EvalID string + EvalCreateIndex uint64 + JobModifyIndex uint64 + QueryMeta +} + +type JobPlanRequest struct { + Job *Job + Diff bool + PolicyOverride bool + WriteRequest +} + +type JobPlanResponse struct { + JobModifyIndex uint64 + CreatedEvals []*Evaluation + Diff *JobDiff + Annotations *PlanAnnotations + FailedTGAllocs map[string]*AllocationMetric + NextPeriodicLaunch time.Time + + // Warnings contains any warnings about the given job. These may include + // deprecation warnings. + Warnings string +} + +type JobDiff struct { + Type string + ID string + Fields []*FieldDiff + Objects []*ObjectDiff + TaskGroups []*TaskGroupDiff +} + +type TaskGroupDiff struct { + Type string + Name string + Fields []*FieldDiff + Objects []*ObjectDiff + Tasks []*TaskDiff + Updates map[string]uint64 +} + +type TaskDiff struct { + Type string + Name string + Fields []*FieldDiff + Objects []*ObjectDiff + Annotations []string +} + +type FieldDiff struct { + Type string + Name string + Old, New string + Annotations []string +} + +type ObjectDiff struct { + Type string + Name string + Fields []*FieldDiff + Objects []*ObjectDiff +} + +type PlanAnnotations struct { + DesiredTGUpdates map[string]*DesiredUpdates +} + +type DesiredUpdates struct { + Ignore uint64 + Place uint64 + Migrate uint64 + Stop uint64 + InPlaceUpdate uint64 + DestructiveUpdate uint64 + Canary uint64 +} + +type JobDispatchRequest struct { + JobID string + Payload []byte + Meta map[string]string +} + +type JobDispatchResponse struct { + DispatchedJobID string + EvalID string + EvalCreateIndex uint64 + JobCreateIndex uint64 + WriteMeta +} + +// JobVersionsResponse is used for a job get versions request +type JobVersionsResponse struct { + Versions []*Job + Diffs []*JobDiff + QueryMeta +} + +// JobStabilityRequest is used to marked a job as stable. +type JobStabilityRequest struct { + // Job to set the stability on + JobID string + JobVersion uint64 + + // Set the stability + Stable bool + WriteRequest +} + +// JobStabilityResponse is the response when marking a job as stable. 
+type JobStabilityResponse struct { + JobModifyIndex uint64 + WriteMeta +} diff --git a/vendor/github.com/hashicorp/nomad/api/jobs_testing.go b/vendor/github.com/hashicorp/nomad/api/jobs_testing.go new file mode 100644 index 0000000000..bed9ac4748 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/jobs_testing.go @@ -0,0 +1,110 @@ +package api + +import ( + "time" + + "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/nomad/structs" +) + +func MockJob() *Job { + job := &Job{ + Region: helper.StringToPtr("global"), + ID: helper.StringToPtr(structs.GenerateUUID()), + Name: helper.StringToPtr("my-job"), + Type: helper.StringToPtr("service"), + Priority: helper.IntToPtr(50), + AllAtOnce: helper.BoolToPtr(false), + Datacenters: []string{"dc1"}, + Constraints: []*Constraint{ + &Constraint{ + LTarget: "${attr.kernel.name}", + RTarget: "linux", + Operand: "=", + }, + }, + TaskGroups: []*TaskGroup{ + &TaskGroup{ + Name: helper.StringToPtr("web"), + Count: helper.IntToPtr(10), + EphemeralDisk: &EphemeralDisk{ + SizeMB: helper.IntToPtr(150), + }, + RestartPolicy: &RestartPolicy{ + Attempts: helper.IntToPtr(3), + Interval: helper.TimeToPtr(10 * time.Minute), + Delay: helper.TimeToPtr(1 * time.Minute), + Mode: helper.StringToPtr("delay"), + }, + Tasks: []*Task{ + &Task{ + Name: "web", + Driver: "exec", + Config: map[string]interface{}{ + "command": "/bin/date", + }, + Env: map[string]string{ + "FOO": "bar", + }, + Services: []*Service{ + { + Name: "${TASK}-frontend", + PortLabel: "http", + Tags: []string{"pci:${meta.pci-dss}", "datacenter:${node.datacenter}"}, + Checks: []ServiceCheck{ + { + Name: "check-table", + Type: "script", + Command: "/usr/local/check-table-${meta.database}", + Args: []string{"${meta.version}"}, + Interval: 30 * time.Second, + Timeout: 5 * time.Second, + }, + }, + }, + { + Name: "${TASK}-admin", + PortLabel: "admin", + }, + }, + LogConfig: DefaultLogConfig(), + Resources: &Resources{ + CPU: helper.IntToPtr(500), + MemoryMB: helper.IntToPtr(256), + Networks: []*NetworkResource{ + &NetworkResource{ + MBits: helper.IntToPtr(50), + DynamicPorts: []Port{{Label: "http"}, {Label: "admin"}}, + }, + }, + }, + Meta: map[string]string{ + "foo": "bar", + }, + }, + }, + Meta: map[string]string{ + "elb_check_type": "http", + "elb_check_interval": "30s", + "elb_check_min": "3", + }, + }, + }, + Meta: map[string]string{ + "owner": "armon", + }, + } + job.Canonicalize() + return job +} + +func MockPeriodicJob() *Job { + j := MockJob() + j.Type = helper.StringToPtr("batch") + j.Periodic = &PeriodicConfig{ + Enabled: helper.BoolToPtr(true), + SpecType: helper.StringToPtr("cron"), + Spec: helper.StringToPtr("*/30 * * * *"), + } + return j +} diff --git a/vendor/github.com/hashicorp/nomad/api/namespace.go b/vendor/github.com/hashicorp/nomad/api/namespace.go new file mode 100644 index 0000000000..1771d891db --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/namespace.go @@ -0,0 +1,90 @@ +package api + +import ( + "fmt" + "sort" +) + +// Namespaces is used to query the namespace endpoints. +type Namespaces struct { + client *Client +} + +// Namespaces returns a new handle on the namespaces. +func (c *Client) Namespaces() *Namespaces { + return &Namespaces{client: c} +} + +// List is used to dump all of the namespaces. 
+func (n *Namespaces) List(q *QueryOptions) ([]*Namespace, *QueryMeta, error) { + var resp []*Namespace + qm, err := n.client.query("/v1/namespaces", &resp, q) + if err != nil { + return nil, nil, err + } + sort.Sort(NamespaceIndexSort(resp)) + return resp, qm, nil +} + +// PrefixList is used to do a PrefixList search over namespaces +func (n *Namespaces) PrefixList(prefix string, q *QueryOptions) ([]*Namespace, *QueryMeta, error) { + if q == nil { + q = &QueryOptions{Prefix: prefix} + } else { + q.Prefix = prefix + } + + return n.List(q) +} + +// Info is used to query a single namespace by its name. +func (n *Namespaces) Info(name string, q *QueryOptions) (*Namespace, *QueryMeta, error) { + var resp Namespace + qm, err := n.client.query("/v1/namespace/"+name, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, qm, nil +} + +// Register is used to register a namespace. +func (n *Namespaces) Register(namespace *Namespace, q *WriteOptions) (*WriteMeta, error) { + wm, err := n.client.write("/v1/namespace", namespace, nil, q) + if err != nil { + return nil, err + } + return wm, nil +} + +// Delete is used to delete a namespace +func (n *Namespaces) Delete(namespace string, q *WriteOptions) (*WriteMeta, error) { + wm, err := n.client.delete(fmt.Sprintf("/v1/namespace/%s", namespace), nil, q) + if err != nil { + return nil, err + } + return wm, nil +} + +// Namespace is used to serialize a namespace. +type Namespace struct { + Name string + Description string + CreateIndex uint64 + ModifyIndex uint64 +} + +// NamespaceIndexSort is a wrapper to sort Namespaces by CreateIndex. We +// reverse the test so that we get the highest index first. +type NamespaceIndexSort []*Namespace + +func (n NamespaceIndexSort) Len() int { + return len(n) +} + +func (n NamespaceIndexSort) Less(i, j int) bool { + return n[i].CreateIndex > n[j].CreateIndex +} + +func (n NamespaceIndexSort) Swap(i, j int) { + n[i], n[j] = n[j], n[i] +} diff --git a/vendor/github.com/hashicorp/nomad/api/nodes.go b/vendor/github.com/hashicorp/nomad/api/nodes.go new file mode 100644 index 0000000000..50a159628a --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/nodes.go @@ -0,0 +1,199 @@ +package api + +import ( + "sort" + "strconv" +) + +// Nodes is used to query node-related API endpoints +type Nodes struct { + client *Client +} + +// Nodes returns a handle on the node endpoints. +func (c *Client) Nodes() *Nodes { + return &Nodes{client: c} +} + +// List is used to list out all of the nodes +func (n *Nodes) List(q *QueryOptions) ([]*NodeListStub, *QueryMeta, error) { + var resp NodeIndexSort + qm, err := n.client.query("/v1/nodes", &resp, q) + if err != nil { + return nil, nil, err + } + sort.Sort(NodeIndexSort(resp)) + return resp, qm, nil +} + +func (n *Nodes) PrefixList(prefix string) ([]*NodeListStub, *QueryMeta, error) { + return n.List(&QueryOptions{Prefix: prefix}) +} + +// Info is used to query a specific node by its ID. +func (n *Nodes) Info(nodeID string, q *QueryOptions) (*Node, *QueryMeta, error) { + var resp Node + qm, err := n.client.query("/v1/node/"+nodeID, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, qm, nil +} + +// ToggleDrain is used to toggle drain mode on/off for a given node. 
+func (n *Nodes) ToggleDrain(nodeID string, drain bool, q *WriteOptions) (*WriteMeta, error) { + drainArg := strconv.FormatBool(drain) + wm, err := n.client.write("/v1/node/"+nodeID+"/drain?enable="+drainArg, nil, nil, q) + if err != nil { + return nil, err + } + return wm, nil +} + +// Allocations is used to return the allocations associated with a node. +func (n *Nodes) Allocations(nodeID string, q *QueryOptions) ([]*Allocation, *QueryMeta, error) { + var resp []*Allocation + qm, err := n.client.query("/v1/node/"+nodeID+"/allocations", &resp, q) + if err != nil { + return nil, nil, err + } + sort.Sort(AllocationSort(resp)) + return resp, qm, nil +} + +// ForceEvaluate is used to force-evaluate an existing node. +func (n *Nodes) ForceEvaluate(nodeID string, q *WriteOptions) (string, *WriteMeta, error) { + var resp nodeEvalResponse + wm, err := n.client.write("/v1/node/"+nodeID+"/evaluate", nil, &resp, q) + if err != nil { + return "", nil, err + } + return resp.EvalID, wm, nil +} + +func (n *Nodes) Stats(nodeID string, q *QueryOptions) (*HostStats, error) { + nodeClient, err := n.client.GetNodeClient(nodeID, q) + if err != nil { + return nil, err + } + var resp HostStats + if _, err := nodeClient.query("/v1/client/stats", &resp, nil); err != nil { + return nil, err + } + return &resp, nil +} + +func (n *Nodes) GC(nodeID string, q *QueryOptions) error { + nodeClient, err := n.client.GetNodeClient(nodeID, q) + if err != nil { + return err + } + + var resp struct{} + _, err = nodeClient.query("/v1/client/gc", &resp, nil) + return err +} + +// Node is used to deserialize a node entry. +type Node struct { + ID string + Datacenter string + Name string + HTTPAddr string + TLSEnabled bool + Attributes map[string]string + Resources *Resources + Reserved *Resources + Links map[string]string + Meta map[string]string + NodeClass string + Drain bool + Status string + StatusDescription string + StatusUpdatedAt int64 + CreateIndex uint64 + ModifyIndex uint64 +} + +// HostStats represents resource usage stats of the host running a Nomad client +type HostStats struct { + Memory *HostMemoryStats + CPU []*HostCPUStats + DiskStats []*HostDiskStats + Uptime uint64 + CPUTicksConsumed float64 +} + +type HostMemoryStats struct { + Total uint64 + Available uint64 + Used uint64 + Free uint64 +} + +type HostCPUStats struct { + CPU string + User float64 + System float64 + Idle float64 +} + +type HostDiskStats struct { + Device string + Mountpoint string + Size uint64 + Used uint64 + Available uint64 + UsedPercent float64 + InodesUsedPercent float64 +} + +// NodeListStub is a subset of information returned during +// node list operations. +type NodeListStub struct { + ID string + Datacenter string + Name string + NodeClass string + Version string + Drain bool + Status string + StatusDescription string + CreateIndex uint64 + ModifyIndex uint64 +} + +// NodeIndexSort reverse sorts nodes by CreateIndex +type NodeIndexSort []*NodeListStub + +func (n NodeIndexSort) Len() int { + return len(n) +} + +func (n NodeIndexSort) Less(i, j int) bool { + return n[i].CreateIndex > n[j].CreateIndex +} + +func (n NodeIndexSort) Swap(i, j int) { + n[i], n[j] = n[j], n[i] +} + +// nodeEvalResponse is used to decode a force-eval. +type nodeEvalResponse struct { + EvalID string +} + +// AllocationSort reverse sorts allocs by CreateIndex. 
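Before the AllocationSort helper below, a sketch of the Stats call above, which surfaces the HostStats structures just defined (the output formatting is arbitrary):

package main

import (
    "fmt"
    "log"

    "github.com/hashicorp/nomad/api"
)

func main() {
    client, err := api.NewClient(api.DefaultConfig())
    if err != nil {
        log.Fatal(err)
    }

    nodes, _, err := client.Nodes().List(nil)
    if err != nil {
        log.Fatal(err)
    }

    for _, n := range nodes {
        stats, err := client.Nodes().Stats(n.ID, nil)
        if err != nil {
            // The node may be down or unreachable; skip it.
            fmt.Printf("%s: %v\n", n.Name, err)
            continue
        }
        if stats.Memory != nil {
            fmt.Printf("%s: uptime=%d memory used=%d of %d\n",
                n.Name, stats.Uptime, stats.Memory.Used, stats.Memory.Total)
        }
    }
}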
+type AllocationSort []*Allocation + +func (a AllocationSort) Len() int { + return len(a) +} + +func (a AllocationSort) Less(i, j int) bool { + return a[i].CreateIndex > a[j].CreateIndex +} + +func (a AllocationSort) Swap(i, j int) { + a[i], a[j] = a[j], a[i] +} diff --git a/vendor/github.com/hashicorp/nomad/api/operator.go b/vendor/github.com/hashicorp/nomad/api/operator.go new file mode 100644 index 0000000000..a10648a295 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/operator.go @@ -0,0 +1,87 @@ +package api + +// Operator can be used to perform low-level operator tasks for Nomad. +type Operator struct { + c *Client +} + +// Operator returns a handle to the operator endpoints. +func (c *Client) Operator() *Operator { + return &Operator{c} +} + +// RaftServer has information about a server in the Raft configuration. +type RaftServer struct { + // ID is the unique ID for the server. These are currently the same + // as the address, but they will be changed to a real GUID in a future + // release of Nomad. + ID string + + // Node is the node name of the server, as known by Nomad, or this + // will be set to "(unknown)" otherwise. + Node string + + // Address is the IP:port of the server, used for Raft communications. + Address string + + // Leader is true if this server is the current cluster leader. + Leader bool + + // Voter is true if this server has a vote in the cluster. This might + // be false if the server is staging and still coming online, or if + // it's a non-voting server, which will be added in a future release of + // Nomad. + Voter bool +} + +// RaftConfigration is returned when querying for the current Raft configuration. +type RaftConfiguration struct { + // Servers has the list of servers in the Raft configuration. + Servers []*RaftServer + + // Index has the Raft index of this configuration. + Index uint64 +} + +// RaftGetConfiguration is used to query the current Raft peer set. +func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, error) { + r, err := op.c.newRequest("GET", "/v1/operator/raft/configuration") + if err != nil { + return nil, err + } + r.setQueryOptions(q) + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out RaftConfiguration + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return &out, nil +} + +// RaftRemovePeerByAddress is used to kick a stale peer (one that it in the Raft +// quorum but no longer known to Serf or the catalog) by address in the form of +// "IP:port". +func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) error { + r, err := op.c.newRequest("DELETE", "/v1/operator/raft/peer") + if err != nil { + return err + } + r.setWriteOptions(q) + + // TODO (alexdadgar) Currently we made address a query parameter. Once + // IDs are in place this will be DELETE /v1/operator/raft/peer/. 
+ r.params.Set("address", string(address)) + + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return err + } + + resp.Body.Close() + return nil +} diff --git a/vendor/github.com/hashicorp/nomad/api/raw.go b/vendor/github.com/hashicorp/nomad/api/raw.go new file mode 100644 index 0000000000..9369829c51 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/raw.go @@ -0,0 +1,38 @@ +package api + +import "io" + +// Raw can be used to do raw queries against custom endpoints +type Raw struct { + c *Client +} + +// Raw returns a handle to query endpoints +func (c *Client) Raw() *Raw { + return &Raw{c} +} + +// Query is used to do a GET request against an endpoint +// and deserialize the response into an interface using +// standard Nomad conventions. +func (raw *Raw) Query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) { + return raw.c.query(endpoint, out, q) +} + +// Response is used to make a GET request against an endpoint and returns the +// response body +func (raw *Raw) Response(endpoint string, q *QueryOptions) (io.ReadCloser, error) { + return raw.c.rawQuery(endpoint, q) +} + +// Write is used to do a PUT request against an endpoint +// and serialize/deserialized using the standard Nomad conventions. +func (raw *Raw) Write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) { + return raw.c.write(endpoint, in, out, q) +} + +// Delete is used to do a DELETE request against an endpoint +// and serialize/deserialized using the standard Nomad conventions. +func (raw *Raw) Delete(endpoint string, out interface{}, q *WriteOptions) (*WriteMeta, error) { + return raw.c.delete(endpoint, out, q) +} diff --git a/vendor/github.com/hashicorp/nomad/api/regions.go b/vendor/github.com/hashicorp/nomad/api/regions.go new file mode 100644 index 0000000000..c94ce297a8 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/regions.go @@ -0,0 +1,23 @@ +package api + +import "sort" + +// Regions is used to query the regions in the cluster. +type Regions struct { + client *Client +} + +// Regions returns a handle on the regions endpoints. +func (c *Client) Regions() *Regions { + return &Regions{client: c} +} + +// List returns a list of all of the regions. +func (r *Regions) List() ([]string, error) { + var resp []string + if _, err := r.client.query("/v1/regions", &resp, nil); err != nil { + return nil, err + } + sort.Strings(resp) + return resp, nil +} diff --git a/vendor/github.com/hashicorp/nomad/api/resources.go b/vendor/github.com/hashicorp/nomad/api/resources.go new file mode 100644 index 0000000000..8d3f27c6cd --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/resources.go @@ -0,0 +1,81 @@ +package api + +import "github.com/hashicorp/nomad/helper" + +// Resources encapsulates the required resources of +// a given task or task group. +type Resources struct { + CPU *int + MemoryMB *int `mapstructure:"memory"` + DiskMB *int `mapstructure:"disk"` + IOPS *int + Networks []*NetworkResource +} + +func (r *Resources) Canonicalize() { + if r.CPU == nil { + r.CPU = helper.IntToPtr(100) + } + if r.MemoryMB == nil { + r.MemoryMB = helper.IntToPtr(10) + } + if r.IOPS == nil { + r.IOPS = helper.IntToPtr(0) + } + for _, n := range r.Networks { + n.Canonicalize() + } +} + +func MinResources() *Resources { + return &Resources{ + CPU: helper.IntToPtr(100), + MemoryMB: helper.IntToPtr(10), + IOPS: helper.IntToPtr(0), + } + +} + +// Merge merges this resource with another resource. 
+func (r *Resources) Merge(other *Resources) { + if other == nil { + return + } + if other.CPU != nil { + r.CPU = other.CPU + } + if other.MemoryMB != nil { + r.MemoryMB = other.MemoryMB + } + if other.DiskMB != nil { + r.DiskMB = other.DiskMB + } + if other.IOPS != nil { + r.IOPS = other.IOPS + } + if len(other.Networks) != 0 { + r.Networks = other.Networks + } +} + +type Port struct { + Label string + Value int `mapstructure:"static"` +} + +// NetworkResource is used to describe required network +// resources of a given task. +type NetworkResource struct { + Device string + CIDR string + IP string + MBits *int + ReservedPorts []Port + DynamicPorts []Port +} + +func (n *NetworkResource) Canonicalize() { + if n.MBits == nil { + n.MBits = helper.IntToPtr(10) + } +} diff --git a/vendor/github.com/hashicorp/nomad/api/search.go b/vendor/github.com/hashicorp/nomad/api/search.go new file mode 100644 index 0000000000..6a6cb9b59e --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/search.go @@ -0,0 +1,39 @@ +package api + +import ( + "github.com/hashicorp/nomad/api/contexts" +) + +type Search struct { + client *Client +} + +// Search returns a handle on the Search endpoints +func (c *Client) Search() *Search { + return &Search{client: c} +} + +// PrefixSearch returns a list of matches for a particular context and prefix. +func (s *Search) PrefixSearch(prefix string, context contexts.Context, q *QueryOptions) (*SearchResponse, *QueryMeta, error) { + var resp SearchResponse + req := &SearchRequest{Prefix: prefix, Context: context} + + qm, err := s.client.putQuery("/v1/search", req, &resp, q) + if err != nil { + return nil, nil, err + } + + return &resp, qm, nil +} + +type SearchRequest struct { + Prefix string + Context contexts.Context + QueryOptions +} + +type SearchResponse struct { + Matches map[contexts.Context][]string + Truncations map[contexts.Context]bool + QueryMeta +} diff --git a/vendor/github.com/hashicorp/nomad/api/sentinel.go b/vendor/github.com/hashicorp/nomad/api/sentinel.go new file mode 100644 index 0000000000..c1e52c7cb8 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/sentinel.go @@ -0,0 +1,79 @@ +package api + +import "fmt" + +// SentinelPolicies is used to query the Sentinel Policy endpoints. +type SentinelPolicies struct { + client *Client +} + +// SentinelPolicies returns a new handle on the Sentinel policies. +func (c *Client) SentinelPolicies() *SentinelPolicies { + return &SentinelPolicies{client: c} +} + +// List is used to dump all of the policies. 
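List below rounds out the Sentinel policy calls (Upsert, Delete and Info follow it in this file); a sketch of installing a policy and listing the result. The scope and enforcement-level strings are placeholders, not values taken from this change:

package example

import "github.com/hashicorp/nomad/api"

// installPolicy creates or updates a Sentinel policy and returns the names of
// all policies afterwards.
func installPolicy(client *api.Client, name, source string) ([]string, error) {
    policy := &api.SentinelPolicy{
        Name:             name,
        Description:      "managed by example code",
        Scope:            "submit-job", // placeholder scope
        EnforcementLevel: "advisory",   // placeholder enforcement level
        Policy:           source,
    }
    if _, err := client.SentinelPolicies().Upsert(policy, nil); err != nil {
        return nil, err
    }

    stubs, _, err := client.SentinelPolicies().List(nil)
    if err != nil {
        return nil, err
    }
    names := make([]string, 0, len(stubs))
    for _, s := range stubs {
        names = append(names, s.Name)
    }
    return names, nil
}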
+func (a *SentinelPolicies) List(q *QueryOptions) ([]*SentinelPolicyListStub, *QueryMeta, error) { + var resp []*SentinelPolicyListStub + qm, err := a.client.query("/v1/sentinel/policies", &resp, q) + if err != nil { + return nil, nil, err + } + return resp, qm, nil +} + +// Upsert is used to create or update a policy +func (a *SentinelPolicies) Upsert(policy *SentinelPolicy, q *WriteOptions) (*WriteMeta, error) { + if policy == nil || policy.Name == "" { + return nil, fmt.Errorf("missing policy name") + } + wm, err := a.client.write("/v1/sentinel/policy/"+policy.Name, policy, nil, q) + if err != nil { + return nil, err + } + return wm, nil +} + +// Delete is used to delete a policy +func (a *SentinelPolicies) Delete(policyName string, q *WriteOptions) (*WriteMeta, error) { + if policyName == "" { + return nil, fmt.Errorf("missing policy name") + } + wm, err := a.client.delete("/v1/sentinel/policy/"+policyName, nil, q) + if err != nil { + return nil, err + } + return wm, nil +} + +// Info is used to query a specific policy +func (a *SentinelPolicies) Info(policyName string, q *QueryOptions) (*SentinelPolicy, *QueryMeta, error) { + if policyName == "" { + return nil, nil, fmt.Errorf("missing policy name") + } + var resp SentinelPolicy + wm, err := a.client.query("/v1/sentinel/policy/"+policyName, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, wm, nil +} + +type SentinelPolicy struct { + Name string + Description string + Scope string + EnforcementLevel string + Policy string + CreateIndex uint64 + ModifyIndex uint64 +} + +type SentinelPolicyListStub struct { + Name string + Description string + Scope string + EnforcementLevel string + CreateIndex uint64 + ModifyIndex uint64 +} diff --git a/vendor/github.com/hashicorp/nomad/api/status.go b/vendor/github.com/hashicorp/nomad/api/status.go new file mode 100644 index 0000000000..da1cb4c02e --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/status.go @@ -0,0 +1,43 @@ +package api + +// Status is used to query the status-related endpoints. +type Status struct { + client *Client +} + +// Status returns a handle on the status endpoints. +func (c *Client) Status() *Status { + return &Status{client: c} +} + +// Leader is used to query for the current cluster leader. +func (s *Status) Leader() (string, error) { + var resp string + _, err := s.client.query("/v1/status/leader", &resp, nil) + if err != nil { + return "", err + } + return resp, nil +} + +// RegionLeader is used to query for the leader in the passed region. +func (s *Status) RegionLeader(region string) (string, error) { + var resp string + q := QueryOptions{Region: region} + _, err := s.client.query("/v1/status/leader", &resp, &q) + if err != nil { + return "", err + } + return resp, nil +} + +// Peers is used to query the addresses of the server peers +// in the cluster. +func (s *Status) Peers() ([]string, error) { + var resp []string + _, err := s.client.query("/v1/status/peers", &resp, nil) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/github.com/hashicorp/nomad/api/system.go b/vendor/github.com/hashicorp/nomad/api/system.go new file mode 100644 index 0000000000..3717b9aea7 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/system.go @@ -0,0 +1,23 @@ +package api + +// Status is used to query the status-related endpoints. +type System struct { + client *Client +} + +// System returns a handle on the system endpoints. 
+func (c *Client) System() *System { + return &System{client: c} +} + +func (s *System) GarbageCollect() error { + var req struct{} + _, err := s.client.write("/v1/system/gc", &req, nil, nil) + return err +} + +func (s *System) ReconcileSummaries() error { + var req struct{} + _, err := s.client.write("/v1/system/reconcile/summaries", &req, nil, nil) + return err +} diff --git a/vendor/github.com/hashicorp/nomad/api/tasks.go b/vendor/github.com/hashicorp/nomad/api/tasks.go new file mode 100644 index 0000000000..a3d10831e8 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/tasks.go @@ -0,0 +1,617 @@ +package api + +import ( + "fmt" + "path" + + "path/filepath" + "strings" + "time" + + "github.com/hashicorp/nomad/helper" +) + +// MemoryStats holds memory usage related stats +type MemoryStats struct { + RSS uint64 + Cache uint64 + Swap uint64 + MaxUsage uint64 + KernelUsage uint64 + KernelMaxUsage uint64 + Measured []string +} + +// CpuStats holds cpu usage related stats +type CpuStats struct { + SystemMode float64 + UserMode float64 + TotalTicks float64 + ThrottledPeriods uint64 + ThrottledTime uint64 + Percent float64 + Measured []string +} + +// ResourceUsage holds information related to cpu and memory stats +type ResourceUsage struct { + MemoryStats *MemoryStats + CpuStats *CpuStats +} + +// TaskResourceUsage holds aggregated resource usage of all processes in a Task +// and the resource usage of the individual pids +type TaskResourceUsage struct { + ResourceUsage *ResourceUsage + Timestamp int64 + Pids map[string]*ResourceUsage +} + +// AllocResourceUsage holds the aggregated task resource usage of the +// allocation. +type AllocResourceUsage struct { + ResourceUsage *ResourceUsage + Tasks map[string]*TaskResourceUsage + Timestamp int64 +} + +// RestartPolicy defines how the Nomad client restarts +// tasks in a taskgroup when they fail +type RestartPolicy struct { + Interval *time.Duration + Attempts *int + Delay *time.Duration + Mode *string +} + +func (r *RestartPolicy) Merge(rp *RestartPolicy) { + if rp.Interval != nil { + r.Interval = rp.Interval + } + if rp.Attempts != nil { + r.Attempts = rp.Attempts + } + if rp.Delay != nil { + r.Delay = rp.Delay + } + if rp.Mode != nil { + r.Mode = rp.Mode + } +} + +// CheckRestart describes if and when a task should be restarted based on +// failing health checks. +type CheckRestart struct { + Limit int `mapstructure:"limit"` + Grace *time.Duration `mapstructure:"grace_period"` + IgnoreWarnings bool `mapstructure:"ignore_warnings"` +} + +// Canonicalize CheckRestart fields if not nil. +func (c *CheckRestart) Canonicalize() { + if c == nil { + return + } + + if c.Grace == nil { + c.Grace = helper.TimeToPtr(1 * time.Second) + } +} + +// Copy returns a copy of CheckRestart or nil if unset. +func (c *CheckRestart) Copy() *CheckRestart { + if c == nil { + return nil + } + + nc := new(CheckRestart) + nc.Limit = c.Limit + if c.Grace != nil { + g := *c.Grace + nc.Grace = &g + } + nc.IgnoreWarnings = c.IgnoreWarnings + return nc +} + +// Merge values from other CheckRestart over default values on this +// CheckRestart and return merged copy. 
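Merge below is what lets a check-level check_restart block override only the fields it sets while inheriting the rest from the service level; a small sketch of that precedence with arbitrary values:

package example

import (
    "time"

    "github.com/hashicorp/nomad/api"
    "github.com/hashicorp/nomad/helper"
)

// effectiveCheckRestart layers a check's own settings over the service-wide
// defaults: fields left at their zero value fall through to the service,
// fields set on the check win.
func effectiveCheckRestart() *api.CheckRestart {
    serviceLevel := &api.CheckRestart{
        Limit: 3,
        Grace: helper.TimeToPtr(30 * time.Second),
    }
    checkLevel := &api.CheckRestart{
        Grace: helper.TimeToPtr(10 * time.Second), // override only the grace period
    }

    // Limit comes from the service (3), Grace from the check (10s).
    return checkLevel.Merge(serviceLevel)
}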
+func (c *CheckRestart) Merge(o *CheckRestart) *CheckRestart { + if c == nil { + // Just return other + return o + } + + nc := c.Copy() + + if o == nil { + // Nothing to merge + return nc + } + + if nc.Limit == 0 { + nc.Limit = o.Limit + } + + if nc.Grace == nil { + nc.Grace = o.Grace + } + + if nc.IgnoreWarnings { + nc.IgnoreWarnings = o.IgnoreWarnings + } + + return nc +} + +// The ServiceCheck data model represents the consul health check that +// Nomad registers for a Task +type ServiceCheck struct { + Id string + Name string + Type string + Command string + Args []string + Path string + Protocol string + PortLabel string `mapstructure:"port"` + Interval time.Duration + Timeout time.Duration + InitialStatus string `mapstructure:"initial_status"` + TLSSkipVerify bool `mapstructure:"tls_skip_verify"` + Header map[string][]string + Method string + CheckRestart *CheckRestart `mapstructure:"check_restart"` +} + +// The Service model represents a Consul service definition +type Service struct { + Id string + Name string + Tags []string + PortLabel string `mapstructure:"port"` + AddressMode string `mapstructure:"address_mode"` + Checks []ServiceCheck + CheckRestart *CheckRestart `mapstructure:"check_restart"` +} + +func (s *Service) Canonicalize(t *Task, tg *TaskGroup, job *Job) { + if s.Name == "" { + s.Name = fmt.Sprintf("%s-%s-%s", *job.Name, *tg.Name, t.Name) + } + + // Default to AddressModeAuto + if s.AddressMode == "" { + s.AddressMode = "auto" + } + + s.CheckRestart.Canonicalize() + + // Canonicallize CheckRestart on Checks and merge Service.CheckRestart + // into each check. + for _, c := range s.Checks { + c.CheckRestart.Canonicalize() + c.CheckRestart = c.CheckRestart.Merge(s.CheckRestart) + } +} + +// EphemeralDisk is an ephemeral disk object +type EphemeralDisk struct { + Sticky *bool + Migrate *bool + SizeMB *int `mapstructure:"size"` +} + +func DefaultEphemeralDisk() *EphemeralDisk { + return &EphemeralDisk{ + Sticky: helper.BoolToPtr(false), + Migrate: helper.BoolToPtr(false), + SizeMB: helper.IntToPtr(300), + } +} + +func (e *EphemeralDisk) Canonicalize() { + if e.Sticky == nil { + e.Sticky = helper.BoolToPtr(false) + } + if e.Migrate == nil { + e.Migrate = helper.BoolToPtr(false) + } + if e.SizeMB == nil { + e.SizeMB = helper.IntToPtr(300) + } +} + +// TaskGroup is the unit of scheduling. +type TaskGroup struct { + Name *string + Count *int + Constraints []*Constraint + Tasks []*Task + RestartPolicy *RestartPolicy + EphemeralDisk *EphemeralDisk + Update *UpdateStrategy + Meta map[string]string +} + +// NewTaskGroup creates a new TaskGroup. +func NewTaskGroup(name string, count int) *TaskGroup { + return &TaskGroup{ + Name: helper.StringToPtr(name), + Count: helper.IntToPtr(count), + } +} + +func (g *TaskGroup) Canonicalize(job *Job) { + if g.Name == nil { + g.Name = helper.StringToPtr("") + } + if g.Count == nil { + g.Count = helper.IntToPtr(1) + } + for _, t := range g.Tasks { + t.Canonicalize(g, job) + } + if g.EphemeralDisk == nil { + g.EphemeralDisk = DefaultEphemeralDisk() + } else { + g.EphemeralDisk.Canonicalize() + } + + // Merge the update policy from the job + if ju, tu := job.Update != nil, g.Update != nil; ju && tu { + // Merge the jobs and task groups definition of the update strategy + jc := job.Update.Copy() + jc.Merge(g.Update) + g.Update = jc + } else if ju && !job.Update.Empty() { + // Inherit the jobs as long as it is non-empty. 
+ jc := job.Update.Copy() + g.Update = jc + } + + if g.Update != nil { + g.Update.Canonicalize() + } + + var defaultRestartPolicy *RestartPolicy + switch *job.Type { + case "service", "system": + defaultRestartPolicy = &RestartPolicy{ + Delay: helper.TimeToPtr(15 * time.Second), + Attempts: helper.IntToPtr(2), + Interval: helper.TimeToPtr(1 * time.Minute), + Mode: helper.StringToPtr("delay"), + } + default: + defaultRestartPolicy = &RestartPolicy{ + Delay: helper.TimeToPtr(15 * time.Second), + Attempts: helper.IntToPtr(15), + Interval: helper.TimeToPtr(7 * 24 * time.Hour), + Mode: helper.StringToPtr("delay"), + } + } + + if g.RestartPolicy != nil { + defaultRestartPolicy.Merge(g.RestartPolicy) + } + g.RestartPolicy = defaultRestartPolicy +} + +// Constrain is used to add a constraint to a task group. +func (g *TaskGroup) Constrain(c *Constraint) *TaskGroup { + g.Constraints = append(g.Constraints, c) + return g +} + +// AddMeta is used to add a meta k/v pair to a task group +func (g *TaskGroup) SetMeta(key, val string) *TaskGroup { + if g.Meta == nil { + g.Meta = make(map[string]string) + } + g.Meta[key] = val + return g +} + +// AddTask is used to add a new task to a task group. +func (g *TaskGroup) AddTask(t *Task) *TaskGroup { + g.Tasks = append(g.Tasks, t) + return g +} + +// RequireDisk adds a ephemeral disk to the task group +func (g *TaskGroup) RequireDisk(disk *EphemeralDisk) *TaskGroup { + g.EphemeralDisk = disk + return g +} + +// LogConfig provides configuration for log rotation +type LogConfig struct { + MaxFiles *int `mapstructure:"max_files"` + MaxFileSizeMB *int `mapstructure:"max_file_size"` +} + +func DefaultLogConfig() *LogConfig { + return &LogConfig{ + MaxFiles: helper.IntToPtr(10), + MaxFileSizeMB: helper.IntToPtr(10), + } +} + +func (l *LogConfig) Canonicalize() { + if l.MaxFiles == nil { + l.MaxFiles = helper.IntToPtr(10) + } + if l.MaxFileSizeMB == nil { + l.MaxFileSizeMB = helper.IntToPtr(10) + } +} + +// DispatchPayloadConfig configures how a task gets its input from a job dispatch +type DispatchPayloadConfig struct { + File string +} + +// Task is a single process in a task group. +type Task struct { + Name string + Driver string + User string + Config map[string]interface{} + Constraints []*Constraint + Env map[string]string + Services []*Service + Resources *Resources + Meta map[string]string + KillTimeout *time.Duration `mapstructure:"kill_timeout"` + LogConfig *LogConfig `mapstructure:"logs"` + Artifacts []*TaskArtifact + Vault *Vault + Templates []*Template + DispatchPayload *DispatchPayloadConfig + Leader bool + ShutdownDelay time.Duration `mapstructure:"shutdown_delay"` +} + +func (t *Task) Canonicalize(tg *TaskGroup, job *Job) { + min := MinResources() + min.Merge(t.Resources) + min.Canonicalize() + t.Resources = min + + if t.KillTimeout == nil { + t.KillTimeout = helper.TimeToPtr(5 * time.Second) + } + if t.LogConfig == nil { + t.LogConfig = DefaultLogConfig() + } else { + t.LogConfig.Canonicalize() + } + for _, artifact := range t.Artifacts { + artifact.Canonicalize() + } + if t.Vault != nil { + t.Vault.Canonicalize() + } + for _, tmpl := range t.Templates { + tmpl.Canonicalize() + } + for _, s := range t.Services { + s.Canonicalize(t, tg, job) + } +} + +// TaskArtifact is used to download artifacts before running a task. 
+type TaskArtifact struct { + GetterSource *string `mapstructure:"source"` + GetterOptions map[string]string `mapstructure:"options"` + GetterMode *string `mapstructure:"mode"` + RelativeDest *string `mapstructure:"destination"` +} + +func (a *TaskArtifact) Canonicalize() { + if a.GetterMode == nil { + a.GetterMode = helper.StringToPtr("any") + } + if a.GetterSource == nil { + // Shouldn't be possible, but we don't want to panic + a.GetterSource = helper.StringToPtr("") + } + if a.RelativeDest == nil { + switch *a.GetterMode { + case "file": + // File mode should default to local/filename + dest := *a.GetterSource + dest = path.Base(dest) + dest = filepath.Join("local", dest) + a.RelativeDest = &dest + default: + // Default to a directory + a.RelativeDest = helper.StringToPtr("local/") + } + } +} + +type Template struct { + SourcePath *string `mapstructure:"source"` + DestPath *string `mapstructure:"destination"` + EmbeddedTmpl *string `mapstructure:"data"` + ChangeMode *string `mapstructure:"change_mode"` + ChangeSignal *string `mapstructure:"change_signal"` + Splay *time.Duration `mapstructure:"splay"` + Perms *string `mapstructure:"perms"` + LeftDelim *string `mapstructure:"left_delimiter"` + RightDelim *string `mapstructure:"right_delimiter"` + Envvars *bool `mapstructure:"env"` + VaultGrace *time.Duration `mapstructure:"vault_grace"` +} + +func (tmpl *Template) Canonicalize() { + if tmpl.SourcePath == nil { + tmpl.SourcePath = helper.StringToPtr("") + } + if tmpl.DestPath == nil { + tmpl.DestPath = helper.StringToPtr("") + } + if tmpl.EmbeddedTmpl == nil { + tmpl.EmbeddedTmpl = helper.StringToPtr("") + } + if tmpl.ChangeMode == nil { + tmpl.ChangeMode = helper.StringToPtr("restart") + } + if tmpl.ChangeSignal == nil { + if *tmpl.ChangeMode == "signal" { + tmpl.ChangeSignal = helper.StringToPtr("SIGHUP") + } else { + tmpl.ChangeSignal = helper.StringToPtr("") + } + } else { + sig := *tmpl.ChangeSignal + tmpl.ChangeSignal = helper.StringToPtr(strings.ToUpper(sig)) + } + if tmpl.Splay == nil { + tmpl.Splay = helper.TimeToPtr(5 * time.Second) + } + if tmpl.Perms == nil { + tmpl.Perms = helper.StringToPtr("0644") + } + if tmpl.LeftDelim == nil { + tmpl.LeftDelim = helper.StringToPtr("{{") + } + if tmpl.RightDelim == nil { + tmpl.RightDelim = helper.StringToPtr("}}") + } + if tmpl.Envvars == nil { + tmpl.Envvars = helper.BoolToPtr(false) + } + if tmpl.VaultGrace == nil { + tmpl.VaultGrace = helper.TimeToPtr(5 * time.Minute) + } +} + +type Vault struct { + Policies []string + Env *bool + ChangeMode *string `mapstructure:"change_mode"` + ChangeSignal *string `mapstructure:"change_signal"` +} + +func (v *Vault) Canonicalize() { + if v.Env == nil { + v.Env = helper.BoolToPtr(true) + } + if v.ChangeMode == nil { + v.ChangeMode = helper.StringToPtr("restart") + } + if v.ChangeSignal == nil { + v.ChangeSignal = helper.StringToPtr("SIGHUP") + } +} + +// NewTask creates and initializes a new Task. +func NewTask(name, driver string) *Task { + return &Task{ + Name: name, + Driver: driver, + } +} + +// Configure is used to configure a single k/v pair on +// the task. +func (t *Task) SetConfig(key string, val interface{}) *Task { + if t.Config == nil { + t.Config = make(map[string]interface{}) + } + t.Config[key] = val + return t +} + +// SetMeta is used to add metadata k/v pairs to the task. +func (t *Task) SetMeta(key, val string) *Task { + if t.Meta == nil { + t.Meta = make(map[string]string) + } + t.Meta[key] = val + return t +} + +// Require is used to add resource requirements to a task. 
+func (t *Task) Require(r *Resources) *Task { + t.Resources = r + return t +} + +// Constraint adds a new constraints to a single task. +func (t *Task) Constrain(c *Constraint) *Task { + t.Constraints = append(t.Constraints, c) + return t +} + +// SetLogConfig sets a log config to a task +func (t *Task) SetLogConfig(l *LogConfig) *Task { + t.LogConfig = l + return t +} + +// TaskState tracks the current state of a task and events that caused state +// transitions. +type TaskState struct { + State string + Failed bool + Restarts uint64 + LastRestart time.Time + StartedAt time.Time + FinishedAt time.Time + Events []*TaskEvent +} + +const ( + TaskSetup = "Task Setup" + TaskSetupFailure = "Setup Failure" + TaskDriverFailure = "Driver Failure" + TaskDriverMessage = "Driver" + TaskReceived = "Received" + TaskFailedValidation = "Failed Validation" + TaskStarted = "Started" + TaskTerminated = "Terminated" + TaskKilling = "Killing" + TaskKilled = "Killed" + TaskRestarting = "Restarting" + TaskNotRestarting = "Not Restarting" + TaskDownloadingArtifacts = "Downloading Artifacts" + TaskArtifactDownloadFailed = "Failed Artifact Download" + TaskSiblingFailed = "Sibling Task Failed" + TaskSignaling = "Signaling" + TaskRestartSignal = "Restart Signaled" + TaskLeaderDead = "Leader Task Dead" + TaskBuildingTaskDir = "Building Task Directory" + TaskGenericMessage = "Generic" +) + +// TaskEvent is an event that effects the state of a task and contains meta-data +// appropriate to the events type. +type TaskEvent struct { + Type string + Time int64 + FailsTask bool + RestartReason string + SetupError string + DriverError string + DriverMessage string + ExitCode int + Signal int + Message string + KillReason string + KillTimeout time.Duration + KillError string + StartDelay int64 + DownloadError string + ValidationError string + DiskLimit int64 + DiskSize int64 + FailedSibling string + VaultError string + TaskSignalReason string + TaskSignal string + GenericSource string +} From 331068d7c7155735af7c102416ba833e5819f6fa Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Mon, 2 Oct 2017 16:48:23 +0100 Subject: [PATCH 013/329] Added nomad as dependency after include fix --- vendor/vendor.json | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/vendor/vendor.json b/vendor/vendor.json index 2ccc97e6db..ce23fa2198 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -1144,8 +1144,7 @@ "checksumSHA1": "4tY6k1MqB50R66TJJH/rsG69Yd4=", "path": "github.com/hashicorp/nomad/api", "revision": "a1e08240a1cff7d1bd10c89d722110a053f8dc6e", - "revisionTime": "2017-09-20T19:48:06Z", - "version": "v0.7.0-beta1" + "revisionTime": "2017-09-20T19:48:06Z" }, { "checksumSHA1": "/oss17GO4hXGM7QnUdI3VzcAHzA=", From 6a33bf0bf24b987e5030797bfe002205ba950a58 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Mon, 2 Oct 2017 16:56:39 +0100 Subject: [PATCH 014/329] Adding further nomad deps --- vendor/github.com/gorhill/cronexpr/APLv2 | 202 + vendor/github.com/gorhill/cronexpr/GPLv3 | 674 ++ vendor/github.com/gorhill/cronexpr/README.md | 134 + .../github.com/gorhill/cronexpr/cronexpr.go | 266 + .../gorhill/cronexpr/cronexpr_next.go | 292 + .../gorhill/cronexpr/cronexpr_parse.go | 498 ++ .../hashicorp/nomad/api/contexts/contexts.go | 14 + .../hashicorp/nomad/helper/funcs.go | 272 + .../hashicorp/nomad/nomad/structs/bitmap.go | 78 + .../hashicorp/nomad/nomad/structs/diff.go | 1231 ++++ .../hashicorp/nomad/nomad/structs/funcs.go | 310 + .../hashicorp/nomad/nomad/structs/network.go | 326 + 
.../nomad/nomad/structs/node_class.go | 94 + .../hashicorp/nomad/nomad/structs/operator.go | 49 + .../hashicorp/nomad/nomad/structs/structs.go | 5783 +++++++++++++++++ .../nomad/nomad/structs/structs_codegen.go | 3 + vendor/vendor.json | 24 + 17 files changed, 10250 insertions(+) create mode 100644 vendor/github.com/gorhill/cronexpr/APLv2 create mode 100644 vendor/github.com/gorhill/cronexpr/GPLv3 create mode 100644 vendor/github.com/gorhill/cronexpr/README.md create mode 100644 vendor/github.com/gorhill/cronexpr/cronexpr.go create mode 100644 vendor/github.com/gorhill/cronexpr/cronexpr_next.go create mode 100644 vendor/github.com/gorhill/cronexpr/cronexpr_parse.go create mode 100644 vendor/github.com/hashicorp/nomad/api/contexts/contexts.go create mode 100644 vendor/github.com/hashicorp/nomad/helper/funcs.go create mode 100644 vendor/github.com/hashicorp/nomad/nomad/structs/bitmap.go create mode 100644 vendor/github.com/hashicorp/nomad/nomad/structs/diff.go create mode 100644 vendor/github.com/hashicorp/nomad/nomad/structs/funcs.go create mode 100644 vendor/github.com/hashicorp/nomad/nomad/structs/network.go create mode 100644 vendor/github.com/hashicorp/nomad/nomad/structs/node_class.go create mode 100644 vendor/github.com/hashicorp/nomad/nomad/structs/operator.go create mode 100644 vendor/github.com/hashicorp/nomad/nomad/structs/structs.go create mode 100644 vendor/github.com/hashicorp/nomad/nomad/structs/structs_codegen.go diff --git a/vendor/github.com/gorhill/cronexpr/APLv2 b/vendor/github.com/gorhill/cronexpr/APLv2 new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/gorhill/cronexpr/APLv2 @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/gorhill/cronexpr/GPLv3 b/vendor/github.com/gorhill/cronexpr/GPLv3 new file mode 100644 index 0000000000..c13fcfaf1d --- /dev/null +++ b/vendor/github.com/gorhill/cronexpr/GPLv3 @@ -0,0 +1,674 @@ +GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. {http://fsf.org/} + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. 
We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. 
The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. 
+ + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. 
This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). 
+ + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". 
+ + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. 
+ + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. 
+ + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + {one line to give the program's name and a brief idea of what it does.} + Copyright (C) {year} {name of author} + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see {http://www.gnu.org/licenses/}. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + cronexpr Copyright (C) 2013 Raymond Hill + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +{http://www.gnu.org/licenses/}. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. 
If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +{http://www.gnu.org/philosophy/why-not-lgpl.html}. diff --git a/vendor/github.com/gorhill/cronexpr/README.md b/vendor/github.com/gorhill/cronexpr/README.md new file mode 100644 index 0000000000..e8c56d29d2 --- /dev/null +++ b/vendor/github.com/gorhill/cronexpr/README.md @@ -0,0 +1,134 @@ +Golang Cron expression parser +============================= +Given a cron expression and a time stamp, you can get the next time stamp which satisfies the cron expression. + +In another project, I decided to use cron expression syntax to encode scheduling information. Thus this standalone library to parse and apply time stamps to cron expressions. + +The time-matching algorithm in this implementation is efficient, it avoids as much as possible to guess the next matching time stamp, a common technique seen in a number of implementations out there. + +There is also a companion command-line utility to evaluate cron time expressions: (which of course uses this library). + +Implementation +-------------- +The reference documentation for this implementation is found at +, which I copy/pasted here (laziness!) with modifications where this implementation differs: + + Field name Mandatory? Allowed values Allowed special characters + ---------- ---------- -------------- -------------------------- + Seconds No 0-59 * / , - + Minutes Yes 0-59 * / , - + Hours Yes 0-23 * / , - + Day of month Yes 1-31 * / , - L W + Month Yes 1-12 or JAN-DEC * / , - + Day of week Yes 0-6 or SUN-SAT * / , - L # + Year No 1970–2099 * / , - + +#### Asterisk ( * ) +The asterisk indicates that the cron expression matches for all values of the field. E.g., using an asterisk in the 4th field (month) indicates every month. + +#### Slash ( / ) +Slashes describe increments of ranges. For example `3-59/15` in the minute field indicate the third minute of the hour and every 15 minutes thereafter. The form `*/...` is equivalent to the form "first-last/...", that is, an increment over the largest possible range of the field. + +#### Comma ( , ) +Commas are used to separate items of a list. For example, using `MON,WED,FRI` in the 5th field (day of week) means Mondays, Wednesdays and Fridays. + +#### Hyphen ( - ) +Hyphens define ranges. For example, 2000-2010 indicates every year between 2000 and 2010 AD, inclusive. + +#### L +`L` stands for "last". When used in the day-of-week field, it allows you to specify constructs such as "the last Friday" (`5L`) of a given month. In the day-of-month field, it specifies the last day of the month. + +#### W +The `W` character is allowed for the day-of-month field. This character is used to specify the business day (Monday-Friday) nearest the given day. As an example, if you were to specify `15W` as the value for the day-of-month field, the meaning is: "the nearest business day to the 15th of the month." + +So, if the 15th is a Saturday, the trigger fires on Friday the 14th. If the 15th is a Sunday, the trigger fires on Monday the 16th. If the 15th is a Tuesday, then it fires on Tuesday the 15th. However if you specify `1W` as the value for day-of-month, and the 1st is a Saturday, the trigger fires on Monday the 3rd, as it does not 'jump' over the boundary of a month's days. + +The `W` character can be specified only when the day-of-month is a single day, not a range or list of days. + +The `W` character can also be combined with `L`, i.e. `LW` to mean "the last business day of the month." 
+ +#### Hash ( # ) +`#` is allowed for the day-of-week field, and must be followed by a number between one and five. It allows you to specify constructs such as "the second Friday" of a given month. + +Predefined cron expressions +--------------------------- +(Copied from , with text modified according to this implementation) + + Entry Description Equivalent to + @annually Run once a year at midnight in the morning of January 1 0 0 0 1 1 * * + @yearly Run once a year at midnight in the morning of January 1 0 0 0 1 1 * * + @monthly Run once a month at midnight in the morning of the first of the month 0 0 0 1 * * * + @weekly Run once a week at midnight in the morning of Sunday 0 0 0 * * 0 * + @daily Run once a day at midnight 0 0 0 * * * * + @hourly Run once an hour at the beginning of the hour 0 0 * * * * * + @reboot Not supported + +Other details +------------- +* If only six fields are present, a `0` second field is prepended, that is, `* * * * * 2013` internally become `0 * * * * * 2013`. +* If only five fields are present, a `0` second field is prepended and a wildcard year field is appended, that is, `* * * * Mon` internally become `0 * * * * Mon *`. +* Domain for day-of-week field is [0-7] instead of [0-6], 7 being Sunday (like 0). This to comply with http://linux.die.net/man/5/crontab#. +* As of now, the behavior of the code is undetermined if a malformed cron expression is supplied + +Install +------- + go get github.com/gorhill/cronexpr + +Usage +----- +Import the library: + + import "github.com/gorhill/cronexpr" + import "time" + +Simplest way: + + nextTime := cronexpr.MustParse("0 0 29 2 *").Next(time.Now()) + +Assuming `time.Now()` is "2013-08-29 09:28:00", then `nextTime` will be "2016-02-29 00:00:00". + +You can keep the returned Expression pointer around if you want to reuse it: + + expr := cronexpr.MustParse("0 0 29 2 *") + nextTime := expr.Next(time.Now()) + ... + nextTime = expr.Next(nextTime) + +Use `time.IsZero()` to find out whether a valid time was returned. For example, + + cronexpr.MustParse("* * * * * 1980").Next(time.Now()).IsZero() + +will return `true`, whereas + + cronexpr.MustParse("* * * * * 2050").Next(time.Now()).IsZero() + +will return `false` (as of 2013-08-29...) + +You may also query for `n` next time stamps: + + cronexpr.MustParse("0 0 29 2 *").NextN(time.Now(), 5) + +which returns a slice of time.Time objects, containing the following time stamps (as of 2013-08-30): + + 2016-02-29 00:00:00 + 2020-02-29 00:00:00 + 2024-02-29 00:00:00 + 2028-02-29 00:00:00 + 2032-02-29 00:00:00 + +The time zone of time values returned by `Next` and `NextN` is always the +time zone of the time value passed as argument, unless a zero time value is +returned. + +API +--- + + +License +------- + +License: pick the one which suits you best: + +- GPL v3 see +- APL v2 see + diff --git a/vendor/github.com/gorhill/cronexpr/cronexpr.go b/vendor/github.com/gorhill/cronexpr/cronexpr.go new file mode 100644 index 0000000000..58b518fa58 --- /dev/null +++ b/vendor/github.com/gorhill/cronexpr/cronexpr.go @@ -0,0 +1,266 @@ +/*! + * Copyright 2013 Raymond Hill + * + * Project: github.com/gorhill/cronexpr + * File: cronexpr.go + * Version: 1.0 + * License: pick the one which suits you : + * GPL v3 see + * APL v2 see + * + */ + +// Package cronexpr parses cron time expressions. 
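+//
+// A minimal usage sketch (see the README in this directory for the full
+// grammar):
+//
+//	expr, err := cronexpr.Parse("*/5 * * * *")
+//	if err != nil {
+//		// handle the malformed expression
+//	}
+//	next := expr.Next(time.Now())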
+package cronexpr + +/******************************************************************************/ + +import ( + "fmt" + "sort" + "time" +) + +/******************************************************************************/ + +// A Expression represents a specific cron time expression as defined at +// +type Expression struct { + expression string + secondList []int + minuteList []int + hourList []int + daysOfMonth map[int]bool + workdaysOfMonth map[int]bool + lastDayOfMonth bool + lastWorkdayOfMonth bool + daysOfMonthRestricted bool + actualDaysOfMonthList []int + monthList []int + daysOfWeek map[int]bool + specificWeekDaysOfWeek map[int]bool + lastWeekDaysOfWeek map[int]bool + daysOfWeekRestricted bool + yearList []int +} + +/******************************************************************************/ + +// MustParse returns a new Expression pointer. It expects a well-formed cron +// expression. If a malformed cron expression is supplied, it will `panic`. +// See for documentation +// about what is a well-formed cron expression from this library's point of +// view. +func MustParse(cronLine string) *Expression { + expr, err := Parse(cronLine) + if err != nil { + panic(err) + } + return expr +} + +/******************************************************************************/ + +// Parse returns a new Expression pointer. An error is returned if a malformed +// cron expression is supplied. +// See for documentation +// about what is a well-formed cron expression from this library's point of +// view. +func Parse(cronLine string) (*Expression, error) { + + // Maybe one of the built-in aliases is being used + cron := cronNormalizer.Replace(cronLine) + + indices := fieldFinder.FindAllStringIndex(cron, -1) + fieldCount := len(indices) + if fieldCount < 5 { + return nil, fmt.Errorf("missing field(s)") + } + // ignore fields beyond 7th + if fieldCount > 7 { + fieldCount = 7 + } + + var expr = Expression{} + var field = 0 + var err error + + // second field (optional) + if fieldCount == 7 { + err = expr.secondFieldHandler(cron[indices[field][0]:indices[field][1]]) + if err != nil { + return nil, err + } + field += 1 + } else { + expr.secondList = []int{0} + } + + // minute field + err = expr.minuteFieldHandler(cron[indices[field][0]:indices[field][1]]) + if err != nil { + return nil, err + } + field += 1 + + // hour field + err = expr.hourFieldHandler(cron[indices[field][0]:indices[field][1]]) + if err != nil { + return nil, err + } + field += 1 + + // day of month field + err = expr.domFieldHandler(cron[indices[field][0]:indices[field][1]]) + if err != nil { + return nil, err + } + field += 1 + + // month field + err = expr.monthFieldHandler(cron[indices[field][0]:indices[field][1]]) + if err != nil { + return nil, err + } + field += 1 + + // day of week field + err = expr.dowFieldHandler(cron[indices[field][0]:indices[field][1]]) + if err != nil { + return nil, err + } + field += 1 + + // year field + if field < fieldCount { + err = expr.yearFieldHandler(cron[indices[field][0]:indices[field][1]]) + if err != nil { + return nil, err + } + } else { + expr.yearList = yearDescriptor.defaultList + } + + return &expr, nil +} + +/******************************************************************************/ + +// Next returns the closest time instant immediately following `fromTime` which +// matches the cron expression `expr`. +// +// The `time.Location` of the returned time instant is the same as that of +// `fromTime`. 
+// +// The zero value of time.Time is returned if no matching time instant exists +// or if a `fromTime` is itself a zero value. +func (expr *Expression) Next(fromTime time.Time) time.Time { + // Special case + if fromTime.IsZero() { + return fromTime + } + + // Since expr.nextSecond()-expr.nextMonth() expects that the + // supplied time stamp is a perfect match to the underlying cron + // expression, and since this function is an entry point where `fromTime` + // does not necessarily matches the underlying cron expression, + // we first need to ensure supplied time stamp matches + // the cron expression. If not, this means the supplied time + // stamp falls in between matching time stamps, thus we move + // to closest future matching immediately upon encountering a mismatching + // time stamp. + + // year + v := fromTime.Year() + i := sort.SearchInts(expr.yearList, v) + if i == len(expr.yearList) { + return time.Time{} + } + if v != expr.yearList[i] { + return expr.nextYear(fromTime) + } + // month + v = int(fromTime.Month()) + i = sort.SearchInts(expr.monthList, v) + if i == len(expr.monthList) { + return expr.nextYear(fromTime) + } + if v != expr.monthList[i] { + return expr.nextMonth(fromTime) + } + + expr.actualDaysOfMonthList = expr.calculateActualDaysOfMonth(fromTime.Year(), int(fromTime.Month())) + if len(expr.actualDaysOfMonthList) == 0 { + return expr.nextMonth(fromTime) + } + + // day of month + v = fromTime.Day() + i = sort.SearchInts(expr.actualDaysOfMonthList, v) + if i == len(expr.actualDaysOfMonthList) { + return expr.nextMonth(fromTime) + } + if v != expr.actualDaysOfMonthList[i] { + return expr.nextDayOfMonth(fromTime) + } + // hour + v = fromTime.Hour() + i = sort.SearchInts(expr.hourList, v) + if i == len(expr.hourList) { + return expr.nextDayOfMonth(fromTime) + } + if v != expr.hourList[i] { + return expr.nextHour(fromTime) + } + // minute + v = fromTime.Minute() + i = sort.SearchInts(expr.minuteList, v) + if i == len(expr.minuteList) { + return expr.nextHour(fromTime) + } + if v != expr.minuteList[i] { + return expr.nextMinute(fromTime) + } + // second + v = fromTime.Second() + i = sort.SearchInts(expr.secondList, v) + if i == len(expr.secondList) { + return expr.nextMinute(fromTime) + } + + // If we reach this point, there is nothing better to do + // than to move to the next second + + return expr.nextSecond(fromTime) +} + +/******************************************************************************/ + +// NextN returns a slice of `n` closest time instants immediately following +// `fromTime` which match the cron expression `expr`. +// +// The time instants in the returned slice are in chronological ascending order. +// The `time.Location` of the returned time instants is the same as that of +// `fromTime`. +// +// A slice with len between [0-`n`] is returned, that is, if not enough existing +// matching time instants exist, the number of returned entries will be less +// than `n`. 
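+//
+// For example (a sketch):
+//
+//	upcoming := expr.NextN(time.Now(), 3) // up to three future matching instants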
+func (expr *Expression) NextN(fromTime time.Time, n uint) []time.Time { + nextTimes := make([]time.Time, 0, n) + if n > 0 { + fromTime = expr.Next(fromTime) + for { + if fromTime.IsZero() { + break + } + nextTimes = append(nextTimes, fromTime) + n -= 1 + if n == 0 { + break + } + fromTime = expr.nextSecond(fromTime) + } + } + return nextTimes +} diff --git a/vendor/github.com/gorhill/cronexpr/cronexpr_next.go b/vendor/github.com/gorhill/cronexpr/cronexpr_next.go new file mode 100644 index 0000000000..a0ebdb6b20 --- /dev/null +++ b/vendor/github.com/gorhill/cronexpr/cronexpr_next.go @@ -0,0 +1,292 @@ +/*! + * Copyright 2013 Raymond Hill + * + * Project: github.com/gorhill/cronexpr + * File: cronexpr_next.go + * Version: 1.0 + * License: pick the one which suits you : + * GPL v3 see + * APL v2 see + * + */ + +package cronexpr + +/******************************************************************************/ + +import ( + "sort" + "time" +) + +/******************************************************************************/ + +var dowNormalizedOffsets = [][]int{ + {1, 8, 15, 22, 29}, + {2, 9, 16, 23, 30}, + {3, 10, 17, 24, 31}, + {4, 11, 18, 25}, + {5, 12, 19, 26}, + {6, 13, 20, 27}, + {7, 14, 21, 28}, +} + +/******************************************************************************/ + +func (expr *Expression) nextYear(t time.Time) time.Time { + // Find index at which item in list is greater or equal to + // candidate year + i := sort.SearchInts(expr.yearList, t.Year()+1) + if i == len(expr.yearList) { + return time.Time{} + } + // Year changed, need to recalculate actual days of month + expr.actualDaysOfMonthList = expr.calculateActualDaysOfMonth(expr.yearList[i], expr.monthList[0]) + if len(expr.actualDaysOfMonthList) == 0 { + return expr.nextMonth(time.Date( + expr.yearList[i], + time.Month(expr.monthList[0]), + 1, + expr.hourList[0], + expr.minuteList[0], + expr.secondList[0], + 0, + t.Location())) + } + return time.Date( + expr.yearList[i], + time.Month(expr.monthList[0]), + expr.actualDaysOfMonthList[0], + expr.hourList[0], + expr.minuteList[0], + expr.secondList[0], + 0, + t.Location()) +} + +/******************************************************************************/ + +func (expr *Expression) nextMonth(t time.Time) time.Time { + // Find index at which item in list is greater or equal to + // candidate month + i := sort.SearchInts(expr.monthList, int(t.Month())+1) + if i == len(expr.monthList) { + return expr.nextYear(t) + } + // Month changed, need to recalculate actual days of month + expr.actualDaysOfMonthList = expr.calculateActualDaysOfMonth(t.Year(), expr.monthList[i]) + if len(expr.actualDaysOfMonthList) == 0 { + return expr.nextMonth(time.Date( + t.Year(), + time.Month(expr.monthList[i]), + 1, + expr.hourList[0], + expr.minuteList[0], + expr.secondList[0], + 0, + t.Location())) + } + + return time.Date( + t.Year(), + time.Month(expr.monthList[i]), + expr.actualDaysOfMonthList[0], + expr.hourList[0], + expr.minuteList[0], + expr.secondList[0], + 0, + t.Location()) +} + +/******************************************************************************/ + +func (expr *Expression) nextDayOfMonth(t time.Time) time.Time { + // Find index at which item in list is greater or equal to + // candidate day of month + i := sort.SearchInts(expr.actualDaysOfMonthList, t.Day()+1) + if i == len(expr.actualDaysOfMonthList) { + return expr.nextMonth(t) + } + + return time.Date( + t.Year(), + t.Month(), + expr.actualDaysOfMonthList[i], + expr.hourList[0], + expr.minuteList[0], + 
expr.secondList[0], + 0, + t.Location()) +} + +/******************************************************************************/ + +func (expr *Expression) nextHour(t time.Time) time.Time { + // Find index at which item in list is greater or equal to + // candidate hour + i := sort.SearchInts(expr.hourList, t.Hour()+1) + if i == len(expr.hourList) { + return expr.nextDayOfMonth(t) + } + + return time.Date( + t.Year(), + t.Month(), + t.Day(), + expr.hourList[i], + expr.minuteList[0], + expr.secondList[0], + 0, + t.Location()) +} + +/******************************************************************************/ + +func (expr *Expression) nextMinute(t time.Time) time.Time { + // Find index at which item in list is greater or equal to + // candidate minute + i := sort.SearchInts(expr.minuteList, t.Minute()+1) + if i == len(expr.minuteList) { + return expr.nextHour(t) + } + + return time.Date( + t.Year(), + t.Month(), + t.Day(), + t.Hour(), + expr.minuteList[i], + expr.secondList[0], + 0, + t.Location()) +} + +/******************************************************************************/ + +func (expr *Expression) nextSecond(t time.Time) time.Time { + // nextSecond() assumes all other fields are exactly matched + // to the cron expression + + // Find index at which item in list is greater or equal to + // candidate second + i := sort.SearchInts(expr.secondList, t.Second()+1) + if i == len(expr.secondList) { + return expr.nextMinute(t) + } + + return time.Date( + t.Year(), + t.Month(), + t.Day(), + t.Hour(), + t.Minute(), + expr.secondList[i], + 0, + t.Location()) +} + +/******************************************************************************/ + +func (expr *Expression) calculateActualDaysOfMonth(year, month int) []int { + actualDaysOfMonthMap := make(map[int]bool) + firstDayOfMonth := time.Date(year, time.Month(month), 1, 0, 0, 0, 0, time.UTC) + lastDayOfMonth := firstDayOfMonth.AddDate(0, 1, -1) + + // As per crontab man page (http://linux.die.net/man/5/crontab#): + // "The day of a command's execution can be specified by two + // "fields - day of month, and day of week. If both fields are + // "restricted (ie, aren't *), the command will be run when + // "either field matches the current time" + + // If both fields are not restricted, all days of the month are a hit + if expr.daysOfMonthRestricted == false && expr.daysOfWeekRestricted == false { + return genericDefaultList[1 : lastDayOfMonth.Day()+1] + } + + // day-of-month != `*` + if expr.daysOfMonthRestricted { + // Last day of month + if expr.lastDayOfMonth { + actualDaysOfMonthMap[lastDayOfMonth.Day()] = true + } + // Last work day of month + if expr.lastWorkdayOfMonth { + actualDaysOfMonthMap[workdayOfMonth(lastDayOfMonth, lastDayOfMonth)] = true + } + // Days of month + for v := range expr.daysOfMonth { + // Ignore days beyond end of month + if v <= lastDayOfMonth.Day() { + actualDaysOfMonthMap[v] = true + } + } + // Work days of month + // As per Wikipedia: month boundaries are not crossed. 
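+		// For example, `1W` in a month that starts on a Saturday resolves to
+		// Monday the 3rd rather than to the last Friday of the previous month.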
+ for v := range expr.workdaysOfMonth { + // Ignore days beyond end of month + if v <= lastDayOfMonth.Day() { + actualDaysOfMonthMap[workdayOfMonth(firstDayOfMonth.AddDate(0, 0, v-1), lastDayOfMonth)] = true + } + } + } + + // day-of-week != `*` + if expr.daysOfWeekRestricted { + // How far first sunday is from first day of month + offset := 7 - int(firstDayOfMonth.Weekday()) + // days of week + // offset : (7 - day_of_week_of_1st_day_of_month) + // target : 1 + (7 * week_of_month) + (offset + day_of_week) % 7 + for v := range expr.daysOfWeek { + w := dowNormalizedOffsets[(offset+v)%7] + actualDaysOfMonthMap[w[0]] = true + actualDaysOfMonthMap[w[1]] = true + actualDaysOfMonthMap[w[2]] = true + actualDaysOfMonthMap[w[3]] = true + if len(w) > 4 && w[4] <= lastDayOfMonth.Day() { + actualDaysOfMonthMap[w[4]] = true + } + } + // days of week of specific week in the month + // offset : (7 - day_of_week_of_1st_day_of_month) + // target : 1 + (7 * week_of_month) + (offset + day_of_week) % 7 + for v := range expr.specificWeekDaysOfWeek { + v = 1 + 7*(v/7) + (offset+v)%7 + if v <= lastDayOfMonth.Day() { + actualDaysOfMonthMap[v] = true + } + } + // Last days of week of the month + lastWeekOrigin := firstDayOfMonth.AddDate(0, 1, -7) + offset = 7 - int(lastWeekOrigin.Weekday()) + for v := range expr.lastWeekDaysOfWeek { + v = lastWeekOrigin.Day() + (offset+v)%7 + if v <= lastDayOfMonth.Day() { + actualDaysOfMonthMap[v] = true + } + } + } + + return toList(actualDaysOfMonthMap) +} + +func workdayOfMonth(targetDom, lastDom time.Time) int { + // If saturday, then friday + // If sunday, then monday + dom := targetDom.Day() + dow := targetDom.Weekday() + if dow == time.Saturday { + if dom > 1 { + dom -= 1 + } else { + dom += 2 + } + } else if dow == time.Sunday { + if dom < lastDom.Day() { + dom += 1 + } else { + dom -= 2 + } + } + return dom +} diff --git a/vendor/github.com/gorhill/cronexpr/cronexpr_parse.go b/vendor/github.com/gorhill/cronexpr/cronexpr_parse.go new file mode 100644 index 0000000000..be6ef1769f --- /dev/null +++ b/vendor/github.com/gorhill/cronexpr/cronexpr_parse.go @@ -0,0 +1,498 @@ +/*! 
+ * Copyright 2013 Raymond Hill + * + * Project: github.com/gorhill/cronexpr + * File: cronexpr_parse.go + * Version: 1.0 + * License: pick the one which suits you best: + * GPL v3 see + * APL v2 see + * + */ + +package cronexpr + +/******************************************************************************/ + +import ( + "fmt" + "regexp" + "sort" + "strings" +) + +/******************************************************************************/ + +var ( + genericDefaultList = []int{ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + } + yearDefaultList = []int{ + 1970, 1971, 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, + 1980, 1981, 1982, 1983, 1984, 1985, 1986, 1987, 1988, 1989, + 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, + 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, + 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, + 2020, 2021, 2022, 2023, 2024, 2025, 2026, 2027, 2028, 2029, + 2030, 2031, 2032, 2033, 2034, 2035, 2036, 2037, 2038, 2039, + 2040, 2041, 2042, 2043, 2044, 2045, 2046, 2047, 2048, 2049, + 2050, 2051, 2052, 2053, 2054, 2055, 2056, 2057, 2058, 2059, + 2060, 2061, 2062, 2063, 2064, 2065, 2066, 2067, 2068, 2069, + 2070, 2071, 2072, 2073, 2074, 2075, 2076, 2077, 2078, 2079, + 2080, 2081, 2082, 2083, 2084, 2085, 2086, 2087, 2088, 2089, + 2090, 2091, 2092, 2093, 2094, 2095, 2096, 2097, 2098, 2099, + } +) + +/******************************************************************************/ + +var ( + numberTokens = map[string]int{ + "0": 0, "1": 1, "2": 2, "3": 3, "4": 4, "5": 5, "6": 6, "7": 7, "8": 8, "9": 9, + "00": 0, "01": 1, "02": 2, "03": 3, "04": 4, "05": 5, "06": 6, "07": 7, "08": 8, "09": 9, + "10": 10, "11": 11, "12": 12, "13": 13, "14": 14, "15": 15, "16": 16, "17": 17, "18": 18, "19": 19, + "20": 20, "21": 21, "22": 22, "23": 23, "24": 24, "25": 25, "26": 26, "27": 27, "28": 28, "29": 29, + "30": 30, "31": 31, "32": 32, "33": 33, "34": 34, "35": 35, "36": 36, "37": 37, "38": 38, "39": 39, + "40": 40, "41": 41, "42": 42, "43": 43, "44": 44, "45": 45, "46": 46, "47": 47, "48": 48, "49": 49, + "50": 50, "51": 51, "52": 52, "53": 53, "54": 54, "55": 55, "56": 56, "57": 57, "58": 58, "59": 59, + "1970": 1970, "1971": 1971, "1972": 1972, "1973": 1973, "1974": 1974, "1975": 1975, "1976": 1976, "1977": 1977, "1978": 1978, "1979": 1979, + "1980": 1980, "1981": 1981, "1982": 1982, "1983": 1983, "1984": 1984, "1985": 1985, "1986": 1986, "1987": 1987, "1988": 1988, "1989": 1989, + "1990": 1990, "1991": 1991, "1992": 1992, "1993": 1993, "1994": 1994, "1995": 1995, "1996": 1996, "1997": 1997, "1998": 1998, "1999": 1999, + "2000": 2000, "2001": 2001, "2002": 2002, "2003": 2003, "2004": 2004, "2005": 2005, "2006": 2006, "2007": 2007, "2008": 2008, "2009": 2009, + "2010": 2010, "2011": 2011, "2012": 2012, "2013": 2013, "2014": 2014, "2015": 2015, "2016": 2016, "2017": 2017, "2018": 2018, "2019": 2019, + "2020": 2020, "2021": 2021, "2022": 2022, "2023": 2023, "2024": 2024, "2025": 2025, "2026": 2026, "2027": 2027, "2028": 2028, "2029": 2029, + "2030": 2030, "2031": 2031, "2032": 2032, "2033": 2033, "2034": 2034, "2035": 2035, "2036": 2036, "2037": 2037, "2038": 2038, "2039": 2039, + "2040": 2040, "2041": 2041, "2042": 2042, "2043": 2043, "2044": 2044, "2045": 2045, "2046": 2046, "2047": 2047, "2048": 2048, "2049": 2049, + "2050": 2050, "2051": 2051, 
"2052": 2052, "2053": 2053, "2054": 2054, "2055": 2055, "2056": 2056, "2057": 2057, "2058": 2058, "2059": 2059, + "2060": 2060, "2061": 2061, "2062": 2062, "2063": 2063, "2064": 2064, "2065": 2065, "2066": 2066, "2067": 2067, "2068": 2068, "2069": 2069, + "2070": 2070, "2071": 2071, "2072": 2072, "2073": 2073, "2074": 2074, "2075": 2075, "2076": 2076, "2077": 2077, "2078": 2078, "2079": 2079, + "2080": 2080, "2081": 2081, "2082": 2082, "2083": 2083, "2084": 2084, "2085": 2085, "2086": 2086, "2087": 2087, "2088": 2088, "2089": 2089, + "2090": 2090, "2091": 2091, "2092": 2092, "2093": 2093, "2094": 2094, "2095": 2095, "2096": 2096, "2097": 2097, "2098": 2098, "2099": 2099, + } + monthTokens = map[string]int{ + `1`: 1, `jan`: 1, `january`: 1, + `2`: 2, `feb`: 2, `february`: 2, + `3`: 3, `mar`: 3, `march`: 3, + `4`: 4, `apr`: 4, `april`: 4, + `5`: 5, `may`: 5, + `6`: 6, `jun`: 6, `june`: 6, + `7`: 7, `jul`: 7, `july`: 7, + `8`: 8, `aug`: 8, `august`: 8, + `9`: 9, `sep`: 9, `september`: 9, + `10`: 10, `oct`: 10, `october`: 10, + `11`: 11, `nov`: 11, `november`: 11, + `12`: 12, `dec`: 12, `december`: 12, + } + dowTokens = map[string]int{ + `0`: 0, `sun`: 0, `sunday`: 0, + `1`: 1, `mon`: 1, `monday`: 1, + `2`: 2, `tue`: 2, `tuesday`: 2, + `3`: 3, `wed`: 3, `wednesday`: 3, + `4`: 4, `thu`: 4, `thursday`: 4, + `5`: 5, `fri`: 5, `friday`: 5, + `6`: 6, `sat`: 6, `saturday`: 6, + `7`: 0, + } +) + +/******************************************************************************/ + +func atoi(s string) int { + return numberTokens[s] +} + +type fieldDescriptor struct { + name string + min, max int + defaultList []int + valuePattern string + atoi func(string) int +} + +var ( + secondDescriptor = fieldDescriptor{ + name: "second", + min: 0, + max: 59, + defaultList: genericDefaultList[0:60], + valuePattern: `0?[0-9]|[1-5][0-9]`, + atoi: atoi, + } + minuteDescriptor = fieldDescriptor{ + name: "minute", + min: 0, + max: 59, + defaultList: genericDefaultList[0:60], + valuePattern: `0?[0-9]|[1-5][0-9]`, + atoi: atoi, + } + hourDescriptor = fieldDescriptor{ + name: "hour", + min: 0, + max: 23, + defaultList: genericDefaultList[0:24], + valuePattern: `0?[0-9]|1[0-9]|2[0-3]`, + atoi: atoi, + } + domDescriptor = fieldDescriptor{ + name: "day-of-month", + min: 1, + max: 31, + defaultList: genericDefaultList[1:32], + valuePattern: `0?[1-9]|[12][0-9]|3[01]`, + atoi: atoi, + } + monthDescriptor = fieldDescriptor{ + name: "month", + min: 1, + max: 12, + defaultList: genericDefaultList[1:13], + valuePattern: `0?[1-9]|1[012]|jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec|january|february|march|april|march|april|june|july|august|september|october|november|december`, + atoi: func(s string) int { + return monthTokens[s] + }, + } + dowDescriptor = fieldDescriptor{ + name: "day-of-week", + min: 0, + max: 6, + defaultList: genericDefaultList[0:7], + valuePattern: `0?[0-7]|sun|mon|tue|wed|thu|fri|sat|sunday|monday|tuesday|wednesday|thursday|friday|saturday`, + atoi: func(s string) int { + return dowTokens[s] + }, + } + yearDescriptor = fieldDescriptor{ + name: "year", + min: 1970, + max: 2099, + defaultList: yearDefaultList[:], + valuePattern: `19[789][0-9]|20[0-9]{2}`, + atoi: atoi, + } +) + +/******************************************************************************/ + +var ( + layoutWildcard = `^\*$|^\?$` + layoutValue = `^(%value%)$` + layoutRange = `^(%value%)-(%value%)$` + layoutWildcardAndInterval = `^\*/(\d+)$` + layoutValueAndInterval = `^(%value%)/(\d+)$` + layoutRangeAndInterval = `^(%value%)-(%value%)/(\d+)$` + 
layoutLastDom = `^l$` + layoutWorkdom = `^(%value%)w$` + layoutLastWorkdom = `^lw$` + layoutDowOfLastWeek = `^(%value%)l$` + layoutDowOfSpecificWeek = `^(%value%)#([1-5])$` + fieldFinder = regexp.MustCompile(`\S+`) + entryFinder = regexp.MustCompile(`[^,]+`) + layoutRegexp = make(map[string]*regexp.Regexp) +) + +/******************************************************************************/ + +var cronNormalizer = strings.NewReplacer( + "@yearly", "0 0 0 1 1 * *", + "@annually", "0 0 0 1 1 * *", + "@monthly", "0 0 0 1 * * *", + "@weekly", "0 0 0 * * 0 *", + "@daily", "0 0 0 * * * *", + "@hourly", "0 0 * * * * *") + +/******************************************************************************/ + +func (expr *Expression) secondFieldHandler(s string) error { + var err error + expr.secondList, err = genericFieldHandler(s, secondDescriptor) + return err +} + +/******************************************************************************/ + +func (expr *Expression) minuteFieldHandler(s string) error { + var err error + expr.minuteList, err = genericFieldHandler(s, minuteDescriptor) + return err +} + +/******************************************************************************/ + +func (expr *Expression) hourFieldHandler(s string) error { + var err error + expr.hourList, err = genericFieldHandler(s, hourDescriptor) + return err +} + +/******************************************************************************/ + +func (expr *Expression) monthFieldHandler(s string) error { + var err error + expr.monthList, err = genericFieldHandler(s, monthDescriptor) + return err +} + +/******************************************************************************/ + +func (expr *Expression) yearFieldHandler(s string) error { + var err error + expr.yearList, err = genericFieldHandler(s, yearDescriptor) + return err +} + +/******************************************************************************/ + +const ( + none = 0 + one = 1 + span = 2 + all = 3 +) + +type cronDirective struct { + kind int + first int + last int + step int + sbeg int + send int +} + +func genericFieldHandler(s string, desc fieldDescriptor) ([]int, error) { + directives, err := genericFieldParse(s, desc) + if err != nil { + return nil, err + } + values := make(map[int]bool) + for _, directive := range directives { + switch directive.kind { + case none: + return nil, fmt.Errorf("syntax error in %s field: '%s'", desc.name, s[directive.sbeg:directive.send]) + case one: + populateOne(values, directive.first) + case span: + populateMany(values, directive.first, directive.last, directive.step) + case all: + return desc.defaultList, nil + } + } + return toList(values), nil +} + +func (expr *Expression) dowFieldHandler(s string) error { + expr.daysOfWeekRestricted = true + expr.daysOfWeek = make(map[int]bool) + expr.lastWeekDaysOfWeek = make(map[int]bool) + expr.specificWeekDaysOfWeek = make(map[int]bool) + + directives, err := genericFieldParse(s, dowDescriptor) + if err != nil { + return err + } + + for _, directive := range directives { + switch directive.kind { + case none: + sdirective := s[directive.sbeg:directive.send] + snormal := strings.ToLower(sdirective) + // `5L` + pairs := makeLayoutRegexp(layoutDowOfLastWeek, dowDescriptor.valuePattern).FindStringSubmatchIndex(snormal) + if len(pairs) > 0 { + populateOne(expr.lastWeekDaysOfWeek, dowDescriptor.atoi(snormal[pairs[2]:pairs[3]])) + } else { + // `5#3` + pairs := makeLayoutRegexp(layoutDowOfSpecificWeek, dowDescriptor.valuePattern).FindStringSubmatchIndex(snormal) + if 
len(pairs) > 0 { + populateOne(expr.specificWeekDaysOfWeek, (dowDescriptor.atoi(snormal[pairs[4]:pairs[5]])-1)*7+(dowDescriptor.atoi(snormal[pairs[2]:pairs[3]])%7)) + } else { + return fmt.Errorf("syntax error in day-of-week field: '%s'", sdirective) + } + } + case one: + populateOne(expr.daysOfWeek, directive.first) + case span: + populateMany(expr.daysOfWeek, directive.first, directive.last, directive.step) + case all: + populateMany(expr.daysOfWeek, directive.first, directive.last, directive.step) + expr.daysOfWeekRestricted = false + } + } + return nil +} + +func (expr *Expression) domFieldHandler(s string) error { + expr.daysOfMonthRestricted = true + expr.lastDayOfMonth = false + expr.lastWorkdayOfMonth = false + expr.daysOfMonth = make(map[int]bool) // days of month map + expr.workdaysOfMonth = make(map[int]bool) // work days of month map + + directives, err := genericFieldParse(s, domDescriptor) + if err != nil { + return err + } + + for _, directive := range directives { + switch directive.kind { + case none: + sdirective := s[directive.sbeg:directive.send] + snormal := strings.ToLower(sdirective) + // `L` + if makeLayoutRegexp(layoutLastDom, domDescriptor.valuePattern).MatchString(snormal) { + expr.lastDayOfMonth = true + } else { + // `LW` + if makeLayoutRegexp(layoutLastWorkdom, domDescriptor.valuePattern).MatchString(snormal) { + expr.lastWorkdayOfMonth = true + } else { + // `15W` + pairs := makeLayoutRegexp(layoutWorkdom, domDescriptor.valuePattern).FindStringSubmatchIndex(snormal) + if len(pairs) > 0 { + populateOne(expr.workdaysOfMonth, domDescriptor.atoi(snormal[pairs[2]:pairs[3]])) + } else { + return fmt.Errorf("syntax error in day-of-month field: '%s'", sdirective) + } + } + } + case one: + populateOne(expr.daysOfMonth, directive.first) + case span: + populateMany(expr.daysOfMonth, directive.first, directive.last, directive.step) + case all: + populateMany(expr.daysOfMonth, directive.first, directive.last, directive.step) + expr.daysOfMonthRestricted = false + } + } + return nil +} + +/******************************************************************************/ + +func populateOne(values map[int]bool, v int) { + values[v] = true +} + +func populateMany(values map[int]bool, min, max, step int) { + for i := min; i <= max; i += step { + values[i] = true + } +} + +func toList(set map[int]bool) []int { + list := make([]int, len(set)) + i := 0 + for k := range set { + list[i] = k + i += 1 + } + sort.Ints(list) + return list +} + +/******************************************************************************/ + +func genericFieldParse(s string, desc fieldDescriptor) ([]*cronDirective, error) { + // At least one entry must be present + indices := entryFinder.FindAllStringIndex(s, -1) + if len(indices) == 0 { + return nil, fmt.Errorf("%s field: missing directive", desc.name) + } + + directives := make([]*cronDirective, 0, len(indices)) + + for i := range indices { + directive := cronDirective{ + sbeg: indices[i][0], + send: indices[i][1], + } + snormal := strings.ToLower(s[indices[i][0]:indices[i][1]]) + + // `*` + if makeLayoutRegexp(layoutWildcard, desc.valuePattern).MatchString(snormal) { + directive.kind = all + directive.first = desc.min + directive.last = desc.max + directive.step = 1 + directives = append(directives, &directive) + continue + } + // `5` + if makeLayoutRegexp(layoutValue, desc.valuePattern).MatchString(snormal) { + directive.kind = one + directive.first = desc.atoi(snormal) + directives = append(directives, &directive) + continue + } + // `5-20` + 
pairs := makeLayoutRegexp(layoutRange, desc.valuePattern).FindStringSubmatchIndex(snormal) + if len(pairs) > 0 { + directive.kind = span + directive.first = desc.atoi(snormal[pairs[2]:pairs[3]]) + directive.last = desc.atoi(snormal[pairs[4]:pairs[5]]) + directive.step = 1 + directives = append(directives, &directive) + continue + } + // `*/2` + pairs = makeLayoutRegexp(layoutWildcardAndInterval, desc.valuePattern).FindStringSubmatchIndex(snormal) + if len(pairs) > 0 { + directive.kind = span + directive.first = desc.min + directive.last = desc.max + directive.step = atoi(snormal[pairs[2]:pairs[3]]) + if directive.step < 1 || directive.step > desc.max { + return nil, fmt.Errorf("invalid interval %s", snormal) + } + directives = append(directives, &directive) + continue + } + // `5/2` + pairs = makeLayoutRegexp(layoutValueAndInterval, desc.valuePattern).FindStringSubmatchIndex(snormal) + if len(pairs) > 0 { + directive.kind = span + directive.first = desc.atoi(snormal[pairs[2]:pairs[3]]) + directive.last = desc.max + directive.step = atoi(snormal[pairs[4]:pairs[5]]) + if directive.step < 1 || directive.step > desc.max { + return nil, fmt.Errorf("invalid interval %s", snormal) + } + directives = append(directives, &directive) + continue + } + // `5-20/2` + pairs = makeLayoutRegexp(layoutRangeAndInterval, desc.valuePattern).FindStringSubmatchIndex(snormal) + if len(pairs) > 0 { + directive.kind = span + directive.first = desc.atoi(snormal[pairs[2]:pairs[3]]) + directive.last = desc.atoi(snormal[pairs[4]:pairs[5]]) + directive.step = atoi(snormal[pairs[6]:pairs[7]]) + if directive.step < 1 || directive.step > desc.max { + return nil, fmt.Errorf("invalid interval %s", snormal) + } + directives = append(directives, &directive) + continue + } + // No behavior for this one, let caller deal with it + directive.kind = none + directives = append(directives, &directive) + } + return directives, nil +} + +/******************************************************************************/ + +func makeLayoutRegexp(layout, value string) *regexp.Regexp { + layout = strings.Replace(layout, `%value%`, value, -1) + re := layoutRegexp[layout] + if re == nil { + re = regexp.MustCompile(layout) + layoutRegexp[layout] = re + } + return re +} diff --git a/vendor/github.com/hashicorp/nomad/api/contexts/contexts.go b/vendor/github.com/hashicorp/nomad/api/contexts/contexts.go new file mode 100644 index 0000000000..f3e6e8ca4e --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/api/contexts/contexts.go @@ -0,0 +1,14 @@ +package contexts + +// Context defines the scope in which a search for Nomad object operates +type Context string + +const ( + Allocs Context = "allocs" + Deployments Context = "deployment" + Evals Context = "evals" + Jobs Context = "jobs" + Nodes Context = "nodes" + Namespaces Context = "namespaces" + All Context = "all" +) diff --git a/vendor/github.com/hashicorp/nomad/helper/funcs.go b/vendor/github.com/hashicorp/nomad/helper/funcs.go new file mode 100644 index 0000000000..0b07960591 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/helper/funcs.go @@ -0,0 +1,272 @@ +package helper + +import ( + "crypto/sha512" + "fmt" + "regexp" + "time" +) + +// validUUID is used to check if a given string looks like a UUID +var validUUID = regexp.MustCompile(`(?i)^[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}$`) + +// IsUUID returns true if the given string is a valid UUID. 
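+//
+// For example (a sketch):
+//
+//	IsUUID("11111111-2222-3333-4444-555555555555") // true
+//	IsUUID("not-a-uuid")                           // false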
+func IsUUID(str string) bool { + const uuidLen = 36 + if len(str) != uuidLen { + return false + } + + return validUUID.MatchString(str) +} + +// HashUUID takes an input UUID and returns a hashed version of the UUID to +// ensure it is well distributed. +func HashUUID(input string) (output string, hashed bool) { + if !IsUUID(input) { + return "", false + } + + // Hash the input + buf := sha512.Sum512([]byte(input)) + output = fmt.Sprintf("%08x-%04x-%04x-%04x-%12x", + buf[0:4], + buf[4:6], + buf[6:8], + buf[8:10], + buf[10:16]) + + return output, true +} + +// boolToPtr returns the pointer to a boolean +func BoolToPtr(b bool) *bool { + return &b +} + +// IntToPtr returns the pointer to an int +func IntToPtr(i int) *int { + return &i +} + +// Int64ToPtr returns the pointer to an int +func Int64ToPtr(i int64) *int64 { + return &i +} + +// UintToPtr returns the pointer to an uint +func Uint64ToPtr(u uint64) *uint64 { + return &u +} + +// StringToPtr returns the pointer to a string +func StringToPtr(str string) *string { + return &str +} + +// TimeToPtr returns the pointer to a time stamp +func TimeToPtr(t time.Duration) *time.Duration { + return &t +} + +func IntMin(a, b int) int { + if a < b { + return a + } + return b +} + +func IntMax(a, b int) int { + if a > b { + return a + } + return b +} + +func Uint64Max(a, b uint64) uint64 { + if a > b { + return a + } + return b +} + +// MapStringStringSliceValueSet returns the set of values in a map[string][]string +func MapStringStringSliceValueSet(m map[string][]string) []string { + set := make(map[string]struct{}) + for _, slice := range m { + for _, v := range slice { + set[v] = struct{}{} + } + } + + flat := make([]string, 0, len(set)) + for k := range set { + flat = append(flat, k) + } + return flat +} + +func SliceStringToSet(s []string) map[string]struct{} { + m := make(map[string]struct{}, (len(s)+1)/2) + for _, k := range s { + m[k] = struct{}{} + } + return m +} + +// SliceStringIsSubset returns whether the smaller set of strings is a subset of +// the larger. If the smaller slice is not a subset, the offending elements are +// returned. +func SliceStringIsSubset(larger, smaller []string) (bool, []string) { + largerSet := make(map[string]struct{}, len(larger)) + for _, l := range larger { + largerSet[l] = struct{}{} + } + + subset := true + var offending []string + for _, s := range smaller { + if _, ok := largerSet[s]; !ok { + subset = false + offending = append(offending, s) + } + } + + return subset, offending +} + +func SliceSetDisjoint(first, second []string) (bool, []string) { + contained := make(map[string]struct{}, len(first)) + for _, k := range first { + contained[k] = struct{}{} + } + + offending := make(map[string]struct{}) + for _, k := range second { + if _, ok := contained[k]; ok { + offending[k] = struct{}{} + } + } + + if len(offending) == 0 { + return true, nil + } + + flattened := make([]string, 0, len(offending)) + for k := range offending { + flattened = append(flattened, k) + } + return false, flattened +} + +// Helpers for copying generic structures. 
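+//
+// For example (a sketch):
+//
+//	dup := CopyMapStringString(map[string]string{"datacenter": "dc1"})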
+func CopyMapStringString(m map[string]string) map[string]string { + l := len(m) + if l == 0 { + return nil + } + + c := make(map[string]string, l) + for k, v := range m { + c[k] = v + } + return c +} + +func CopyMapStringStruct(m map[string]struct{}) map[string]struct{} { + l := len(m) + if l == 0 { + return nil + } + + c := make(map[string]struct{}, l) + for k, _ := range m { + c[k] = struct{}{} + } + return c +} + +func CopyMapStringInt(m map[string]int) map[string]int { + l := len(m) + if l == 0 { + return nil + } + + c := make(map[string]int, l) + for k, v := range m { + c[k] = v + } + return c +} + +func CopyMapStringFloat64(m map[string]float64) map[string]float64 { + l := len(m) + if l == 0 { + return nil + } + + c := make(map[string]float64, l) + for k, v := range m { + c[k] = v + } + return c +} + +// CopyMapStringSliceString copies a map of strings to string slices such as +// http.Header +func CopyMapStringSliceString(m map[string][]string) map[string][]string { + l := len(m) + if l == 0 { + return nil + } + + c := make(map[string][]string, l) + for k, v := range m { + c[k] = CopySliceString(v) + } + return c +} + +func CopySliceString(s []string) []string { + l := len(s) + if l == 0 { + return nil + } + + c := make([]string, l) + for i, v := range s { + c[i] = v + } + return c +} + +func CopySliceInt(s []int) []int { + l := len(s) + if l == 0 { + return nil + } + + c := make([]int, l) + for i, v := range s { + c[i] = v + } + return c +} + +// CleanEnvVar replaces all occurrences of illegal characters in an environment +// variable with the specified byte. +func CleanEnvVar(s string, r byte) string { + b := []byte(s) + for i, c := range b { + switch { + case c == '_': + case c >= 'a' && c <= 'z': + case c >= 'A' && c <= 'Z': + case i > 0 && c >= '0' && c <= '9': + default: + // Replace! 
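+			// e.g. "my-var.1" becomes "my_var_1" when r is '_'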
+ b[i] = r + } + } + return string(b) +} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/bitmap.go b/vendor/github.com/hashicorp/nomad/nomad/structs/bitmap.go new file mode 100644 index 0000000000..63758a0be6 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/bitmap.go @@ -0,0 +1,78 @@ +package structs + +import "fmt" + +// Bitmap is a simple uncompressed bitmap +type Bitmap []byte + +// NewBitmap returns a bitmap with up to size indexes +func NewBitmap(size uint) (Bitmap, error) { + if size == 0 { + return nil, fmt.Errorf("bitmap must be positive size") + } + if size&7 != 0 { + return nil, fmt.Errorf("bitmap must be byte aligned") + } + b := make([]byte, size>>3) + return Bitmap(b), nil +} + +// Copy returns a copy of the Bitmap +func (b Bitmap) Copy() (Bitmap, error) { + if b == nil { + return nil, fmt.Errorf("can't copy nil Bitmap") + } + + raw := make([]byte, len(b)) + copy(raw, b) + return Bitmap(raw), nil +} + +// Size returns the size of the bitmap +func (b Bitmap) Size() uint { + return uint(len(b) << 3) +} + +// Set is used to set the given index of the bitmap +func (b Bitmap) Set(idx uint) { + bucket := idx >> 3 + mask := byte(1 << (idx & 7)) + b[bucket] |= mask +} + +// Unset is used to unset the given index of the bitmap +func (b Bitmap) Unset(idx uint) { + bucket := idx >> 3 + // Mask should be all ones minus the idx position + offset := 1 << (idx & 7) + mask := byte(offset ^ 0xff) + b[bucket] &= mask +} + +// Check is used to check the given index of the bitmap +func (b Bitmap) Check(idx uint) bool { + bucket := idx >> 3 + mask := byte(1 << (idx & 7)) + return (b[bucket] & mask) != 0 +} + +// Clear is used to efficiently clear the bitmap +func (b Bitmap) Clear() { + for i := range b { + b[i] = 0 + } +} + +// IndexesInRange returns the indexes in which the values are either set or unset based +// on the passed parameter in the passed range +func (b Bitmap) IndexesInRange(set bool, from, to uint) []int { + var indexes []int + for i := from; i <= to && i < b.Size(); i++ { + c := b.Check(i) + if c && set || !c && !set { + indexes = append(indexes, int(i)) + } + } + + return indexes +} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/diff.go b/vendor/github.com/hashicorp/nomad/nomad/structs/diff.go new file mode 100644 index 0000000000..b8ced9180f --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/diff.go @@ -0,0 +1,1231 @@ +package structs + +import ( + "fmt" + "reflect" + "sort" + "strings" + + "github.com/hashicorp/nomad/helper/flatmap" + "github.com/mitchellh/hashstructure" +) + +// DiffType denotes the type of a diff object. +type DiffType string + +var ( + DiffTypeNone DiffType = "None" + DiffTypeAdded DiffType = "Added" + DiffTypeDeleted DiffType = "Deleted" + DiffTypeEdited DiffType = "Edited" +) + +func (d DiffType) Less(other DiffType) bool { + // Edited > Added > Deleted > None + // But we do a reverse sort + if d == other { + return false + } + + if d == DiffTypeEdited { + return true + } else if other == DiffTypeEdited { + return false + } else if d == DiffTypeAdded { + return true + } else if other == DiffTypeAdded { + return false + } else if d == DiffTypeDeleted { + return true + } else if other == DiffTypeDeleted { + return false + } + + return true +} + +// JobDiff contains the diff of two jobs. 
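+//
+// A JobDiff is typically obtained via (*Job).Diff, e.g. (a sketch):
+//
+//	jobDiff, err := oldJob.Diff(newJob, true)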
+type JobDiff struct { + Type DiffType + ID string + Fields []*FieldDiff + Objects []*ObjectDiff + TaskGroups []*TaskGroupDiff +} + +// Diff returns a diff of two jobs and a potential error if the Jobs are not +// diffable. If contextual diff is enabled, objects within the job will contain +// field information even if unchanged. +func (j *Job) Diff(other *Job, contextual bool) (*JobDiff, error) { + // COMPAT: Remove "Update" in 0.7.0. Update pushed down to task groups + // in 0.6.0 + diff := &JobDiff{Type: DiffTypeNone} + var oldPrimitiveFlat, newPrimitiveFlat map[string]string + filter := []string{"ID", "Status", "StatusDescription", "Version", "Stable", "CreateIndex", + "ModifyIndex", "JobModifyIndex", "Update", "SubmitTime"} + + if j == nil && other == nil { + return diff, nil + } else if j == nil { + j = &Job{} + diff.Type = DiffTypeAdded + newPrimitiveFlat = flatmap.Flatten(other, filter, true) + diff.ID = other.ID + } else if other == nil { + other = &Job{} + diff.Type = DiffTypeDeleted + oldPrimitiveFlat = flatmap.Flatten(j, filter, true) + diff.ID = j.ID + } else { + if j.ID != other.ID { + return nil, fmt.Errorf("can not diff jobs with different IDs: %q and %q", j.ID, other.ID) + } + + oldPrimitiveFlat = flatmap.Flatten(j, filter, true) + newPrimitiveFlat = flatmap.Flatten(other, filter, true) + diff.ID = other.ID + } + + // Diff the primitive fields. + diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, false) + + // Datacenters diff + if setDiff := stringSetDiff(j.Datacenters, other.Datacenters, "Datacenters", contextual); setDiff != nil && setDiff.Type != DiffTypeNone { + diff.Objects = append(diff.Objects, setDiff) + } + + // Constraints diff + conDiff := primitiveObjectSetDiff( + interfaceSlice(j.Constraints), + interfaceSlice(other.Constraints), + []string{"str"}, + "Constraint", + contextual) + if conDiff != nil { + diff.Objects = append(diff.Objects, conDiff...) + } + + // Task groups diff + tgs, err := taskGroupDiffs(j.TaskGroups, other.TaskGroups, contextual) + if err != nil { + return nil, err + } + diff.TaskGroups = tgs + + // Periodic diff + if pDiff := primitiveObjectDiff(j.Periodic, other.Periodic, nil, "Periodic", contextual); pDiff != nil { + diff.Objects = append(diff.Objects, pDiff) + } + + // ParameterizedJob diff + if cDiff := parameterizedJobDiff(j.ParameterizedJob, other.ParameterizedJob, contextual); cDiff != nil { + diff.Objects = append(diff.Objects, cDiff) + } + + // Check to see if there is a diff. We don't use reflect because we are + // filtering quite a few fields that will change on each diff. + if diff.Type == DiffTypeNone { + for _, fd := range diff.Fields { + if fd.Type != DiffTypeNone { + diff.Type = DiffTypeEdited + break + } + } + } + + if diff.Type == DiffTypeNone { + for _, od := range diff.Objects { + if od.Type != DiffTypeNone { + diff.Type = DiffTypeEdited + break + } + } + } + + if diff.Type == DiffTypeNone { + for _, tg := range diff.TaskGroups { + if tg.Type != DiffTypeNone { + diff.Type = DiffTypeEdited + break + } + } + } + + return diff, nil +} + +func (j *JobDiff) GoString() string { + out := fmt.Sprintf("Job %q (%s):\n", j.ID, j.Type) + + for _, f := range j.Fields { + out += fmt.Sprintf("%#v\n", f) + } + + for _, o := range j.Objects { + out += fmt.Sprintf("%#v\n", o) + } + + for _, tg := range j.TaskGroups { + out += fmt.Sprintf("%#v\n", tg) + } + + return out +} + +// TaskGroupDiff contains the diff of two task groups. 
+type TaskGroupDiff struct { + Type DiffType + Name string + Fields []*FieldDiff + Objects []*ObjectDiff + Tasks []*TaskDiff + Updates map[string]uint64 +} + +// Diff returns a diff of two task groups. If contextual diff is enabled, +// objects' fields will be stored even if no diff occurred as long as one field +// changed. +func (tg *TaskGroup) Diff(other *TaskGroup, contextual bool) (*TaskGroupDiff, error) { + diff := &TaskGroupDiff{Type: DiffTypeNone} + var oldPrimitiveFlat, newPrimitiveFlat map[string]string + filter := []string{"Name"} + + if tg == nil && other == nil { + return diff, nil + } else if tg == nil { + tg = &TaskGroup{} + diff.Type = DiffTypeAdded + diff.Name = other.Name + newPrimitiveFlat = flatmap.Flatten(other, filter, true) + } else if other == nil { + other = &TaskGroup{} + diff.Type = DiffTypeDeleted + diff.Name = tg.Name + oldPrimitiveFlat = flatmap.Flatten(tg, filter, true) + } else { + if !reflect.DeepEqual(tg, other) { + diff.Type = DiffTypeEdited + } + if tg.Name != other.Name { + return nil, fmt.Errorf("can not diff task groups with different names: %q and %q", tg.Name, other.Name) + } + diff.Name = other.Name + oldPrimitiveFlat = flatmap.Flatten(tg, filter, true) + newPrimitiveFlat = flatmap.Flatten(other, filter, true) + } + + // Diff the primitive fields. + diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, false) + + // Constraints diff + conDiff := primitiveObjectSetDiff( + interfaceSlice(tg.Constraints), + interfaceSlice(other.Constraints), + []string{"str"}, + "Constraint", + contextual) + if conDiff != nil { + diff.Objects = append(diff.Objects, conDiff...) + } + + // Restart policy diff + rDiff := primitiveObjectDiff(tg.RestartPolicy, other.RestartPolicy, nil, "RestartPolicy", contextual) + if rDiff != nil { + diff.Objects = append(diff.Objects, rDiff) + } + + // EphemeralDisk diff + diskDiff := primitiveObjectDiff(tg.EphemeralDisk, other.EphemeralDisk, nil, "EphemeralDisk", contextual) + if diskDiff != nil { + diff.Objects = append(diff.Objects, diskDiff) + } + + // Update diff + // COMPAT: Remove "Stagger" in 0.7.0. + if uDiff := primitiveObjectDiff(tg.Update, other.Update, []string{"Stagger"}, "Update", contextual); uDiff != nil { + diff.Objects = append(diff.Objects, uDiff) + } + + // Tasks diff + tasks, err := taskDiffs(tg.Tasks, other.Tasks, contextual) + if err != nil { + return nil, err + } + diff.Tasks = tasks + + return diff, nil +} + +func (tg *TaskGroupDiff) GoString() string { + out := fmt.Sprintf("Group %q (%s):\n", tg.Name, tg.Type) + + if len(tg.Updates) != 0 { + out += "Updates {\n" + for update, count := range tg.Updates { + out += fmt.Sprintf("%d %s\n", count, update) + } + out += "}\n" + } + + for _, f := range tg.Fields { + out += fmt.Sprintf("%#v\n", f) + } + + for _, o := range tg.Objects { + out += fmt.Sprintf("%#v\n", o) + } + + for _, t := range tg.Tasks { + out += fmt.Sprintf("%#v\n", t) + } + + return out +} + +// TaskGroupDiffs diffs two sets of task groups. If contextual diff is enabled, +// objects' fields will be stored even if no diff occurred as long as one field +// changed. 
+func taskGroupDiffs(old, new []*TaskGroup, contextual bool) ([]*TaskGroupDiff, error) { + oldMap := make(map[string]*TaskGroup, len(old)) + newMap := make(map[string]*TaskGroup, len(new)) + for _, o := range old { + oldMap[o.Name] = o + } + for _, n := range new { + newMap[n.Name] = n + } + + var diffs []*TaskGroupDiff + for name, oldGroup := range oldMap { + // Diff the same, deleted and edited + diff, err := oldGroup.Diff(newMap[name], contextual) + if err != nil { + return nil, err + } + diffs = append(diffs, diff) + } + + for name, newGroup := range newMap { + // Diff the added + if old, ok := oldMap[name]; !ok { + diff, err := old.Diff(newGroup, contextual) + if err != nil { + return nil, err + } + diffs = append(diffs, diff) + } + } + + sort.Sort(TaskGroupDiffs(diffs)) + return diffs, nil +} + +// For sorting TaskGroupDiffs +type TaskGroupDiffs []*TaskGroupDiff + +func (tg TaskGroupDiffs) Len() int { return len(tg) } +func (tg TaskGroupDiffs) Swap(i, j int) { tg[i], tg[j] = tg[j], tg[i] } +func (tg TaskGroupDiffs) Less(i, j int) bool { return tg[i].Name < tg[j].Name } + +// TaskDiff contains the diff of two Tasks +type TaskDiff struct { + Type DiffType + Name string + Fields []*FieldDiff + Objects []*ObjectDiff + Annotations []string +} + +// Diff returns a diff of two tasks. If contextual diff is enabled, objects +// within the task will contain field information even if unchanged. +func (t *Task) Diff(other *Task, contextual bool) (*TaskDiff, error) { + diff := &TaskDiff{Type: DiffTypeNone} + var oldPrimitiveFlat, newPrimitiveFlat map[string]string + filter := []string{"Name", "Config"} + + if t == nil && other == nil { + return diff, nil + } else if t == nil { + t = &Task{} + diff.Type = DiffTypeAdded + diff.Name = other.Name + newPrimitiveFlat = flatmap.Flatten(other, filter, true) + } else if other == nil { + other = &Task{} + diff.Type = DiffTypeDeleted + diff.Name = t.Name + oldPrimitiveFlat = flatmap.Flatten(t, filter, true) + } else { + if !reflect.DeepEqual(t, other) { + diff.Type = DiffTypeEdited + } + if t.Name != other.Name { + return nil, fmt.Errorf("can not diff tasks with different names: %q and %q", t.Name, other.Name) + } + diff.Name = other.Name + oldPrimitiveFlat = flatmap.Flatten(t, filter, true) + newPrimitiveFlat = flatmap.Flatten(other, filter, true) + } + + // Diff the primitive fields. + diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, false) + + // Constraints diff + conDiff := primitiveObjectSetDiff( + interfaceSlice(t.Constraints), + interfaceSlice(other.Constraints), + []string{"str"}, + "Constraint", + contextual) + if conDiff != nil { + diff.Objects = append(diff.Objects, conDiff...) 
+ } + + // Config diff + if cDiff := configDiff(t.Config, other.Config, contextual); cDiff != nil { + diff.Objects = append(diff.Objects, cDiff) + } + + // Resources diff + if rDiff := t.Resources.Diff(other.Resources, contextual); rDiff != nil { + diff.Objects = append(diff.Objects, rDiff) + } + + // LogConfig diff + lDiff := primitiveObjectDiff(t.LogConfig, other.LogConfig, nil, "LogConfig", contextual) + if lDiff != nil { + diff.Objects = append(diff.Objects, lDiff) + } + + // Dispatch payload diff + dDiff := primitiveObjectDiff(t.DispatchPayload, other.DispatchPayload, nil, "DispatchPayload", contextual) + if dDiff != nil { + diff.Objects = append(diff.Objects, dDiff) + } + + // Artifacts diff + diffs := primitiveObjectSetDiff( + interfaceSlice(t.Artifacts), + interfaceSlice(other.Artifacts), + nil, + "Artifact", + contextual) + if diffs != nil { + diff.Objects = append(diff.Objects, diffs...) + } + + // Services diff + if sDiffs := serviceDiffs(t.Services, other.Services, contextual); sDiffs != nil { + diff.Objects = append(diff.Objects, sDiffs...) + } + + // Vault diff + vDiff := vaultDiff(t.Vault, other.Vault, contextual) + if vDiff != nil { + diff.Objects = append(diff.Objects, vDiff) + } + + // Template diff + tmplDiffs := primitiveObjectSetDiff( + interfaceSlice(t.Templates), + interfaceSlice(other.Templates), + nil, + "Template", + contextual) + if tmplDiffs != nil { + diff.Objects = append(diff.Objects, tmplDiffs...) + } + + return diff, nil +} + +func (t *TaskDiff) GoString() string { + var out string + if len(t.Annotations) == 0 { + out = fmt.Sprintf("Task %q (%s):\n", t.Name, t.Type) + } else { + out = fmt.Sprintf("Task %q (%s) (%s):\n", t.Name, t.Type, strings.Join(t.Annotations, ",")) + } + + for _, f := range t.Fields { + out += fmt.Sprintf("%#v\n", f) + } + + for _, o := range t.Objects { + out += fmt.Sprintf("%#v\n", o) + } + + return out +} + +// taskDiffs diffs a set of tasks. If contextual diff is enabled, unchanged +// fields within objects nested in the tasks will be returned. +func taskDiffs(old, new []*Task, contextual bool) ([]*TaskDiff, error) { + oldMap := make(map[string]*Task, len(old)) + newMap := make(map[string]*Task, len(new)) + for _, o := range old { + oldMap[o.Name] = o + } + for _, n := range new { + newMap[n.Name] = n + } + + var diffs []*TaskDiff + for name, oldGroup := range oldMap { + // Diff the same, deleted and edited + diff, err := oldGroup.Diff(newMap[name], contextual) + if err != nil { + return nil, err + } + diffs = append(diffs, diff) + } + + for name, newGroup := range newMap { + // Diff the added + if old, ok := oldMap[name]; !ok { + diff, err := old.Diff(newGroup, contextual) + if err != nil { + return nil, err + } + diffs = append(diffs, diff) + } + } + + sort.Sort(TaskDiffs(diffs)) + return diffs, nil +} + +// For sorting TaskDiffs +type TaskDiffs []*TaskDiff + +func (t TaskDiffs) Len() int { return len(t) } +func (t TaskDiffs) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t TaskDiffs) Less(i, j int) bool { return t[i].Name < t[j].Name } + +// serviceDiff returns the diff of two service objects. If contextual diff is +// enabled, all fields will be returned, even if no diff occurred. 
+func serviceDiff(old, new *Service, contextual bool) *ObjectDiff { + diff := &ObjectDiff{Type: DiffTypeNone, Name: "Service"} + var oldPrimitiveFlat, newPrimitiveFlat map[string]string + + if reflect.DeepEqual(old, new) { + return nil + } else if old == nil { + old = &Service{} + diff.Type = DiffTypeAdded + newPrimitiveFlat = flatmap.Flatten(new, nil, true) + } else if new == nil { + new = &Service{} + diff.Type = DiffTypeDeleted + oldPrimitiveFlat = flatmap.Flatten(old, nil, true) + } else { + diff.Type = DiffTypeEdited + oldPrimitiveFlat = flatmap.Flatten(old, nil, true) + newPrimitiveFlat = flatmap.Flatten(new, nil, true) + } + + // Diff the primitive fields. + diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) + + // Checks diffs + if cDiffs := serviceCheckDiffs(old.Checks, new.Checks, contextual); cDiffs != nil { + diff.Objects = append(diff.Objects, cDiffs...) + } + + return diff +} + +// serviceDiffs diffs a set of services. If contextual diff is enabled, unchanged +// fields within objects nested in the tasks will be returned. +func serviceDiffs(old, new []*Service, contextual bool) []*ObjectDiff { + oldMap := make(map[string]*Service, len(old)) + newMap := make(map[string]*Service, len(new)) + for _, o := range old { + oldMap[o.Name] = o + } + for _, n := range new { + newMap[n.Name] = n + } + + var diffs []*ObjectDiff + for name, oldService := range oldMap { + // Diff the same, deleted and edited + if diff := serviceDiff(oldService, newMap[name], contextual); diff != nil { + diffs = append(diffs, diff) + } + } + + for name, newService := range newMap { + // Diff the added + if old, ok := oldMap[name]; !ok { + if diff := serviceDiff(old, newService, contextual); diff != nil { + diffs = append(diffs, diff) + } + } + } + + sort.Sort(ObjectDiffs(diffs)) + return diffs +} + +// serviceCheckDiff returns the diff of two service check objects. If contextual +// diff is enabled, all fields will be returned, even if no diff occurred. +func serviceCheckDiff(old, new *ServiceCheck, contextual bool) *ObjectDiff { + diff := &ObjectDiff{Type: DiffTypeNone, Name: "Check"} + var oldPrimitiveFlat, newPrimitiveFlat map[string]string + + if reflect.DeepEqual(old, new) { + return nil + } else if old == nil { + old = &ServiceCheck{} + diff.Type = DiffTypeAdded + newPrimitiveFlat = flatmap.Flatten(new, nil, true) + } else if new == nil { + new = &ServiceCheck{} + diff.Type = DiffTypeDeleted + oldPrimitiveFlat = flatmap.Flatten(old, nil, true) + } else { + diff.Type = DiffTypeEdited + oldPrimitiveFlat = flatmap.Flatten(old, nil, true) + newPrimitiveFlat = flatmap.Flatten(new, nil, true) + } + + // Diff the primitive fields. + diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) + + // Diff Header + if headerDiff := checkHeaderDiff(old.Header, new.Header, contextual); headerDiff != nil { + diff.Objects = append(diff.Objects, headerDiff) + } + + return diff +} + +// checkHeaderDiff returns the diff of two service check header objects. If +// contextual diff is enabled, all fields will be returned, even if no diff +// occurred. 
+func checkHeaderDiff(old, new map[string][]string, contextual bool) *ObjectDiff { + diff := &ObjectDiff{Type: DiffTypeNone, Name: "Header"} + if reflect.DeepEqual(old, new) { + return nil + } else if len(old) == 0 { + diff.Type = DiffTypeAdded + } else if len(new) == 0 { + diff.Type = DiffTypeDeleted + } else { + diff.Type = DiffTypeEdited + } + oldFlat := flatmap.Flatten(old, nil, false) + newFlat := flatmap.Flatten(new, nil, false) + diff.Fields = fieldDiffs(oldFlat, newFlat, contextual) + return diff +} + +// serviceCheckDiffs diffs a set of service checks. If contextual diff is +// enabled, unchanged fields within objects nested in the tasks will be +// returned. +func serviceCheckDiffs(old, new []*ServiceCheck, contextual bool) []*ObjectDiff { + oldMap := make(map[string]*ServiceCheck, len(old)) + newMap := make(map[string]*ServiceCheck, len(new)) + for _, o := range old { + oldMap[o.Name] = o + } + for _, n := range new { + newMap[n.Name] = n + } + + var diffs []*ObjectDiff + for name, oldCheck := range oldMap { + // Diff the same, deleted and edited + if diff := serviceCheckDiff(oldCheck, newMap[name], contextual); diff != nil { + diffs = append(diffs, diff) + } + } + + for name, newCheck := range newMap { + // Diff the added + if old, ok := oldMap[name]; !ok { + if diff := serviceCheckDiff(old, newCheck, contextual); diff != nil { + diffs = append(diffs, diff) + } + } + } + + sort.Sort(ObjectDiffs(diffs)) + return diffs +} + +// vaultDiff returns the diff of two vault objects. If contextual diff is +// enabled, all fields will be returned, even if no diff occurred. +func vaultDiff(old, new *Vault, contextual bool) *ObjectDiff { + diff := &ObjectDiff{Type: DiffTypeNone, Name: "Vault"} + var oldPrimitiveFlat, newPrimitiveFlat map[string]string + + if reflect.DeepEqual(old, new) { + return nil + } else if old == nil { + old = &Vault{} + diff.Type = DiffTypeAdded + newPrimitiveFlat = flatmap.Flatten(new, nil, true) + } else if new == nil { + new = &Vault{} + diff.Type = DiffTypeDeleted + oldPrimitiveFlat = flatmap.Flatten(old, nil, true) + } else { + diff.Type = DiffTypeEdited + oldPrimitiveFlat = flatmap.Flatten(old, nil, true) + newPrimitiveFlat = flatmap.Flatten(new, nil, true) + } + + // Diff the primitive fields. + diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) + + // Policies diffs + if setDiff := stringSetDiff(old.Policies, new.Policies, "Policies", contextual); setDiff != nil { + diff.Objects = append(diff.Objects, setDiff) + } + + return diff +} + +// parameterizedJobDiff returns the diff of two parameterized job objects. If +// contextual diff is enabled, all fields will be returned, even if no diff +// occurred. +func parameterizedJobDiff(old, new *ParameterizedJobConfig, contextual bool) *ObjectDiff { + diff := &ObjectDiff{Type: DiffTypeNone, Name: "ParameterizedJob"} + var oldPrimitiveFlat, newPrimitiveFlat map[string]string + + if reflect.DeepEqual(old, new) { + return nil + } else if old == nil { + old = &ParameterizedJobConfig{} + diff.Type = DiffTypeAdded + newPrimitiveFlat = flatmap.Flatten(new, nil, true) + } else if new == nil { + new = &ParameterizedJobConfig{} + diff.Type = DiffTypeDeleted + oldPrimitiveFlat = flatmap.Flatten(old, nil, true) + } else { + diff.Type = DiffTypeEdited + oldPrimitiveFlat = flatmap.Flatten(old, nil, true) + newPrimitiveFlat = flatmap.Flatten(new, nil, true) + } + + // Diff the primitive fields. 
+ diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) + + // Meta diffs + if optionalDiff := stringSetDiff(old.MetaOptional, new.MetaOptional, "MetaOptional", contextual); optionalDiff != nil { + diff.Objects = append(diff.Objects, optionalDiff) + } + + if requiredDiff := stringSetDiff(old.MetaRequired, new.MetaRequired, "MetaRequired", contextual); requiredDiff != nil { + diff.Objects = append(diff.Objects, requiredDiff) + } + + return diff +} + +// Diff returns a diff of two resource objects. If contextual diff is enabled, +// non-changed fields will still be returned. +func (r *Resources) Diff(other *Resources, contextual bool) *ObjectDiff { + diff := &ObjectDiff{Type: DiffTypeNone, Name: "Resources"} + var oldPrimitiveFlat, newPrimitiveFlat map[string]string + + if reflect.DeepEqual(r, other) { + return nil + } else if r == nil { + r = &Resources{} + diff.Type = DiffTypeAdded + newPrimitiveFlat = flatmap.Flatten(other, nil, true) + } else if other == nil { + other = &Resources{} + diff.Type = DiffTypeDeleted + oldPrimitiveFlat = flatmap.Flatten(r, nil, true) + } else { + diff.Type = DiffTypeEdited + oldPrimitiveFlat = flatmap.Flatten(r, nil, true) + newPrimitiveFlat = flatmap.Flatten(other, nil, true) + } + + // Diff the primitive fields. + diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) + + // Network Resources diff + if nDiffs := networkResourceDiffs(r.Networks, other.Networks, contextual); nDiffs != nil { + diff.Objects = append(diff.Objects, nDiffs...) + } + + return diff +} + +// Diff returns a diff of two network resources. If contextual diff is enabled, +// non-changed fields will still be returned. +func (r *NetworkResource) Diff(other *NetworkResource, contextual bool) *ObjectDiff { + diff := &ObjectDiff{Type: DiffTypeNone, Name: "Network"} + var oldPrimitiveFlat, newPrimitiveFlat map[string]string + filter := []string{"Device", "CIDR", "IP"} + + if reflect.DeepEqual(r, other) { + return nil + } else if r == nil { + r = &NetworkResource{} + diff.Type = DiffTypeAdded + newPrimitiveFlat = flatmap.Flatten(other, filter, true) + } else if other == nil { + other = &NetworkResource{} + diff.Type = DiffTypeDeleted + oldPrimitiveFlat = flatmap.Flatten(r, filter, true) + } else { + diff.Type = DiffTypeEdited + oldPrimitiveFlat = flatmap.Flatten(r, filter, true) + newPrimitiveFlat = flatmap.Flatten(other, filter, true) + } + + // Diff the primitive fields. + diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) + + // Port diffs + resPorts := portDiffs(r.ReservedPorts, other.ReservedPorts, false, contextual) + dynPorts := portDiffs(r.DynamicPorts, other.DynamicPorts, true, contextual) + if resPorts != nil { + diff.Objects = append(diff.Objects, resPorts...) + } + if dynPorts != nil { + diff.Objects = append(diff.Objects, dynPorts...) + } + + return diff +} + +// networkResourceDiffs diffs a set of NetworkResources. If contextual diff is enabled, +// non-changed fields will still be returned. 
+func networkResourceDiffs(old, new []*NetworkResource, contextual bool) []*ObjectDiff { + makeSet := func(objects []*NetworkResource) map[string]*NetworkResource { + objMap := make(map[string]*NetworkResource, len(objects)) + for _, obj := range objects { + hash, err := hashstructure.Hash(obj, nil) + if err != nil { + panic(err) + } + objMap[fmt.Sprintf("%d", hash)] = obj + } + + return objMap + } + + oldSet := makeSet(old) + newSet := makeSet(new) + + var diffs []*ObjectDiff + for k, oldV := range oldSet { + if newV, ok := newSet[k]; !ok { + if diff := oldV.Diff(newV, contextual); diff != nil { + diffs = append(diffs, diff) + } + } + } + for k, newV := range newSet { + if oldV, ok := oldSet[k]; !ok { + if diff := oldV.Diff(newV, contextual); diff != nil { + diffs = append(diffs, diff) + } + } + } + + sort.Sort(ObjectDiffs(diffs)) + return diffs + +} + +// portDiffs returns the diff of two sets of ports. The dynamic flag marks the +// set of ports as being Dynamic ports versus Static ports. If contextual diff is enabled, +// non-changed fields will still be returned. +func portDiffs(old, new []Port, dynamic bool, contextual bool) []*ObjectDiff { + makeSet := func(ports []Port) map[string]Port { + portMap := make(map[string]Port, len(ports)) + for _, port := range ports { + portMap[port.Label] = port + } + + return portMap + } + + oldPorts := makeSet(old) + newPorts := makeSet(new) + + var filter []string + name := "Static Port" + if dynamic { + filter = []string{"Value"} + name = "Dynamic Port" + } + + var diffs []*ObjectDiff + for portLabel, oldPort := range oldPorts { + // Diff the same, deleted and edited + if newPort, ok := newPorts[portLabel]; ok { + diff := primitiveObjectDiff(oldPort, newPort, filter, name, contextual) + if diff != nil { + diffs = append(diffs, diff) + } + } else { + diff := primitiveObjectDiff(oldPort, nil, filter, name, contextual) + if diff != nil { + diffs = append(diffs, diff) + } + } + } + for label, newPort := range newPorts { + // Diff the added + if _, ok := oldPorts[label]; !ok { + diff := primitiveObjectDiff(nil, newPort, filter, name, contextual) + if diff != nil { + diffs = append(diffs, diff) + } + } + } + + sort.Sort(ObjectDiffs(diffs)) + return diffs + +} + +// configDiff returns the diff of two Task Config objects. If contextual diff is +// enabled, all fields will be returned, even if no diff occurred. +func configDiff(old, new map[string]interface{}, contextual bool) *ObjectDiff { + diff := &ObjectDiff{Type: DiffTypeNone, Name: "Config"} + if reflect.DeepEqual(old, new) { + return nil + } else if len(old) == 0 { + diff.Type = DiffTypeAdded + } else if len(new) == 0 { + diff.Type = DiffTypeDeleted + } else { + diff.Type = DiffTypeEdited + } + + // Diff the primitive fields. + oldPrimitiveFlat := flatmap.Flatten(old, nil, false) + newPrimitiveFlat := flatmap.Flatten(new, nil, false) + diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) + return diff +} + +// ObjectDiff contains the diff of two generic objects. 
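Editor's note (not part of the patch): a small sketch of configDiff on two hypothetical driver config maps. Nested values are flattened by flatmap.Flatten before the per-key comparison, so the assumed result below depends on top-level string keys flattening to themselves.

oldCfg := map[string]interface{}{"image": "redis:3.2", "command": "redis-server"}
newCfg := map[string]interface{}{"image": "redis:4.0", "command": "redis-server"}
if d := configDiff(oldCfg, newCfg, false); d != nil {
    // d.Type is DiffTypeEdited, and d.Fields should hold a single edited entry
    // for the "image" key; unchanged keys are dropped because contextual is false.
    _ = d
}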
+type ObjectDiff struct { + Type DiffType + Name string + Fields []*FieldDiff + Objects []*ObjectDiff +} + +func (o *ObjectDiff) GoString() string { + out := fmt.Sprintf("\n%q (%s) {\n", o.Name, o.Type) + for _, f := range o.Fields { + out += fmt.Sprintf("%#v\n", f) + } + for _, o := range o.Objects { + out += fmt.Sprintf("%#v\n", o) + } + out += "}" + return out +} + +func (o *ObjectDiff) Less(other *ObjectDiff) bool { + if reflect.DeepEqual(o, other) { + return false + } else if other == nil { + return false + } else if o == nil { + return true + } + + if o.Name != other.Name { + return o.Name < other.Name + } + + if o.Type != other.Type { + return o.Type.Less(other.Type) + } + + if lO, lOther := len(o.Fields), len(other.Fields); lO != lOther { + return lO < lOther + } + + if lO, lOther := len(o.Objects), len(other.Objects); lO != lOther { + return lO < lOther + } + + // Check each field + sort.Sort(FieldDiffs(o.Fields)) + sort.Sort(FieldDiffs(other.Fields)) + + for i, oV := range o.Fields { + if oV.Less(other.Fields[i]) { + return true + } + } + + // Check each object + sort.Sort(ObjectDiffs(o.Objects)) + sort.Sort(ObjectDiffs(other.Objects)) + for i, oV := range o.Objects { + if oV.Less(other.Objects[i]) { + return true + } + } + + return false +} + +// For sorting ObjectDiffs +type ObjectDiffs []*ObjectDiff + +func (o ObjectDiffs) Len() int { return len(o) } +func (o ObjectDiffs) Swap(i, j int) { o[i], o[j] = o[j], o[i] } +func (o ObjectDiffs) Less(i, j int) bool { return o[i].Less(o[j]) } + +type FieldDiff struct { + Type DiffType + Name string + Old, New string + Annotations []string +} + +// fieldDiff returns a FieldDiff if old and new are different otherwise, it +// returns nil. If contextual diff is enabled, even non-changed fields will be +// returned. +func fieldDiff(old, new, name string, contextual bool) *FieldDiff { + diff := &FieldDiff{Name: name, Type: DiffTypeNone} + if old == new { + if !contextual { + return nil + } + diff.Old, diff.New = old, new + return diff + } + + if old == "" { + diff.Type = DiffTypeAdded + diff.New = new + } else if new == "" { + diff.Type = DiffTypeDeleted + diff.Old = old + } else { + diff.Type = DiffTypeEdited + diff.Old = old + diff.New = new + } + return diff +} + +func (f *FieldDiff) GoString() string { + out := fmt.Sprintf("%q (%s): %q => %q", f.Name, f.Type, f.Old, f.New) + if len(f.Annotations) != 0 { + out += fmt.Sprintf(" (%s)", strings.Join(f.Annotations, ", ")) + } + + return out +} + +func (f *FieldDiff) Less(other *FieldDiff) bool { + if reflect.DeepEqual(f, other) { + return false + } else if other == nil { + return false + } else if f == nil { + return true + } + + if f.Name != other.Name { + return f.Name < other.Name + } else if f.Old != other.Old { + return f.Old < other.Old + } + + return f.New < other.New +} + +// For sorting FieldDiffs +type FieldDiffs []*FieldDiff + +func (f FieldDiffs) Len() int { return len(f) } +func (f FieldDiffs) Swap(i, j int) { f[i], f[j] = f[j], f[i] } +func (f FieldDiffs) Less(i, j int) bool { return f[i].Less(f[j]) } + +// fieldDiffs takes a map of field names to their values and returns a set of +// field diffs. If contextual diff is enabled, even non-changed fields will be +// returned. 
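Editor's note (not part of the patch): fieldDiff treats an empty string as "absent", which is what drives the Added/Deleted/Edited classification. A few illustrative calls with a hypothetical "MemoryMB" field:

fieldDiff("", "512", "MemoryMB", false)    // DiffTypeAdded,   New: "512"
fieldDiff("256", "", "MemoryMB", false)    // DiffTypeDeleted, Old: "256"
fieldDiff("256", "512", "MemoryMB", false) // DiffTypeEdited,  Old: "256", New: "512"
fieldDiff("256", "256", "MemoryMB", false) // nil: unchanged and contextual is false
fieldDiff("256", "256", "MemoryMB", true)  // DiffTypeNone with Old and New both "256"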
+func fieldDiffs(old, new map[string]string, contextual bool) []*FieldDiff { + var diffs []*FieldDiff + visited := make(map[string]struct{}) + for k, oldV := range old { + visited[k] = struct{}{} + newV := new[k] + if diff := fieldDiff(oldV, newV, k, contextual); diff != nil { + diffs = append(diffs, diff) + } + } + + for k, newV := range new { + if _, ok := visited[k]; !ok { + if diff := fieldDiff("", newV, k, contextual); diff != nil { + diffs = append(diffs, diff) + } + } + } + + sort.Sort(FieldDiffs(diffs)) + return diffs +} + +// stringSetDiff diffs two sets of strings with the given name. +func stringSetDiff(old, new []string, name string, contextual bool) *ObjectDiff { + oldMap := make(map[string]struct{}, len(old)) + newMap := make(map[string]struct{}, len(new)) + for _, o := range old { + oldMap[o] = struct{}{} + } + for _, n := range new { + newMap[n] = struct{}{} + } + if reflect.DeepEqual(oldMap, newMap) && !contextual { + return nil + } + + diff := &ObjectDiff{Name: name} + var added, removed bool + for k := range oldMap { + if _, ok := newMap[k]; !ok { + diff.Fields = append(diff.Fields, fieldDiff(k, "", name, contextual)) + removed = true + } else if contextual { + diff.Fields = append(diff.Fields, fieldDiff(k, k, name, contextual)) + } + } + + for k := range newMap { + if _, ok := oldMap[k]; !ok { + diff.Fields = append(diff.Fields, fieldDiff("", k, name, contextual)) + added = true + } + } + + sort.Sort(FieldDiffs(diff.Fields)) + + // Determine the type + if added && removed { + diff.Type = DiffTypeEdited + } else if added { + diff.Type = DiffTypeAdded + } else if removed { + diff.Type = DiffTypeDeleted + } else { + // Diff of an empty set + if len(diff.Fields) == 0 { + return nil + } + + diff.Type = DiffTypeNone + } + + return diff +} + +// primitiveObjectDiff returns a diff of the passed objects' primitive fields. +// The filter field can be used to exclude fields from the diff. The name is the +// name of the objects. If contextual is set, non-changed fields will also be +// stored in the object diff. +func primitiveObjectDiff(old, new interface{}, filter []string, name string, contextual bool) *ObjectDiff { + oldPrimitiveFlat := flatmap.Flatten(old, filter, true) + newPrimitiveFlat := flatmap.Flatten(new, filter, true) + delete(oldPrimitiveFlat, "") + delete(newPrimitiveFlat, "") + + diff := &ObjectDiff{Name: name} + diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) + + var added, deleted, edited bool + for _, f := range diff.Fields { + switch f.Type { + case DiffTypeEdited: + edited = true + break + case DiffTypeDeleted: + deleted = true + case DiffTypeAdded: + added = true + } + } + + if edited || added && deleted { + diff.Type = DiffTypeEdited + } else if added { + diff.Type = DiffTypeAdded + } else if deleted { + diff.Type = DiffTypeDeleted + } else { + return nil + } + + return diff +} + +// primitiveObjectSetDiff does a set difference of the old and new sets. The +// filter parameter can be used to filter a set of primitive fields in the +// passed structs. The name corresponds to the name of the passed objects. If +// contextual diff is enabled, objects' primtive fields will be returned even if +// no diff exists. 
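Editor's note (not part of the patch): a sketch of stringSetDiff over two hypothetical Vault policy lists, which is how vaultDiff above reports policy changes.

oldPolicies := []string{"frontend", "shared"}
newPolicies := []string{"backend", "shared"}
if d := stringSetDiff(oldPolicies, newPolicies, "Policies", false); d != nil {
    // d.Type is DiffTypeEdited: "frontend" appears as a deleted field,
    // "backend" as an added one, and the unchanged "shared" is omitted
    // because contextual is false.
    _ = d
}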
+func primitiveObjectSetDiff(old, new []interface{}, filter []string, name string, contextual bool) []*ObjectDiff { + makeSet := func(objects []interface{}) map[string]interface{} { + objMap := make(map[string]interface{}, len(objects)) + for _, obj := range objects { + hash, err := hashstructure.Hash(obj, nil) + if err != nil { + panic(err) + } + objMap[fmt.Sprintf("%d", hash)] = obj + } + + return objMap + } + + oldSet := makeSet(old) + newSet := makeSet(new) + + var diffs []*ObjectDiff + for k, v := range oldSet { + // Deleted + if _, ok := newSet[k]; !ok { + diffs = append(diffs, primitiveObjectDiff(v, nil, filter, name, contextual)) + } + } + for k, v := range newSet { + // Added + if _, ok := oldSet[k]; !ok { + diffs = append(diffs, primitiveObjectDiff(nil, v, filter, name, contextual)) + } + } + + sort.Sort(ObjectDiffs(diffs)) + return diffs +} + +// interfaceSlice is a helper method that takes a slice of typed elements and +// returns a slice of interface. This method will panic if given a non-slice +// input. +func interfaceSlice(slice interface{}) []interface{} { + s := reflect.ValueOf(slice) + if s.Kind() != reflect.Slice { + panic("InterfaceSlice() given a non-slice type") + } + + ret := make([]interface{}, s.Len()) + + for i := 0; i < s.Len(); i++ { + ret[i] = s.Index(i).Interface() + } + + return ret +} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/funcs.go b/vendor/github.com/hashicorp/nomad/nomad/structs/funcs.go new file mode 100644 index 0000000000..751befd65d --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/funcs.go @@ -0,0 +1,310 @@ +package structs + +import ( + crand "crypto/rand" + "encoding/binary" + "fmt" + "math" + "sort" + "strings" + + "golang.org/x/crypto/blake2b" + + multierror "github.com/hashicorp/go-multierror" + lru "github.com/hashicorp/golang-lru" + "github.com/hashicorp/nomad/acl" +) + +// MergeMultierrorWarnings takes job warnings and canonicalize warnings and +// merges them into a returnable string. Both the errors may be nil. 
+func MergeMultierrorWarnings(warnings ...error) string { + var warningMsg multierror.Error + for _, warn := range warnings { + if warn != nil { + multierror.Append(&warningMsg, warn) + } + } + + if len(warningMsg.Errors) == 0 { + return "" + } + + // Set the formatter + warningMsg.ErrorFormat = warningsFormatter + return warningMsg.Error() +} + +// warningsFormatter is used to format job warnings +func warningsFormatter(es []error) string { + points := make([]string, len(es)) + for i, err := range es { + points[i] = fmt.Sprintf("* %s", err) + } + + return fmt.Sprintf( + "%d warning(s):\n\n%s", + len(es), strings.Join(points, "\n")) +} + +// RemoveAllocs is used to remove any allocs with the given IDs +// from the list of allocations +func RemoveAllocs(alloc []*Allocation, remove []*Allocation) []*Allocation { + // Convert remove into a set + removeSet := make(map[string]struct{}) + for _, remove := range remove { + removeSet[remove.ID] = struct{}{} + } + + n := len(alloc) + for i := 0; i < n; i++ { + if _, ok := removeSet[alloc[i].ID]; ok { + alloc[i], alloc[n-1] = alloc[n-1], nil + i-- + n-- + } + } + + alloc = alloc[:n] + return alloc +} + +// FilterTerminalAllocs filters out all allocations in a terminal state and +// returns the latest terminal allocations +func FilterTerminalAllocs(allocs []*Allocation) ([]*Allocation, map[string]*Allocation) { + terminalAllocsByName := make(map[string]*Allocation) + n := len(allocs) + for i := 0; i < n; i++ { + if allocs[i].TerminalStatus() { + + // Add the allocation to the terminal allocs map if it's not already + // added or has a higher create index than the one which is + // currently present. + alloc, ok := terminalAllocsByName[allocs[i].Name] + if !ok || alloc.CreateIndex < allocs[i].CreateIndex { + terminalAllocsByName[allocs[i].Name] = allocs[i] + } + + // Remove the allocation + allocs[i], allocs[n-1] = allocs[n-1], nil + i-- + n-- + } + } + return allocs[:n], terminalAllocsByName +} + +// AllocsFit checks if a given set of allocations will fit on a node. +// The netIdx can optionally be provided if its already been computed. +// If the netIdx is provided, it is assumed that the client has already +// ensured there are no collisions. +func AllocsFit(node *Node, allocs []*Allocation, netIdx *NetworkIndex) (bool, string, *Resources, error) { + // Compute the utilization from zero + used := new(Resources) + + // Add the reserved resources of the node + if node.Reserved != nil { + if err := used.Add(node.Reserved); err != nil { + return false, "", nil, err + } + } + + // For each alloc, add the resources + for _, alloc := range allocs { + if alloc.Resources != nil { + if err := used.Add(alloc.Resources); err != nil { + return false, "", nil, err + } + } else if alloc.TaskResources != nil { + + // Adding the shared resource asks for the allocation to the used + // resources + if err := used.Add(alloc.SharedResources); err != nil { + return false, "", nil, err + } + // Allocations within the plan have the combined resources stripped + // to save space, so sum up the individual task resources. 
+ for _, taskResource := range alloc.TaskResources { + if err := used.Add(taskResource); err != nil { + return false, "", nil, err + } + } + } else { + return false, "", nil, fmt.Errorf("allocation %q has no resources set", alloc.ID) + } + } + + // Check that the node resources are a super set of those + // that are being allocated + if superset, dimension := node.Resources.Superset(used); !superset { + return false, dimension, used, nil + } + + // Create the network index if missing + if netIdx == nil { + netIdx = NewNetworkIndex() + defer netIdx.Release() + if netIdx.SetNode(node) || netIdx.AddAllocs(allocs) { + return false, "reserved port collision", used, nil + } + } + + // Check if the network is overcommitted + if netIdx.Overcommitted() { + return false, "bandwidth exceeded", used, nil + } + + // Allocations fit! + return true, "", used, nil +} + +// ScoreFit is used to score the fit based on the Google work published here: +// http://www.columbia.edu/~cs2035/courses/ieor4405.S13/datacenter_scheduling.ppt +// This is equivalent to their BestFit v3 +func ScoreFit(node *Node, util *Resources) float64 { + // Determine the node availability + nodeCpu := float64(node.Resources.CPU) + if node.Reserved != nil { + nodeCpu -= float64(node.Reserved.CPU) + } + nodeMem := float64(node.Resources.MemoryMB) + if node.Reserved != nil { + nodeMem -= float64(node.Reserved.MemoryMB) + } + + // Compute the free percentage + freePctCpu := 1 - (float64(util.CPU) / nodeCpu) + freePctRam := 1 - (float64(util.MemoryMB) / nodeMem) + + // Total will be "maximized" the smaller the value is. + // At 100% utilization, the total is 2, while at 0% util it is 20. + total := math.Pow(10, freePctCpu) + math.Pow(10, freePctRam) + + // Invert so that the "maximized" total represents a high-value + // score. Because the floor is 20, we simply use that as an anchor. + // This means at a perfect fit, we return 18 as the score. + score := 20.0 - total + + // Bound the score, just in case + // If the score is over 18, that means we've overfit the node. + if score > 18.0 { + score = 18.0 + } else if score < 0 { + score = 0 + } + return score +} + +// GenerateUUID is used to generate a random UUID +func GenerateUUID() string { + buf := make([]byte, 16) + if _, err := crand.Read(buf); err != nil { + panic(fmt.Errorf("failed to read random bytes: %v", err)) + } + + return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x", + buf[0:4], + buf[4:6], + buf[6:8], + buf[8:10], + buf[10:16]) +} + +func CopySliceConstraints(s []*Constraint) []*Constraint { + l := len(s) + if l == 0 { + return nil + } + + c := make([]*Constraint, l) + for i, v := range s { + c[i] = v.Copy() + } + return c +} + +// VaultPoliciesSet takes the structure returned by VaultPolicies and returns +// the set of required policies +func VaultPoliciesSet(policies map[string]map[string]*Vault) []string { + set := make(map[string]struct{}) + + for _, tgp := range policies { + for _, tp := range tgp { + for _, p := range tp.Policies { + set[p] = struct{}{} + } + } + } + + flattened := make([]string, 0, len(set)) + for p := range set { + flattened = append(flattened, p) + } + return flattened +} + +// DenormalizeAllocationJobs is used to attach a job to all allocations that are +// non-terminal and do not have a job already. This is useful in cases where the +// job is normalized. 
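Editor's note (not part of the patch): a worked example of the ScoreFit arithmetic with hypothetical numbers, to make the bin-packing bias concrete.

// The node advertises 4000 MHz CPU and 8192 MB memory with nothing reserved;
// the proposed utilization is 2000 MHz and 4096 MB:
//   freePctCpu = 1 - 2000/4000 = 0.5
//   freePctRam = 1 - 4096/8192 = 0.5
//   total      = 10^0.5 + 10^0.5 ≈ 6.32
//   score      = 20 - 6.32 ≈ 13.68
// A fully packed node scores 18 (total = 2) and an idle node scores 0
// (total = 20), so tighter packing is rewarded.
node := &Node{Resources: &Resources{CPU: 4000, MemoryMB: 8192}}
util := &Resources{CPU: 2000, MemoryMB: 4096}
score := ScoreFit(node, util) // ≈ 13.68
_ = score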
+func DenormalizeAllocationJobs(job *Job, allocs []*Allocation) { + if job != nil { + for _, alloc := range allocs { + if alloc.Job == nil && !alloc.TerminalStatus() { + alloc.Job = job + } + } + } +} + +// AllocName returns the name of the allocation given the input. +func AllocName(job, group string, idx uint) string { + return fmt.Sprintf("%s.%s[%d]", job, group, idx) +} + +// ACLPolicyListHash returns a consistent hash for a set of policies. +func ACLPolicyListHash(policies []*ACLPolicy) string { + cacheKeyHash, err := blake2b.New256(nil) + if err != nil { + panic(err) + } + for _, policy := range policies { + cacheKeyHash.Write([]byte(policy.Name)) + binary.Write(cacheKeyHash, binary.BigEndian, policy.ModifyIndex) + } + cacheKey := string(cacheKeyHash.Sum(nil)) + return cacheKey +} + +// CompileACLObject compiles a set of ACL policies into an ACL object with a cache +func CompileACLObject(cache *lru.TwoQueueCache, policies []*ACLPolicy) (*acl.ACL, error) { + // Sort the policies to ensure consistent ordering + sort.Slice(policies, func(i, j int) bool { + return policies[i].Name < policies[j].Name + }) + + // Determine the cache key + cacheKey := ACLPolicyListHash(policies) + aclRaw, ok := cache.Get(cacheKey) + if ok { + return aclRaw.(*acl.ACL), nil + } + + // Parse the policies + parsed := make([]*acl.Policy, 0, len(policies)) + for _, policy := range policies { + p, err := acl.Parse(policy.Rules) + if err != nil { + return nil, fmt.Errorf("failed to parse %q: %v", policy.Name, err) + } + parsed = append(parsed, p) + } + + // Create the ACL object + aclObj, err := acl.NewACL(false, parsed) + if err != nil { + return nil, fmt.Errorf("failed to construct ACL: %v", err) + } + + // Update the cache + cache.Add(cacheKey, aclObj) + return aclObj, nil +} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/network.go b/vendor/github.com/hashicorp/nomad/nomad/structs/network.go new file mode 100644 index 0000000000..3f0ebff4f0 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/network.go @@ -0,0 +1,326 @@ +package structs + +import ( + "fmt" + "math/rand" + "net" + "sync" +) + +const ( + // MinDynamicPort is the smallest dynamic port generated + MinDynamicPort = 20000 + + // MaxDynamicPort is the largest dynamic port generated + MaxDynamicPort = 32000 + + // maxRandPortAttempts is the maximum number of attempt + // to assign a random port + maxRandPortAttempts = 20 + + // maxValidPort is the max valid port number + maxValidPort = 65536 +) + +var ( + // bitmapPool is used to pool the bitmaps used for port collision + // checking. They are fairly large (8K) so we can re-use them to + // avoid GC pressure. Care should be taken to call Clear() on any + // bitmap coming from the pool. 
+ bitmapPool = new(sync.Pool) +) + +// NetworkIndex is used to index the available network resources +// and the used network resources on a machine given allocations +type NetworkIndex struct { + AvailNetworks []*NetworkResource // List of available networks + AvailBandwidth map[string]int // Bandwidth by device + UsedPorts map[string]Bitmap // Ports by IP + UsedBandwidth map[string]int // Bandwidth by device +} + +// NewNetworkIndex is used to construct a new network index +func NewNetworkIndex() *NetworkIndex { + return &NetworkIndex{ + AvailBandwidth: make(map[string]int), + UsedPorts: make(map[string]Bitmap), + UsedBandwidth: make(map[string]int), + } +} + +// Release is called when the network index is no longer needed +// to attempt to re-use some of the memory it has allocated +func (idx *NetworkIndex) Release() { + for _, b := range idx.UsedPorts { + bitmapPool.Put(b) + } +} + +// Overcommitted checks if the network is overcommitted +func (idx *NetworkIndex) Overcommitted() bool { + for device, used := range idx.UsedBandwidth { + avail := idx.AvailBandwidth[device] + if used > avail { + return true + } + } + return false +} + +// SetNode is used to setup the available network resources. Returns +// true if there is a collision +func (idx *NetworkIndex) SetNode(node *Node) (collide bool) { + // Add the available CIDR blocks + for _, n := range node.Resources.Networks { + if n.Device != "" { + idx.AvailNetworks = append(idx.AvailNetworks, n) + idx.AvailBandwidth[n.Device] = n.MBits + } + } + + // Add the reserved resources + if r := node.Reserved; r != nil { + for _, n := range r.Networks { + if idx.AddReserved(n) { + collide = true + } + } + } + return +} + +// AddAllocs is used to add the used network resources. Returns +// true if there is a collision +func (idx *NetworkIndex) AddAllocs(allocs []*Allocation) (collide bool) { + for _, alloc := range allocs { + for _, task := range alloc.TaskResources { + if len(task.Networks) == 0 { + continue + } + n := task.Networks[0] + if idx.AddReserved(n) { + collide = true + } + } + } + return +} + +// AddReserved is used to add a reserved network usage, returns true +// if there is a port collision +func (idx *NetworkIndex) AddReserved(n *NetworkResource) (collide bool) { + // Add the port usage + used := idx.UsedPorts[n.IP] + if used == nil { + // Try to get a bitmap from the pool, else create + raw := bitmapPool.Get() + if raw != nil { + used = raw.(Bitmap) + used.Clear() + } else { + used, _ = NewBitmap(maxValidPort) + } + idx.UsedPorts[n.IP] = used + } + + for _, ports := range [][]Port{n.ReservedPorts, n.DynamicPorts} { + for _, port := range ports { + // Guard against invalid port + if port.Value < 0 || port.Value >= maxValidPort { + return true + } + if used.Check(uint(port.Value)) { + collide = true + } else { + used.Set(uint(port.Value)) + } + } + } + + // Add the bandwidth + idx.UsedBandwidth[n.Device] += n.MBits + return +} + +// yieldIP is used to iteratively invoke the callback with +// an available IP +func (idx *NetworkIndex) yieldIP(cb func(net *NetworkResource, ip net.IP) bool) { + inc := func(ip net.IP) { + for j := len(ip) - 1; j >= 0; j-- { + ip[j]++ + if ip[j] > 0 { + break + } + } + } + + for _, n := range idx.AvailNetworks { + ip, ipnet, err := net.ParseCIDR(n.CIDR) + if err != nil { + continue + } + for ip := ip.Mask(ipnet.Mask); ipnet.Contains(ip); inc(ip) { + if cb(n, ip) { + return + } + } + } +} + +// AssignNetwork is used to assign network resources given an ask. 
+// If the ask cannot be satisfied, returns nil +func (idx *NetworkIndex) AssignNetwork(ask *NetworkResource) (out *NetworkResource, err error) { + err = fmt.Errorf("no networks available") + idx.yieldIP(func(n *NetworkResource, ip net.IP) (stop bool) { + // Convert the IP to a string + ipStr := ip.String() + + // Check if we would exceed the bandwidth cap + availBandwidth := idx.AvailBandwidth[n.Device] + usedBandwidth := idx.UsedBandwidth[n.Device] + if usedBandwidth+ask.MBits > availBandwidth { + err = fmt.Errorf("bandwidth exceeded") + return + } + + used := idx.UsedPorts[ipStr] + + // Check if any of the reserved ports are in use + for _, port := range ask.ReservedPorts { + // Guard against invalid port + if port.Value < 0 || port.Value >= maxValidPort { + err = fmt.Errorf("invalid port %d (out of range)", port.Value) + return + } + + // Check if in use + if used != nil && used.Check(uint(port.Value)) { + err = fmt.Errorf("reserved port collision") + return + } + } + + // Create the offer + offer := &NetworkResource{ + Device: n.Device, + IP: ipStr, + MBits: ask.MBits, + ReservedPorts: ask.ReservedPorts, + DynamicPorts: ask.DynamicPorts, + } + + // Try to stochastically pick the dynamic ports as it is faster and + // lower memory usage. + var dynPorts []int + var dynErr error + dynPorts, dynErr = getDynamicPortsStochastic(used, ask) + if dynErr == nil { + goto BUILD_OFFER + } + + // Fall back to the precise method if the random sampling failed. + dynPorts, dynErr = getDynamicPortsPrecise(used, ask) + if dynErr != nil { + err = dynErr + return + } + + BUILD_OFFER: + for i, port := range dynPorts { + offer.DynamicPorts[i].Value = port + } + + // Stop, we have an offer! + out = offer + err = nil + return true + }) + return +} + +// getDynamicPortsPrecise takes the nodes used port bitmap which may be nil if +// no ports have been allocated yet, the network ask and returns a set of unused +// ports to fullfil the ask's DynamicPorts or an error if it failed. An error +// means the ask can not be satisfied as the method does a precise search. +func getDynamicPortsPrecise(nodeUsed Bitmap, ask *NetworkResource) ([]int, error) { + // Create a copy of the used ports and apply the new reserves + var usedSet Bitmap + var err error + if nodeUsed != nil { + usedSet, err = nodeUsed.Copy() + if err != nil { + return nil, err + } + } else { + usedSet, err = NewBitmap(maxValidPort) + if err != nil { + return nil, err + } + } + + for _, port := range ask.ReservedPorts { + usedSet.Set(uint(port.Value)) + } + + // Get the indexes of the unset + availablePorts := usedSet.IndexesInRange(false, MinDynamicPort, MaxDynamicPort) + + // Randomize the amount we need + numDyn := len(ask.DynamicPorts) + if len(availablePorts) < numDyn { + return nil, fmt.Errorf("dynamic port selection failed") + } + + numAvailable := len(availablePorts) + for i := 0; i < numDyn; i++ { + j := rand.Intn(numAvailable) + availablePorts[i], availablePorts[j] = availablePorts[j], availablePorts[i] + } + + return availablePorts[:numDyn], nil +} + +// getDynamicPortsStochastic takes the nodes used port bitmap which may be nil if +// no ports have been allocated yet, the network ask and returns a set of unused +// ports to fullfil the ask's DynamicPorts or an error if it failed. An error +// does not mean the ask can not be satisfied as the method has a fixed amount +// of random probes and if these fail, the search is aborted. 
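Editor's note (not part of the patch): a sketch of the NetworkIndex lifecycle when placing an allocation. The node and the ask below are hypothetical.

idx := NewNetworkIndex()
defer idx.Release()

node := &Node{Resources: &Resources{Networks: []*NetworkResource{
    {Device: "eth0", CIDR: "10.0.0.0/24", MBits: 1000},
}}}
idx.SetNode(node)  // index the node's available networks and reserved ports
idx.AddAllocs(nil) // no existing allocations in this sketch

ask := &NetworkResource{
    MBits:         50,
    ReservedPorts: []Port{{Label: "admin", Value: 8080}},
    DynamicPorts:  []Port{{Label: "http"}},
}
offer, err := idx.AssignNetwork(ask)
// On success, offer names the chosen device and IP and fills in a concrete
// value for the "http" dynamic port; err reports exceeded bandwidth or a
// reserved port collision when the ask cannot be satisfied.
_, _ = offer, err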
+func getDynamicPortsStochastic(nodeUsed Bitmap, ask *NetworkResource) ([]int, error) { + var reserved, dynamic []int + for _, port := range ask.ReservedPorts { + reserved = append(reserved, port.Value) + } + + for i := 0; i < len(ask.DynamicPorts); i++ { + attempts := 0 + PICK: + attempts++ + if attempts > maxRandPortAttempts { + return nil, fmt.Errorf("stochastic dynamic port selection failed") + } + + randPort := MinDynamicPort + rand.Intn(MaxDynamicPort-MinDynamicPort) + if nodeUsed != nil && nodeUsed.Check(uint(randPort)) { + goto PICK + } + + for _, ports := range [][]int{reserved, dynamic} { + if isPortReserved(ports, randPort) { + goto PICK + } + } + dynamic = append(dynamic, randPort) + } + + return dynamic, nil +} + +// IntContains scans an integer slice for a value +func isPortReserved(haystack []int, needle int) bool { + for _, item := range haystack { + if item == needle { + return true + } + } + return false +} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/node_class.go b/vendor/github.com/hashicorp/nomad/nomad/structs/node_class.go new file mode 100644 index 0000000000..aab0700550 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/node_class.go @@ -0,0 +1,94 @@ +package structs + +import ( + "fmt" + "strings" + + "github.com/mitchellh/hashstructure" +) + +const ( + // NodeUniqueNamespace is a prefix that can be appended to node meta or + // attribute keys to mark them for exclusion in computed node class. + NodeUniqueNamespace = "unique." +) + +// UniqueNamespace takes a key and returns the key marked under the unique +// namespace. +func UniqueNamespace(key string) string { + return fmt.Sprintf("%s%s", NodeUniqueNamespace, key) +} + +// IsUniqueNamespace returns whether the key is under the unique namespace. +func IsUniqueNamespace(key string) bool { + return strings.HasPrefix(key, NodeUniqueNamespace) +} + +// ComputeClass computes a derived class for the node based on its attributes. +// ComputedClass is a unique id that identifies nodes with a common set of +// attributes and capabilities. Thus, when calculating a node's computed class +// we avoid including any uniquely identifing fields. +func (n *Node) ComputeClass() error { + hash, err := hashstructure.Hash(n, nil) + if err != nil { + return err + } + + n.ComputedClass = fmt.Sprintf("v1:%d", hash) + return nil +} + +// HashInclude is used to blacklist uniquely identifying node fields from being +// included in the computed node class. +func (n Node) HashInclude(field string, v interface{}) (bool, error) { + switch field { + case "Datacenter", "Attributes", "Meta", "NodeClass": + return true, nil + default: + return false, nil + } +} + +// HashIncludeMap is used to blacklist uniquely identifying node map keys from being +// included in the computed node class. +func (n Node) HashIncludeMap(field string, k, v interface{}) (bool, error) { + key, ok := k.(string) + if !ok { + return false, fmt.Errorf("map key %v not a string", k) + } + + switch field { + case "Meta", "Attributes": + return !IsUniqueNamespace(key), nil + default: + return false, fmt.Errorf("unexpected map field: %v", field) + } +} + +// EscapedConstraints takes a set of constraints and returns the set that +// escapes computed node classes. 
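Editor's note (not part of the patch): a sketch of how the unique namespace interacts with the computed node class. Keys prefixed via UniqueNamespace are excluded from the hash, so nodes differing only in those keys share a ComputedClass; the node values are hypothetical.

n := &Node{
    Datacenter: "dc1",
    NodeClass:  "batch",
    Meta: map[string]string{
        "rack":                      "r1",
        UniqueNamespace("hostname"): "node-42", // "unique.hostname", ignored by the hash
    },
}
if err := n.ComputeClass(); err == nil {
    // n.ComputedClass now holds a value like "v1:<hash>" that stays the same
    // when only unique.* keys change.
    _ = n.ComputedClass
}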
+func EscapedConstraints(constraints []*Constraint) []*Constraint { + var escaped []*Constraint + for _, c := range constraints { + if constraintTargetEscapes(c.LTarget) || constraintTargetEscapes(c.RTarget) { + escaped = append(escaped, c) + } + } + + return escaped +} + +// constraintTargetEscapes returns whether the target of a constraint escapes +// computed node class optimization. +func constraintTargetEscapes(target string) bool { + switch { + case strings.HasPrefix(target, "${node.unique."): + return true + case strings.HasPrefix(target, "${attr.unique."): + return true + case strings.HasPrefix(target, "${meta.unique."): + return true + default: + return false + } +} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/operator.go b/vendor/github.com/hashicorp/nomad/nomad/structs/operator.go new file mode 100644 index 0000000000..93b99f6fb7 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/operator.go @@ -0,0 +1,49 @@ +package structs + +import ( + "github.com/hashicorp/raft" +) + +// RaftServer has information about a server in the Raft configuration. +type RaftServer struct { + // ID is the unique ID for the server. These are currently the same + // as the address, but they will be changed to a real GUID in a future + // release of Nomad. + ID raft.ServerID + + // Node is the node name of the server, as known by Nomad, or this + // will be set to "(unknown)" otherwise. + Node string + + // Address is the IP:port of the server, used for Raft communications. + Address raft.ServerAddress + + // Leader is true if this server is the current cluster leader. + Leader bool + + // Voter is true if this server has a vote in the cluster. This might + // be false if the server is staging and still coming online, or if + // it's a non-voting server, which will be added in a future release of + // Nomad. + Voter bool +} + +// RaftConfigrationResponse is returned when querying for the current Raft +// configuration. +type RaftConfigurationResponse struct { + // Servers has the list of servers in the Raft configuration. + Servers []*RaftServer + + // Index has the Raft index of this configuration. + Index uint64 +} + +// RaftPeerByAddressRequest is used by the Operator endpoint to apply a Raft +// operation on a specific Raft peer by address in the form of "IP:port". +type RaftPeerByAddressRequest struct { + // Address is the peer to remove, in the form "IP:port". + Address raft.ServerAddress + + // WriteRequest holds the Region for this request. 
+ WriteRequest +} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/structs.go b/vendor/github.com/hashicorp/nomad/nomad/structs/structs.go new file mode 100644 index 0000000000..1ca19e35d0 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/structs.go @@ -0,0 +1,5783 @@ +package structs + +import ( + "bytes" + "crypto/md5" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/hex" + "errors" + "fmt" + "io" + "net" + "os" + "path/filepath" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + + "golang.org/x/crypto/blake2b" + + "github.com/gorhill/cronexpr" + "github.com/hashicorp/consul/api" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-version" + "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/args" + "github.com/mitchellh/copystructure" + "github.com/ugorji/go/codec" + + hcodec "github.com/hashicorp/go-msgpack/codec" +) + +var ( + ErrNoLeader = fmt.Errorf("No cluster leader") + ErrNoRegionPath = fmt.Errorf("No path to region") + ErrTokenNotFound = errors.New("ACL token not found") + ErrPermissionDenied = errors.New("Permission denied") + + // validPolicyName is used to validate a policy name + validPolicyName = regexp.MustCompile("^[a-zA-Z0-9-]{1,128}$") +) + +type MessageType uint8 + +const ( + NodeRegisterRequestType MessageType = iota + NodeDeregisterRequestType + NodeUpdateStatusRequestType + NodeUpdateDrainRequestType + JobRegisterRequestType + JobDeregisterRequestType + EvalUpdateRequestType + EvalDeleteRequestType + AllocUpdateRequestType + AllocClientUpdateRequestType + ReconcileJobSummariesRequestType + VaultAccessorRegisterRequestType + VaultAccessorDegisterRequestType + ApplyPlanResultsRequestType + DeploymentStatusUpdateRequestType + DeploymentPromoteRequestType + DeploymentAllocHealthRequestType + DeploymentDeleteRequestType + JobStabilityRequestType + ACLPolicyUpsertRequestType + ACLPolicyDeleteRequestType + ACLTokenUpsertRequestType + ACLTokenDeleteRequestType + ACLTokenBootstrapRequestType +) + +const ( + // IgnoreUnknownTypeFlag is set along with a MessageType + // to indicate that the message type can be safely ignored + // if it is not recognized. This is for future proofing, so + // that new commands can be added in a way that won't cause + // old servers to crash when the FSM attempts to process them. + IgnoreUnknownTypeFlag MessageType = 128 + + // ApiMajorVersion is returned as part of the Status.Version request. + // It should be incremented anytime the APIs are changed in a way + // that would break clients for sane client versioning. + ApiMajorVersion = 1 + + // ApiMinorVersion is returned as part of the Status.Version request. + // It should be incremented anytime the APIs are changed to allow + // for sane client versioning. Minor changes should be compatible + // within the major version. + ApiMinorVersion = 1 + + ProtocolVersion = "protocol" + APIMajorVersion = "api.major" + APIMinorVersion = "api.minor" + + GetterModeAny = "any" + GetterModeFile = "file" + GetterModeDir = "dir" + + // maxPolicyDescriptionLength limits a policy description length + maxPolicyDescriptionLength = 256 + + // maxTokenNameLength limits a ACL token name length + maxTokenNameLength = 64 + + // ACLClientToken and ACLManagementToken are the only types of tokens + ACLClientToken = "client" + ACLManagementToken = "management" + + // DefaultNamespace is the default namespace. 
+ DefaultNamespace = "default" + DefaultNamespaceDescription = "Default shared namespace" +) + +// Context defines the scope in which a search for Nomad object operates, and +// is also used to query the matching index value for this context +type Context string + +const ( + Allocs Context = "allocs" + Deployments Context = "deployment" + Evals Context = "evals" + Jobs Context = "jobs" + Nodes Context = "nodes" + Namespaces Context = "namespaces" + All Context = "all" +) + +// NamespacedID is a tuple of an ID and a namespace +type NamespacedID struct { + ID string + Namespace string +} + +// RPCInfo is used to describe common information about query +type RPCInfo interface { + RequestRegion() string + IsRead() bool + AllowStaleRead() bool +} + +// QueryOptions is used to specify various flags for read queries +type QueryOptions struct { + // The target region for this query + Region string + + // Namespace is the target namespace for the query. + Namespace string + + // If set, wait until query exceeds given index. Must be provided + // with MaxQueryTime. + MinQueryIndex uint64 + + // Provided with MinQueryIndex to wait for change. + MaxQueryTime time.Duration + + // If set, any follower can service the request. Results + // may be arbitrarily stale. + AllowStale bool + + // If set, used as prefix for resource list searches + Prefix string + + // SecretID is secret portion of the ACL token used for the request + SecretID string +} + +func (q QueryOptions) RequestRegion() string { + return q.Region +} + +func (q QueryOptions) RequestNamespace() string { + if q.Namespace == "" { + return DefaultNamespace + } + return q.Namespace +} + +// QueryOption only applies to reads, so always true +func (q QueryOptions) IsRead() bool { + return true +} + +func (q QueryOptions) AllowStaleRead() bool { + return q.AllowStale +} + +type WriteRequest struct { + // The target region for this write + Region string + + // Namespace is the target namespace for the write. + Namespace string + + // SecretID is secret portion of the ACL token used for the request + SecretID string +} + +func (w WriteRequest) RequestRegion() string { + // The target region for this request + return w.Region +} + +func (w WriteRequest) RequestNamespace() string { + if w.Namespace == "" { + return DefaultNamespace + } + return w.Namespace +} + +// WriteRequest only applies to writes, always false +func (w WriteRequest) IsRead() bool { + return false +} + +func (w WriteRequest) AllowStaleRead() bool { + return false +} + +// QueryMeta allows a query response to include potentially +// useful metadata about a query +type QueryMeta struct { + // This is the index associated with the read + Index uint64 + + // If AllowStale is used, this is time elapsed since + // last contact between the follower and leader. This + // can be used to gauge staleness. + LastContact time.Duration + + // Used to indicate if there is a known leader node + KnownLeader bool +} + +// WriteMeta allows a write response to include potentially +// useful metadata about the write +type WriteMeta struct { + // This is the index associated with the write + Index uint64 +} + +// NodeRegisterRequest is used for Node.Register endpoint +// to register a node as being a schedulable entity. +type NodeRegisterRequest struct { + Node *Node + WriteRequest +} + +// NodeDeregisterRequest is used for Node.Deregister endpoint +// to deregister a node as being a schedulable entity. 
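Editor's note (not part of the patch): a sketch of the namespace defaulting and read/write semantics on QueryOptions and WriteRequest; the region and namespace values are hypothetical.

q := QueryOptions{Region: "global"}
_ = q.RequestNamespace() // "default": an empty Namespace falls back to DefaultNamespace
_ = q.IsRead()           // true: query options always describe a read
_ = q.AllowStaleRead()   // false here, true only when AllowStale is set

w := WriteRequest{Region: "global", Namespace: "team-a"}
_ = w.RequestNamespace() // "team-a"
_ = w.IsRead()           // false: writes are never reads
_ = w.AllowStaleRead()   // false: writes always go through the leader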
+type NodeDeregisterRequest struct { + NodeID string + WriteRequest +} + +// NodeServerInfo is used to in NodeUpdateResponse to return Nomad server +// information used in RPC server lists. +type NodeServerInfo struct { + // RPCAdvertiseAddr is the IP endpoint that a Nomad Server wishes to + // be contacted at for RPCs. + RPCAdvertiseAddr string + + // RpcMajorVersion is the major version number the Nomad Server + // supports + RPCMajorVersion int32 + + // RpcMinorVersion is the minor version number the Nomad Server + // supports + RPCMinorVersion int32 + + // Datacenter is the datacenter that a Nomad server belongs to + Datacenter string +} + +// NodeUpdateStatusRequest is used for Node.UpdateStatus endpoint +// to update the status of a node. +type NodeUpdateStatusRequest struct { + NodeID string + Status string + WriteRequest +} + +// NodeUpdateDrainRequest is used for updatin the drain status +type NodeUpdateDrainRequest struct { + NodeID string + Drain bool + WriteRequest +} + +// NodeEvaluateRequest is used to re-evaluate the ndoe +type NodeEvaluateRequest struct { + NodeID string + WriteRequest +} + +// NodeSpecificRequest is used when we just need to specify a target node +type NodeSpecificRequest struct { + NodeID string + SecretID string + QueryOptions +} + +// SearchResponse is used to return matches and information about whether +// the match list is truncated specific to each type of context. +type SearchResponse struct { + // Map of context types to ids which match a specified prefix + Matches map[Context][]string + + // Truncations indicates whether the matches for a particular context have + // been truncated + Truncations map[Context]bool + + QueryMeta +} + +// SearchRequest is used to parameterize a request, and returns a +// list of matches made up of jobs, allocations, evaluations, and/or nodes, +// along with whether or not the information returned is truncated. +type SearchRequest struct { + // Prefix is what ids are matched to. I.e, if the given prefix were + // "a", potential matches might be "abcd" or "aabb" + Prefix string + + // Context is the type that can be matched against. A context can be a job, + // node, evaluation, allocation, or empty (indicated every context should be + // matched) + Context Context + + QueryOptions +} + +// JobRegisterRequest is used for Job.Register endpoint +// to register a job as being a schedulable entity. +type JobRegisterRequest struct { + Job *Job + + // If EnforceIndex is set then the job will only be registered if the passed + // JobModifyIndex matches the current Jobs index. If the index is zero, the + // register only occurs if the job is new. + EnforceIndex bool + JobModifyIndex uint64 + + // PolicyOverride is set when the user is attempting to override any policies + PolicyOverride bool + + WriteRequest +} + +// JobDeregisterRequest is used for Job.Deregister endpoint +// to deregister a job as being a schedulable entity. 
+type JobDeregisterRequest struct { + JobID string + + // Purge controls whether the deregister purges the job from the system or + // whether the job is just marked as stopped and will be removed by the + // garbage collector + Purge bool + + WriteRequest +} + +// JobEvaluateRequest is used when we just need to re-evaluate a target job +type JobEvaluateRequest struct { + JobID string + WriteRequest +} + +// JobSpecificRequest is used when we just need to specify a target job +type JobSpecificRequest struct { + JobID string + AllAllocs bool + QueryOptions +} + +// JobListRequest is used to parameterize a list request +type JobListRequest struct { + QueryOptions +} + +// JobPlanRequest is used for the Job.Plan endpoint to trigger a dry-run +// evaluation of the Job. +type JobPlanRequest struct { + Job *Job + Diff bool // Toggles an annotated diff + // PolicyOverride is set when the user is attempting to override any policies + PolicyOverride bool + WriteRequest +} + +// JobSummaryRequest is used when we just need to get a specific job summary +type JobSummaryRequest struct { + JobID string + QueryOptions +} + +// JobDispatchRequest is used to dispatch a job based on a parameterized job +type JobDispatchRequest struct { + JobID string + Payload []byte + Meta map[string]string + WriteRequest +} + +// JobValidateRequest is used to validate a job +type JobValidateRequest struct { + Job *Job + WriteRequest +} + +// JobRevertRequest is used to revert a job to a prior version. +type JobRevertRequest struct { + // JobID is the ID of the job being reverted + JobID string + + // JobVersion the version to revert to. + JobVersion uint64 + + // EnforcePriorVersion if set will enforce that the job is at the given + // version before reverting. + EnforcePriorVersion *uint64 + + WriteRequest +} + +// JobStabilityRequest is used to marked a job as stable. +type JobStabilityRequest struct { + // Job to set the stability on + JobID string + JobVersion uint64 + + // Set the stability + Stable bool + WriteRequest +} + +// JobStabilityResponse is the response when marking a job as stable. +type JobStabilityResponse struct { + WriteMeta +} + +// NodeListRequest is used to parameterize a list request +type NodeListRequest struct { + QueryOptions +} + +// EvalUpdateRequest is used for upserting evaluations. +type EvalUpdateRequest struct { + Evals []*Evaluation + EvalToken string + WriteRequest +} + +// EvalDeleteRequest is used for deleting an evaluation. +type EvalDeleteRequest struct { + Evals []string + Allocs []string + WriteRequest +} + +// EvalSpecificRequest is used when we just need to specify a target evaluation +type EvalSpecificRequest struct { + EvalID string + QueryOptions +} + +// EvalAckRequest is used to Ack/Nack a specific evaluation +type EvalAckRequest struct { + EvalID string + Token string + WriteRequest +} + +// EvalDequeueRequest is used when we want to dequeue an evaluation +type EvalDequeueRequest struct { + Schedulers []string + Timeout time.Duration + SchedulerVersion uint16 + WriteRequest +} + +// EvalListRequest is used to list the evaluations +type EvalListRequest struct { + QueryOptions +} + +// PlanRequest is used to submit an allocation plan to the leader +type PlanRequest struct { + Plan *Plan + WriteRequest +} + +// ApplyPlanResultsRequest is used by the planner to apply a Raft transaction +// committing the result of a plan. +type ApplyPlanResultsRequest struct { + // AllocUpdateRequest holds the allocation updates to be made by the + // scheduler. 
+ AllocUpdateRequest + + // Deployment is the deployment created or updated as a result of a + // scheduling event. + Deployment *Deployment + + // DeploymentUpdates is a set of status updates to apply to the given + // deployments. This allows the scheduler to cancel any unneeded deployment + // because the job is stopped or the update block is removed. + DeploymentUpdates []*DeploymentStatusUpdate +} + +// AllocUpdateRequest is used to submit changes to allocations, either +// to cause evictions or to assign new allocaitons. Both can be done +// within a single transaction +type AllocUpdateRequest struct { + // Alloc is the list of new allocations to assign + Alloc []*Allocation + + // Job is the shared parent job of the allocations. + // It is pulled out since it is common to reduce payload size. + Job *Job + + WriteRequest +} + +// AllocListRequest is used to request a list of allocations +type AllocListRequest struct { + QueryOptions +} + +// AllocSpecificRequest is used to query a specific allocation +type AllocSpecificRequest struct { + AllocID string + QueryOptions +} + +// AllocsGetRequest is used to query a set of allocations +type AllocsGetRequest struct { + AllocIDs []string + QueryOptions +} + +// PeriodicForceReqeuest is used to force a specific periodic job. +type PeriodicForceRequest struct { + JobID string + WriteRequest +} + +// ServerMembersResponse has the list of servers in a cluster +type ServerMembersResponse struct { + ServerName string + ServerRegion string + ServerDC string + Members []*ServerMember +} + +// ServerMember holds information about a Nomad server agent in a cluster +type ServerMember struct { + Name string + Addr net.IP + Port uint16 + Tags map[string]string + Status string + ProtocolMin uint8 + ProtocolMax uint8 + ProtocolCur uint8 + DelegateMin uint8 + DelegateMax uint8 + DelegateCur uint8 +} + +// DeriveVaultTokenRequest is used to request wrapped Vault tokens for the +// following tasks in the given allocation +type DeriveVaultTokenRequest struct { + NodeID string + SecretID string + AllocID string + Tasks []string + QueryOptions +} + +// VaultAccessorsRequest is used to operate on a set of Vault accessors +type VaultAccessorsRequest struct { + Accessors []*VaultAccessor +} + +// VaultAccessor is a reference to a created Vault token on behalf of +// an allocation's task. +type VaultAccessor struct { + AllocID string + Task string + NodeID string + Accessor string + CreationTTL int + + // Raft Indexes + CreateIndex uint64 +} + +// DeriveVaultTokenResponse returns the wrapped tokens for each requested task +type DeriveVaultTokenResponse struct { + // Tasks is a mapping between the task name and the wrapped token + Tasks map[string]string + + // Error stores any error that occurred. Errors are stored here so we can + // communicate whether it is retriable + Error *RecoverableError + + QueryMeta +} + +// GenericRequest is used to request where no +// specific information is needed. +type GenericRequest struct { + QueryOptions +} + +// DeploymentListRequest is used to list the deployments +type DeploymentListRequest struct { + QueryOptions +} + +// DeploymentDeleteRequest is used for deleting deployments. +type DeploymentDeleteRequest struct { + Deployments []string + WriteRequest +} + +// DeploymentStatusUpdateRequest is used to update the status of a deployment as +// well as optionally creating an evaluation atomically. 
+type DeploymentStatusUpdateRequest struct { + // Eval, if set, is used to create an evaluation at the same time as + // updating the status of a deployment. + Eval *Evaluation + + // DeploymentUpdate is a status update to apply to the given + // deployment. + DeploymentUpdate *DeploymentStatusUpdate + + // Job is used to optionally upsert a job. This is used when setting the + // allocation health results in a deployment failure and the deployment + // auto-reverts to the latest stable job. + Job *Job +} + +// DeploymentAllocHealthRequest is used to set the health of a set of +// allocations as part of a deployment. +type DeploymentAllocHealthRequest struct { + DeploymentID string + + // Marks these allocations as healthy, allow further allocations + // to be rolled. + HealthyAllocationIDs []string + + // Any unhealthy allocations fail the deployment + UnhealthyAllocationIDs []string + + WriteRequest +} + +// ApplyDeploymentAllocHealthRequest is used to apply an alloc health request via Raft +type ApplyDeploymentAllocHealthRequest struct { + DeploymentAllocHealthRequest + + // An optional field to update the status of a deployment + DeploymentUpdate *DeploymentStatusUpdate + + // Job is used to optionally upsert a job. This is used when setting the + // allocation health results in a deployment failure and the deployment + // auto-reverts to the latest stable job. + Job *Job + + // An optional evaluation to create after promoting the canaries + Eval *Evaluation +} + +// DeploymentPromoteRequest is used to promote task groups in a deployment +type DeploymentPromoteRequest struct { + DeploymentID string + + // All is to promote all task groups + All bool + + // Groups is used to set the promotion status per task group + Groups []string + + WriteRequest +} + +// ApplyDeploymentPromoteRequest is used to apply a promotion request via Raft +type ApplyDeploymentPromoteRequest struct { + DeploymentPromoteRequest + + // An optional evaluation to create after promoting the canaries + Eval *Evaluation +} + +// DeploymentPauseRequest is used to pause a deployment +type DeploymentPauseRequest struct { + DeploymentID string + + // Pause sets the pause status + Pause bool + + WriteRequest +} + +// DeploymentSpecificRequest is used to make a request specific to a particular +// deployment +type DeploymentSpecificRequest struct { + DeploymentID string + QueryOptions +} + +// DeploymentFailRequest is used to fail a particular deployment +type DeploymentFailRequest struct { + DeploymentID string + WriteRequest +} + +// SingleDeploymentResponse is used to respond with a single deployment +type SingleDeploymentResponse struct { + Deployment *Deployment + QueryMeta +} + +// GenericResponse is used to respond to a request where no +// specific response information is needed. +type GenericResponse struct { + WriteMeta +} + +// VersionResponse is used for the Status.Version reseponse +type VersionResponse struct { + Build string + Versions map[string]int + QueryMeta +} + +// JobRegisterResponse is used to respond to a job registration +type JobRegisterResponse struct { + EvalID string + EvalCreateIndex uint64 + JobModifyIndex uint64 + + // Warnings contains any warnings about the given job. These may include + // deprecation warnings. 
+ Warnings string + + QueryMeta +} + +// JobDeregisterResponse is used to respond to a job deregistration +type JobDeregisterResponse struct { + EvalID string + EvalCreateIndex uint64 + JobModifyIndex uint64 + QueryMeta +} + +// JobValidateResponse is the response from validate request +type JobValidateResponse struct { + // DriverConfigValidated indicates whether the agent validated the driver + // config + DriverConfigValidated bool + + // ValidationErrors is a list of validation errors + ValidationErrors []string + + // Error is a string version of any error that may have occurred + Error string + + // Warnings contains any warnings about the given job. These may include + // deprecation warnings. + Warnings string +} + +// NodeUpdateResponse is used to respond to a node update +type NodeUpdateResponse struct { + HeartbeatTTL time.Duration + EvalIDs []string + EvalCreateIndex uint64 + NodeModifyIndex uint64 + + // LeaderRPCAddr is the RPC address of the current Raft Leader. If + // empty, the current Nomad Server is in the minority of a partition. + LeaderRPCAddr string + + // NumNodes is the number of Nomad nodes attached to this quorum of + // Nomad Servers at the time of the response. This value can + // fluctuate based on the health of the cluster between heartbeats. + NumNodes int32 + + // Servers is the full list of known Nomad servers in the local + // region. + Servers []*NodeServerInfo + + QueryMeta +} + +// NodeDrainUpdateResponse is used to respond to a node drain update +type NodeDrainUpdateResponse struct { + EvalIDs []string + EvalCreateIndex uint64 + NodeModifyIndex uint64 + QueryMeta +} + +// NodeAllocsResponse is used to return allocs for a single node +type NodeAllocsResponse struct { + Allocs []*Allocation + QueryMeta +} + +// NodeClientAllocsResponse is used to return allocs meta data for a single node +type NodeClientAllocsResponse struct { + Allocs map[string]uint64 + QueryMeta +} + +// SingleNodeResponse is used to return a single node +type SingleNodeResponse struct { + Node *Node + QueryMeta +} + +// NodeListResponse is used for a list request +type NodeListResponse struct { + Nodes []*NodeListStub + QueryMeta +} + +// SingleJobResponse is used to return a single job +type SingleJobResponse struct { + Job *Job + QueryMeta +} + +// JobSummaryResponse is used to return a single job summary +type JobSummaryResponse struct { + JobSummary *JobSummary + QueryMeta +} + +type JobDispatchResponse struct { + DispatchedJobID string + EvalID string + EvalCreateIndex uint64 + JobCreateIndex uint64 + WriteMeta +} + +// JobListResponse is used for a list request +type JobListResponse struct { + Jobs []*JobListStub + QueryMeta +} + +// JobVersionsRequest is used to get a jobs versions +type JobVersionsRequest struct { + JobID string + Diffs bool + QueryOptions +} + +// JobVersionsResponse is used for a job get versions request +type JobVersionsResponse struct { + Versions []*Job + Diffs []*JobDiff + QueryMeta +} + +// JobPlanResponse is used to respond to a job plan request +type JobPlanResponse struct { + // Annotations stores annotations explaining decisions the scheduler made. + Annotations *PlanAnnotations + + // FailedTGAllocs is the placement failures per task group. + FailedTGAllocs map[string]*AllocMetric + + // JobModifyIndex is the modification index of the job. The value can be + // used when running `nomad run` to ensure that the Job wasn’t modified + // since the last plan. If the job is being created, the value is zero. 
+ JobModifyIndex uint64 + + // CreatedEvals is the set of evaluations created by the scheduler. The + // reasons for this can be rolling-updates or blocked evals. + CreatedEvals []*Evaluation + + // Diff contains the diff of the job and annotations on whether the change + // causes an in-place update or create/destroy + Diff *JobDiff + + // NextPeriodicLaunch is the time duration till the job would be launched if + // submitted. + NextPeriodicLaunch time.Time + + // Warnings contains any warnings about the given job. These may include + // deprecation warnings. + Warnings string + + WriteMeta +} + +// SingleAllocResponse is used to return a single allocation +type SingleAllocResponse struct { + Alloc *Allocation + QueryMeta +} + +// AllocsGetResponse is used to return a set of allocations +type AllocsGetResponse struct { + Allocs []*Allocation + QueryMeta +} + +// JobAllocationsResponse is used to return the allocations for a job +type JobAllocationsResponse struct { + Allocations []*AllocListStub + QueryMeta +} + +// JobEvaluationsResponse is used to return the evaluations for a job +type JobEvaluationsResponse struct { + Evaluations []*Evaluation + QueryMeta +} + +// SingleEvalResponse is used to return a single evaluation +type SingleEvalResponse struct { + Eval *Evaluation + QueryMeta +} + +// EvalDequeueResponse is used to return from a dequeue +type EvalDequeueResponse struct { + Eval *Evaluation + Token string + + // WaitIndex is the Raft index the worker should wait until invoking the + // scheduler. + WaitIndex uint64 + + QueryMeta +} + +// GetWaitIndex is used to retrieve the Raft index in which state should be at +// or beyond before invoking the scheduler. +func (e *EvalDequeueResponse) GetWaitIndex() uint64 { + // Prefer the wait index sent. This will be populated on all responses from + // 0.7.0 and above + if e.WaitIndex != 0 { + return e.WaitIndex + } else if e.Eval != nil { + return e.Eval.ModifyIndex + } + + // This should never happen + return 1 +} + +// PlanResponse is used to return from a PlanRequest +type PlanResponse struct { + Result *PlanResult + WriteMeta +} + +// AllocListResponse is used for a list request +type AllocListResponse struct { + Allocations []*AllocListStub + QueryMeta +} + +// DeploymentListResponse is used for a list request +type DeploymentListResponse struct { + Deployments []*Deployment + QueryMeta +} + +// EvalListResponse is used for a list request +type EvalListResponse struct { + Evaluations []*Evaluation + QueryMeta +} + +// EvalAllocationsResponse is used to return the allocations for an evaluation +type EvalAllocationsResponse struct { + Allocations []*AllocListStub + QueryMeta +} + +// PeriodicForceResponse is used to respond to a periodic job force launch +type PeriodicForceResponse struct { + EvalID string + EvalCreateIndex uint64 + WriteMeta +} + +// DeploymentUpdateResponse is used to respond to a deployment change. The +// response will include the modify index of the deployment as well as details +// of any triggered evaluation. +type DeploymentUpdateResponse struct { + EvalID string + EvalCreateIndex uint64 + DeploymentModifyIndex uint64 + + // RevertedJobVersion is the version the job was reverted to. If unset, the + // job wasn't reverted + RevertedJobVersion *uint64 + + WriteMeta +} + +const ( + NodeStatusInit = "initializing" + NodeStatusReady = "ready" + NodeStatusDown = "down" +) + +// ShouldDrainNode checks if a given node status should trigger an +// evaluation. Some states don't require any further action. 
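+
+// Editorial note (illustrative sketch, not upstream code): per the comment on
+// GetWaitIndex above, a worker that has dequeued an evaluation is expected to
+// wait until its state snapshot reaches the returned index before invoking
+// the scheduler, roughly:
+//
+//	var resp EvalDequeueResponse
+//	// ...dequeue RPC elided...
+//	waitIndex := resp.GetWaitIndex()
+//	// block until local state is at or beyond waitIndex,
+//	// then hand resp.Eval to the scheduler
+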
+func ShouldDrainNode(status string) bool {
+ switch status {
+ case NodeStatusInit, NodeStatusReady:
+ return false
+ case NodeStatusDown:
+ return true
+ default:
+ panic(fmt.Sprintf("unhandled node status %s", status))
+ }
+}
+
+// ValidNodeStatus is used to check if a node status is valid
+func ValidNodeStatus(status string) bool {
+ switch status {
+ case NodeStatusInit, NodeStatusReady, NodeStatusDown:
+ return true
+ default:
+ return false
+ }
+}
+
+// Node is a representation of a schedulable client node
+type Node struct {
+ // ID is a unique identifier for the node. It can be constructed
+ // by doing a concatenation of the Name and Datacenter as a simple
+ // approach. Alternatively a UUID may be used.
+ ID string
+
+ // SecretID is an ID that is only known by the Node and the set of Servers.
+ // It is not accessible via the API and is used to authenticate nodes
+ // conducting privileged activities.
+ SecretID string
+
+ // Datacenter for this node
+ Datacenter string
+
+ // Node name
+ Name string
+
+ // HTTPAddr is the address on which the Nomad client is listening for http
+ // requests
+ HTTPAddr string
+
+ // TLSEnabled indicates if the Agent has TLS enabled for the HTTP API
+ TLSEnabled bool
+
+ // Attributes is an arbitrary set of key/value
+ // data that can be used for constraints. Examples
+ // include "kernel.name=linux", "arch=386", "driver.docker=1",
+ // "docker.runtime=1.8.3"
+ Attributes map[string]string
+
+ // Resources is the available resources on the client.
+ // For example 'cpu=2' 'memory=2048'
+ Resources *Resources
+
+ // Reserved is the set of resources that are reserved,
+ // and should be subtracted from the total resources for
+ // the purposes of scheduling. This may be to provide certain
+ // high-watermark tolerances or because of external schedulers
+ // consuming resources.
+ Reserved *Resources
+
+ // Links are used to 'link' this client to external
+ // systems. For example 'consul=foo.dc1' 'aws=i-83212'
+ // 'ami=ami-123'
+ Links map[string]string
+
+ // Meta is used to associate arbitrary metadata with this
+ // client. This is opaque to Nomad.
+ Meta map[string]string
+
+ // NodeClass is an opaque identifier used to group nodes
+ // together for the purpose of determining scheduling pressure.
+ NodeClass string
+
+ // ComputedClass is a unique id that identifies nodes with a common set of
+ // attributes and capabilities.
+ ComputedClass string
+
+ // Drain is controlled by the servers, and not the client.
+ // If true, no jobs will be scheduled to this node, and existing
+ // allocations will be drained.
+ Drain bool
+
+ // Status of this node
+ Status string
+
+ // StatusDescription is meant to provide more human useful information
+ StatusDescription string
+
+ // StatusUpdatedAt is the time stamp at which the state of the node was
+ // updated
+ StatusUpdatedAt int64
+
+ // Raft Indexes
+ CreateIndex uint64
+ ModifyIndex uint64
+}
+
+// Ready returns if the node is ready for running allocations
+func (n *Node) Ready() bool {
+ return n.Status == NodeStatusReady && !n.Drain
+}
+
+func (n *Node) Copy() *Node {
+ if n == nil {
+ return nil
+ }
+ nn := new(Node)
+ *nn = *n
+ nn.Attributes = helper.CopyMapStringString(nn.Attributes)
+ nn.Resources = nn.Resources.Copy()
+ nn.Reserved = nn.Reserved.Copy()
+ nn.Links = helper.CopyMapStringString(nn.Links)
+ nn.Meta = helper.CopyMapStringString(nn.Meta)
+ return nn
+}
+
+// TerminalStatus returns if the current status is terminal and
+// will no longer transition.
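+
+// Editorial example (sketch, not upstream code): a node only receives new
+// placements while it is ready and not draining, and of the statuses above
+// only a transition to "down" forces an evaluation:
+//
+//	n := &Node{Status: NodeStatusReady}
+//	fmt.Println(n.Ready())                       // true
+//	n.Drain = true
+//	fmt.Println(n.Ready())                       // false: existing allocs drain
+//	fmt.Println(ShouldDrainNode(NodeStatusDown)) // true
+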
+func (n *Node) TerminalStatus() bool {
+ switch n.Status {
+ case NodeStatusDown:
+ return true
+ default:
+ return false
+ }
+}
+
+// Stub returns a summarized version of the node
+func (n *Node) Stub() *NodeListStub {
+ return &NodeListStub{
+ ID: n.ID,
+ Datacenter: n.Datacenter,
+ Name: n.Name,
+ NodeClass: n.NodeClass,
+ Version: n.Attributes["nomad.version"],
+ Drain: n.Drain,
+ Status: n.Status,
+ StatusDescription: n.StatusDescription,
+ CreateIndex: n.CreateIndex,
+ ModifyIndex: n.ModifyIndex,
+ }
+}
+
+// NodeListStub is used to return a subset of node information
+// for the node list
+type NodeListStub struct {
+ ID string
+ Datacenter string
+ Name string
+ NodeClass string
+ Version string
+ Drain bool
+ Status string
+ StatusDescription string
+ CreateIndex uint64
+ ModifyIndex uint64
+}
+
+// Networks defined for a task on the Resources struct.
+type Networks []*NetworkResource
+
+// Port assignment and IP for the given label or empty values.
+func (ns Networks) Port(label string) (string, int) {
+ for _, n := range ns {
+ for _, p := range n.ReservedPorts {
+ if p.Label == label {
+ return n.IP, p.Value
+ }
+ }
+ for _, p := range n.DynamicPorts {
+ if p.Label == label {
+ return n.IP, p.Value
+ }
+ }
+ }
+ return "", 0
+}
+
+// Resources is used to define the resources available
+// on a client
+type Resources struct {
+ CPU int
+ MemoryMB int
+ DiskMB int
+ IOPS int
+ Networks Networks
+}
+
+const (
+ BytesInMegabyte = 1024 * 1024
+)
+
+// DefaultResources returns the default resources for a task.
+func DefaultResources() *Resources {
+ return &Resources{
+ CPU: 100,
+ MemoryMB: 10,
+ IOPS: 0,
+ }
+}
+
+// DiskInBytes returns the amount of disk resources in bytes.
+func (r *Resources) DiskInBytes() int64 {
+ return int64(r.DiskMB * BytesInMegabyte)
+}
+
+// Merge merges this resource with another resource.
+func (r *Resources) Merge(other *Resources) {
+ if other.CPU != 0 {
+ r.CPU = other.CPU
+ }
+ if other.MemoryMB != 0 {
+ r.MemoryMB = other.MemoryMB
+ }
+ if other.DiskMB != 0 {
+ r.DiskMB = other.DiskMB
+ }
+ if other.IOPS != 0 {
+ r.IOPS = other.IOPS
+ }
+ if len(other.Networks) != 0 {
+ r.Networks = other.Networks
+ }
+}
+
+func (r *Resources) Canonicalize() {
+ // Ensure that empty and nil slices are treated the same to avoid scheduling
+ // problems since we use reflect DeepEquals.
+ if len(r.Networks) == 0 {
+ r.Networks = nil
+ }
+
+ for _, n := range r.Networks {
+ n.Canonicalize()
+ }
+}
+
+// MeetsMinResources returns an error if the resources specified are less than
+// the minimum allowed.
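+
+// Editorial example (sketch, not upstream code): Port resolves a port label
+// to the host IP and port across both reserved and dynamic ports; the label
+// and address below are made up:
+//
+//	nets := Networks{
+//		{IP: "10.0.0.5", DynamicPorts: []Port{{Label: "http", Value: 23456}}},
+//	}
+//	fmt.Println(nets.Port("http")) // 10.0.0.5 23456
+//	fmt.Println(nets.Port("db"))   // returns "", 0 for an unknown label
+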
+func (r *Resources) MeetsMinResources() error { + var mErr multierror.Error + if r.CPU < 20 { + mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum CPU value is 20; got %d", r.CPU)) + } + if r.MemoryMB < 10 { + mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum MemoryMB value is 10; got %d", r.MemoryMB)) + } + if r.IOPS < 0 { + mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum IOPS value is 0; got %d", r.IOPS)) + } + for i, n := range r.Networks { + if err := n.MeetsMinResources(); err != nil { + mErr.Errors = append(mErr.Errors, fmt.Errorf("network resource at index %d failed: %v", i, err)) + } + } + + return mErr.ErrorOrNil() +} + +// Copy returns a deep copy of the resources +func (r *Resources) Copy() *Resources { + if r == nil { + return nil + } + newR := new(Resources) + *newR = *r + if r.Networks != nil { + n := len(r.Networks) + newR.Networks = make([]*NetworkResource, n) + for i := 0; i < n; i++ { + newR.Networks[i] = r.Networks[i].Copy() + } + } + return newR +} + +// NetIndex finds the matching net index using device name +func (r *Resources) NetIndex(n *NetworkResource) int { + for idx, net := range r.Networks { + if net.Device == n.Device { + return idx + } + } + return -1 +} + +// Superset checks if one set of resources is a superset +// of another. This ignores network resources, and the NetworkIndex +// should be used for that. +func (r *Resources) Superset(other *Resources) (bool, string) { + if r.CPU < other.CPU { + return false, "cpu exhausted" + } + if r.MemoryMB < other.MemoryMB { + return false, "memory exhausted" + } + if r.DiskMB < other.DiskMB { + return false, "disk exhausted" + } + if r.IOPS < other.IOPS { + return false, "iops exhausted" + } + return true, "" +} + +// Add adds the resources of the delta to this, potentially +// returning an error if not possible. +func (r *Resources) Add(delta *Resources) error { + if delta == nil { + return nil + } + r.CPU += delta.CPU + r.MemoryMB += delta.MemoryMB + r.DiskMB += delta.DiskMB + r.IOPS += delta.IOPS + + for _, n := range delta.Networks { + // Find the matching interface by IP or CIDR + idx := r.NetIndex(n) + if idx == -1 { + r.Networks = append(r.Networks, n.Copy()) + } else { + r.Networks[idx].Add(n) + } + } + return nil +} + +func (r *Resources) GoString() string { + return fmt.Sprintf("*%#v", *r) +} + +type Port struct { + Label string + Value int +} + +// NetworkResource is used to represent available network +// resources +type NetworkResource struct { + Device string // Name of the device + CIDR string // CIDR block of addresses + IP string // Host IP address + MBits int // Throughput + ReservedPorts []Port // Host Reserved ports + DynamicPorts []Port // Host Dynamically assigned ports +} + +func (n *NetworkResource) Canonicalize() { + // Ensure that an empty and nil slices are treated the same to avoid scheduling + // problems since we use reflect DeepEquals. + if len(n.ReservedPorts) == 0 { + n.ReservedPorts = nil + } + if len(n.DynamicPorts) == 0 { + n.DynamicPorts = nil + } +} + +// MeetsMinResources returns an error if the resources specified are less than +// the minimum allowed. 
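+
+// Editorial example (sketch, not upstream code): Add and Superset are the
+// bookkeeping primitives used when fitting allocations onto a node. Summing
+// two hypothetical asks and checking them against a node's capacity:
+//
+//	used := &Resources{}
+//	used.Add(&Resources{CPU: 500, MemoryMB: 256})
+//	used.Add(&Resources{CPU: 250, MemoryMB: 512})
+//	// used is now CPU: 750, MemoryMB: 768
+//
+//	capacity := &Resources{CPU: 1000, MemoryMB: 1024}
+//	fmt.Println(capacity.Superset(used)) // true ""
+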
+func (n *NetworkResource) MeetsMinResources() error {
+ var mErr multierror.Error
+ if n.MBits < 1 {
+ mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum MBits value is 1; got %d", n.MBits))
+ }
+ return mErr.ErrorOrNil()
+}
+
+// Copy returns a deep copy of the network resource
+func (n *NetworkResource) Copy() *NetworkResource {
+ if n == nil {
+ return nil
+ }
+ newR := new(NetworkResource)
+ *newR = *n
+ if n.ReservedPorts != nil {
+ newR.ReservedPorts = make([]Port, len(n.ReservedPorts))
+ copy(newR.ReservedPorts, n.ReservedPorts)
+ }
+ if n.DynamicPorts != nil {
+ newR.DynamicPorts = make([]Port, len(n.DynamicPorts))
+ copy(newR.DynamicPorts, n.DynamicPorts)
+ }
+ return newR
+}
+
+// Add adds the resources of the delta to this, potentially
+// returning an error if not possible.
+func (n *NetworkResource) Add(delta *NetworkResource) {
+ if len(delta.ReservedPorts) > 0 {
+ n.ReservedPorts = append(n.ReservedPorts, delta.ReservedPorts...)
+ }
+ n.MBits += delta.MBits
+ n.DynamicPorts = append(n.DynamicPorts, delta.DynamicPorts...)
+}
+
+func (n *NetworkResource) GoString() string {
+ return fmt.Sprintf("*%#v", *n)
+}
+
+// PortLabels returns a map of port labels to their assigned host ports.
+func (n *NetworkResource) PortLabels() map[string]int {
+ num := len(n.ReservedPorts) + len(n.DynamicPorts)
+ labelValues := make(map[string]int, num)
+ for _, port := range n.ReservedPorts {
+ labelValues[port.Label] = port.Value
+ }
+ for _, port := range n.DynamicPorts {
+ labelValues[port.Label] = port.Value
+ }
+ return labelValues
+}
+
+const (
+ // JobTypeCore is reserved for internal system tasks and is
+ // always handled by the CoreScheduler.
+ JobTypeCore = "_core"
+ JobTypeService = "service"
+ JobTypeBatch = "batch"
+ JobTypeSystem = "system"
+)
+
+const (
+ JobStatusPending = "pending" // Pending means the job is waiting on scheduling
+ JobStatusRunning = "running" // Running means the job has non-terminal allocations
+ JobStatusDead = "dead" // Dead means all evaluations and allocations are terminal
+)
+
+const (
+ // JobMinPriority is the minimum allowed priority
+ JobMinPriority = 1
+
+ // JobDefaultPriority is the default priority if not
+ // specified.
+ JobDefaultPriority = 50
+
+ // JobMaxPriority is the maximum allowed priority
+ JobMaxPriority = 100
+
+ // Ensure CoreJobPriority is higher than any user
+ // specified job so that it gets priority. This is important
+ // for the system to remain healthy.
+ CoreJobPriority = JobMaxPriority * 2
+
+ // JobTrackedVersions is the number of historic job versions that are
+ // kept.
+ JobTrackedVersions = 6
+)
+
+// Job is the scope of a scheduling request to Nomad. It is the largest
+// scoped object, and is a named collection of task groups. Each task group
+// is further composed of tasks. A task group (TG) is the unit of scheduling
+// however.
+type Job struct {
+ // Stop marks whether the user has stopped the job. A stopped job will
+ // have all created allocations stopped and acts as a way to stop a job
+ // without purging it from the system. This allows existing allocs to be
+ // queried and the job to be inspected as it is being killed.
+ Stop bool
+
+ // Region is the Nomad region that handles scheduling this job
+ Region string
+
+ // Namespace is the namespace the job is submitted into.
+ Namespace string
+
+ // ID is a unique identifier for the job per region.
+ // It can be specified hierarchically like LineOfBiz/OrgName/Team/Project
+ ID string
+
+ // ParentID is the unique identifier of the job that spawned this job.
+ ParentID string
+
+ // Name is the logical name of the job used to refer to it. This is unique
+ // per region, but not unique globally.
+ Name string
+
+ // Type is used to control various behaviors about the job. Most jobs
+ // are service jobs, meaning they are expected to be long lived.
+ // Some jobs are batch oriented meaning they run and then terminate.
+ // This can be extended in the future to support custom schedulers.
+ Type string
+
+ // Priority is used to control scheduling importance and if this job
+ // can preempt other jobs.
+ Priority int
+
+ // AllAtOnce is used to control if incremental scheduling of task groups
+ // is allowed or if we must do a gang scheduling of the entire job. This
+ // can slow down larger jobs if resources are not available.
+ AllAtOnce bool
+
+ // Datacenters contains all the datacenters this job is allowed to span
+ Datacenters []string
+
+ // Constraints can be specified at a job level and apply to
+ // all the task groups and tasks.
+ Constraints []*Constraint
+
+ // TaskGroups are the collections of task groups that this job needs
+ // to run. Each task group is an atomic unit of scheduling and placement.
+ TaskGroups []*TaskGroup
+
+ // COMPAT: Remove in 0.7.0. Stagger is deprecated in 0.6.0.
+ Update UpdateStrategy
+
+ // Periodic is used to define the interval the job is run at.
+ Periodic *PeriodicConfig
+
+ // ParameterizedJob is used to specify the job as a parameterized job
+ // for dispatching.
+ ParameterizedJob *ParameterizedJobConfig
+
+ // Payload is the payload supplied when the job was dispatched.
+ Payload []byte
+
+ // Meta is used to associate arbitrary metadata with this
+ // job. This is opaque to Nomad.
+ Meta map[string]string
+
+ // VaultToken is the Vault token that proves the submitter of the job has
+ // access to the specified Vault policies. This field is only used to
+ // transfer the token and is not stored after Job submission.
+ VaultToken string
+
+ // Job status
+ Status string
+
+ // StatusDescription is meant to provide more human useful information
+ StatusDescription string
+
+ // Stable marks a job as stable. Stability is only defined on "service" and
+ // "system" jobs. The stability of a job will be set automatically as part
+ // of a deployment and can be manually set via APIs.
+ Stable bool
+
+ // Version is a monotonically increasing version number that is incremented
+ // on each job register.
+ Version uint64
+
+ // SubmitTime is the time at which the job was submitted as a UnixNano in
+ // UTC
+ SubmitTime int64
+
+ // Raft Indexes
+ CreateIndex uint64
+ ModifyIndex uint64
+ JobModifyIndex uint64
+}
+
+// Canonicalize is used to canonicalize fields in the Job. This should be called
+// when registering a Job. A set of warnings is returned if the job was changed
+// in any way that the user should be made aware of.
+func (j *Job) Canonicalize() (warnings error) {
+ if j == nil {
+ return nil
+ }
+
+ var mErr multierror.Error
+ // Ensure that empty and nil maps are treated the same to avoid scheduling
+ // problems since we use reflect DeepEquals.
+ if len(j.Meta) == 0 {
+ j.Meta = nil
+ }
+
+ // Ensure the job is in a namespace.
+ if j.Namespace == "" { + j.Namespace = DefaultNamespace + } + + for _, tg := range j.TaskGroups { + tg.Canonicalize(j) + } + + if j.ParameterizedJob != nil { + j.ParameterizedJob.Canonicalize() + } + + if j.Periodic != nil { + j.Periodic.Canonicalize() + } + + // COMPAT: Remove in 0.7.0 + // Rewrite any job that has an update block with pre 0.6.0 syntax. + jobHasOldUpdate := j.Update.Stagger > 0 && j.Update.MaxParallel > 0 + if jobHasOldUpdate && j.Type != JobTypeBatch { + // Build an appropriate update block and copy it down to each task group + base := DefaultUpdateStrategy.Copy() + base.MaxParallel = j.Update.MaxParallel + base.MinHealthyTime = j.Update.Stagger + + // Add to each task group, modifying as needed + upgraded := false + l := len(j.TaskGroups) + for _, tg := range j.TaskGroups { + // The task group doesn't need upgrading if it has an update block with the new syntax + u := tg.Update + if u != nil && u.Stagger > 0 && u.MaxParallel > 0 && + u.HealthCheck != "" && u.MinHealthyTime > 0 && u.HealthyDeadline > 0 { + continue + } + + upgraded = true + + // The MaxParallel for the job should be 10% of the total count + // unless there is just one task group then we can infer the old + // max parallel should be the new + tgu := base.Copy() + if l != 1 { + // RoundTo 10% + var percent float64 = float64(tg.Count) * 0.1 + tgu.MaxParallel = int(percent + 0.5) + } + + // Safety guards + if tgu.MaxParallel == 0 { + tgu.MaxParallel = 1 + } else if tgu.MaxParallel > tg.Count { + tgu.MaxParallel = tg.Count + } + + tg.Update = tgu + } + + if upgraded { + w := "A best effort conversion to new update stanza introduced in v0.6.0 applied. " + + "Please update upgrade stanza before v0.7.0." + multierror.Append(&mErr, fmt.Errorf(w)) + } + } + + // Ensure that the batch job doesn't have new style or old style update + // stanza. Unfortunately are scanning here because we have to deprecate over + // a release so we can't check in the task group since that may be new style + // but wouldn't capture the old style and we don't want to have duplicate + // warnings. + if j.Type == JobTypeBatch { + displayWarning := jobHasOldUpdate + j.Update.Stagger = 0 + j.Update.MaxParallel = 0 + j.Update.HealthCheck = "" + j.Update.MinHealthyTime = 0 + j.Update.HealthyDeadline = 0 + j.Update.AutoRevert = false + j.Update.Canary = 0 + + // Remove any update spec from the task groups + for _, tg := range j.TaskGroups { + if tg.Update != nil { + displayWarning = true + tg.Update = nil + } + } + + if displayWarning { + w := "Update stanza is disallowed for batch jobs since v0.6.0. " + + "The update block has automatically been removed" + multierror.Append(&mErr, fmt.Errorf(w)) + } + } + + return mErr.ErrorOrNil() +} + +// Copy returns a deep copy of the Job. It is expected that callers use recover. +// This job can panic if the deep copy failed as it uses reflection. 
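+
+// Editorial note (worked example, not upstream code): the old update stanza
+// conversion in Canonicalize above gives each task group a MaxParallel of
+// roughly 10% of its Count, rounded to the nearest integer and clamped to
+// [1, Count]. For example, Count = 25 yields int(25*0.1 + 0.5) = 3,
+// Count = 3 rounds to 0 and is raised to 1 by the safety guard, and a job
+// with a single task group keeps the job-level MaxParallel unchanged
+// (clamped to its Count).
+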
+func (j *Job) Copy() *Job { + if j == nil { + return nil + } + nj := new(Job) + *nj = *j + nj.Datacenters = helper.CopySliceString(nj.Datacenters) + nj.Constraints = CopySliceConstraints(nj.Constraints) + + if j.TaskGroups != nil { + tgs := make([]*TaskGroup, len(nj.TaskGroups)) + for i, tg := range nj.TaskGroups { + tgs[i] = tg.Copy() + } + nj.TaskGroups = tgs + } + + nj.Periodic = nj.Periodic.Copy() + nj.Meta = helper.CopyMapStringString(nj.Meta) + nj.ParameterizedJob = nj.ParameterizedJob.Copy() + return nj +} + +// Validate is used to sanity check a job input +func (j *Job) Validate() error { + var mErr multierror.Error + + if j.Region == "" { + mErr.Errors = append(mErr.Errors, errors.New("Missing job region")) + } + if j.ID == "" { + mErr.Errors = append(mErr.Errors, errors.New("Missing job ID")) + } else if strings.Contains(j.ID, " ") { + mErr.Errors = append(mErr.Errors, errors.New("Job ID contains a space")) + } + if j.Name == "" { + mErr.Errors = append(mErr.Errors, errors.New("Missing job name")) + } + if j.Namespace == "" { + mErr.Errors = append(mErr.Errors, errors.New("Job must be in a namespace")) + } + switch j.Type { + case JobTypeCore, JobTypeService, JobTypeBatch, JobTypeSystem: + case "": + mErr.Errors = append(mErr.Errors, errors.New("Missing job type")) + default: + mErr.Errors = append(mErr.Errors, fmt.Errorf("Invalid job type: %q", j.Type)) + } + if j.Priority < JobMinPriority || j.Priority > JobMaxPriority { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Job priority must be between [%d, %d]", JobMinPriority, JobMaxPriority)) + } + if len(j.Datacenters) == 0 { + mErr.Errors = append(mErr.Errors, errors.New("Missing job datacenters")) + } + if len(j.TaskGroups) == 0 { + mErr.Errors = append(mErr.Errors, errors.New("Missing job task groups")) + } + for idx, constr := range j.Constraints { + if err := constr.Validate(); err != nil { + outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err) + mErr.Errors = append(mErr.Errors, outer) + } + } + + // Check for duplicate task groups + taskGroups := make(map[string]int) + for idx, tg := range j.TaskGroups { + if tg.Name == "" { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Job task group %d missing name", idx+1)) + } else if existing, ok := taskGroups[tg.Name]; ok { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Job task group %d redefines '%s' from group %d", idx+1, tg.Name, existing+1)) + } else { + taskGroups[tg.Name] = idx + } + + if j.Type == "system" && tg.Count > 1 { + mErr.Errors = append(mErr.Errors, + fmt.Errorf("Job task group %s has count %d. Count cannot exceed 1 with system scheduler", + tg.Name, tg.Count)) + } + } + + // Validate the task group + for _, tg := range j.TaskGroups { + if err := tg.Validate(j); err != nil { + outer := fmt.Errorf("Task group %s validation failed: %v", tg.Name, err) + mErr.Errors = append(mErr.Errors, outer) + } + } + + // Validate periodic is only used with batch jobs. 
+ if j.IsPeriodic() && j.Periodic.Enabled { + if j.Type != JobTypeBatch { + mErr.Errors = append(mErr.Errors, + fmt.Errorf("Periodic can only be used with %q scheduler", JobTypeBatch)) + } + + if err := j.Periodic.Validate(); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + } + + if j.IsParameterized() { + if j.Type != JobTypeBatch { + mErr.Errors = append(mErr.Errors, + fmt.Errorf("Parameterized job can only be used with %q scheduler", JobTypeBatch)) + } + + if err := j.ParameterizedJob.Validate(); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + } + + return mErr.ErrorOrNil() +} + +// Warnings returns a list of warnings that may be from dubious settings or +// deprecation warnings. +func (j *Job) Warnings() error { + var mErr multierror.Error + + // Check the groups + for _, tg := range j.TaskGroups { + if err := tg.Warnings(j); err != nil { + outer := fmt.Errorf("Group %q has warnings: %v", tg.Name, err) + mErr.Errors = append(mErr.Errors, outer) + } + } + + return mErr.ErrorOrNil() +} + +// LookupTaskGroup finds a task group by name +func (j *Job) LookupTaskGroup(name string) *TaskGroup { + for _, tg := range j.TaskGroups { + if tg.Name == name { + return tg + } + } + return nil +} + +// CombinedTaskMeta takes a TaskGroup and Task name and returns the combined +// meta data for the task. When joining Job, Group and Task Meta, the precedence +// is by deepest scope (Task > Group > Job). +func (j *Job) CombinedTaskMeta(groupName, taskName string) map[string]string { + group := j.LookupTaskGroup(groupName) + if group == nil { + return nil + } + + task := group.LookupTask(taskName) + if task == nil { + return nil + } + + meta := helper.CopyMapStringString(task.Meta) + if meta == nil { + meta = make(map[string]string, len(group.Meta)+len(j.Meta)) + } + + // Add the group specific meta + for k, v := range group.Meta { + if _, ok := meta[k]; !ok { + meta[k] = v + } + } + + // Add the job specific meta + for k, v := range j.Meta { + if _, ok := meta[k]; !ok { + meta[k] = v + } + } + + return meta +} + +// Stopped returns if a job is stopped. +func (j *Job) Stopped() bool { + return j == nil || j.Stop +} + +// HasUpdateStrategy returns if any task group in the job has an update strategy +func (j *Job) HasUpdateStrategy() bool { + for _, tg := range j.TaskGroups { + if tg.Update != nil { + return true + } + } + + return false +} + +// Stub is used to return a summary of the job +func (j *Job) Stub(summary *JobSummary) *JobListStub { + return &JobListStub{ + ID: j.ID, + ParentID: j.ParentID, + Name: j.Name, + Type: j.Type, + Priority: j.Priority, + Periodic: j.IsPeriodic(), + ParameterizedJob: j.IsParameterized(), + Stop: j.Stop, + Status: j.Status, + StatusDescription: j.StatusDescription, + CreateIndex: j.CreateIndex, + ModifyIndex: j.ModifyIndex, + JobModifyIndex: j.JobModifyIndex, + SubmitTime: j.SubmitTime, + JobSummary: summary, + } +} + +// IsPeriodic returns whether a job is periodic. +func (j *Job) IsPeriodic() bool { + return j.Periodic != nil +} + +// IsParameterized returns whether a job is parameterized job. 
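+
+// Editorial example (sketch, not upstream code): CombinedTaskMeta merges meta
+// with the deepest scope winning (Task > Group > Job). Assuming j is a
+// populated *Job; the keys and values are invented:
+//
+//	j.Meta = map[string]string{"owner": "platform", "env": "prod"}
+//	j.TaskGroups[0].Meta = map[string]string{"env": "staging"}
+//	j.TaskGroups[0].Tasks[0].Meta = map[string]string{"owner": "web-team"}
+//
+//	j.CombinedTaskMeta(j.TaskGroups[0].Name, j.TaskGroups[0].Tasks[0].Name)
+//	// => map[owner:web-team env:staging]
+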
+func (j *Job) IsParameterized() bool { + return j.ParameterizedJob != nil +} + +// VaultPolicies returns the set of Vault policies per task group, per task +func (j *Job) VaultPolicies() map[string]map[string]*Vault { + policies := make(map[string]map[string]*Vault, len(j.TaskGroups)) + + for _, tg := range j.TaskGroups { + tgPolicies := make(map[string]*Vault, len(tg.Tasks)) + + for _, task := range tg.Tasks { + if task.Vault == nil { + continue + } + + tgPolicies[task.Name] = task.Vault + } + + if len(tgPolicies) != 0 { + policies[tg.Name] = tgPolicies + } + } + + return policies +} + +// RequiredSignals returns a mapping of task groups to tasks to their required +// set of signals +func (j *Job) RequiredSignals() map[string]map[string][]string { + signals := make(map[string]map[string][]string) + + for _, tg := range j.TaskGroups { + for _, task := range tg.Tasks { + // Use this local one as a set + taskSignals := make(map[string]struct{}) + + // Check if the Vault change mode uses signals + if task.Vault != nil && task.Vault.ChangeMode == VaultChangeModeSignal { + taskSignals[task.Vault.ChangeSignal] = struct{}{} + } + + // Check if any template change mode uses signals + for _, t := range task.Templates { + if t.ChangeMode != TemplateChangeModeSignal { + continue + } + + taskSignals[t.ChangeSignal] = struct{}{} + } + + // Flatten and sort the signals + l := len(taskSignals) + if l == 0 { + continue + } + + flat := make([]string, 0, l) + for sig := range taskSignals { + flat = append(flat, sig) + } + + sort.Strings(flat) + tgSignals, ok := signals[tg.Name] + if !ok { + tgSignals = make(map[string][]string) + signals[tg.Name] = tgSignals + } + tgSignals[task.Name] = flat + } + + } + + return signals +} + +// SpecChanged determines if the functional specification has changed between +// two job versions. +func (j *Job) SpecChanged(new *Job) bool { + if j == nil { + return new != nil + } + + // Create a copy of the new job + c := new.Copy() + + // Update the new job so we can do a reflect + c.Status = j.Status + c.StatusDescription = j.StatusDescription + c.Stable = j.Stable + c.Version = j.Version + c.CreateIndex = j.CreateIndex + c.ModifyIndex = j.ModifyIndex + c.JobModifyIndex = j.JobModifyIndex + c.SubmitTime = j.SubmitTime + + // Deep equals the jobs + return !reflect.DeepEqual(j, c) +} + +func (j *Job) SetSubmitTime() { + j.SubmitTime = time.Now().UTC().UnixNano() +} + +// JobListStub is used to return a subset of job information +// for the job list +type JobListStub struct { + ID string + ParentID string + Name string + Type string + Priority int + Periodic bool + ParameterizedJob bool + Stop bool + Status string + StatusDescription string + JobSummary *JobSummary + CreateIndex uint64 + ModifyIndex uint64 + JobModifyIndex uint64 + SubmitTime int64 +} + +// JobSummary summarizes the state of the allocations of a job +type JobSummary struct { + // JobID is the ID of the job the summary is for + JobID string + + // Namespace is the namespace of the job and its summary + Namespace string + + // Summmary contains the summary per task group for the Job + Summary map[string]TaskGroupSummary + + // Children contains a summary for the children of this job. 
+ Children *JobChildrenSummary
+
+ // Raft Indexes
+ CreateIndex uint64
+ ModifyIndex uint64
+}
+
+// Copy returns a new copy of JobSummary
+func (js *JobSummary) Copy() *JobSummary {
+ newJobSummary := new(JobSummary)
+ *newJobSummary = *js
+ newTGSummary := make(map[string]TaskGroupSummary, len(js.Summary))
+ for k, v := range js.Summary {
+ newTGSummary[k] = v
+ }
+ newJobSummary.Summary = newTGSummary
+ newJobSummary.Children = newJobSummary.Children.Copy()
+ return newJobSummary
+}
+
+// JobChildrenSummary contains the summary of children job statuses
+type JobChildrenSummary struct {
+ Pending int64
+ Running int64
+ Dead int64
+}
+
+// Copy returns a new copy of a JobChildrenSummary
+func (jc *JobChildrenSummary) Copy() *JobChildrenSummary {
+ if jc == nil {
+ return nil
+ }
+
+ njc := new(JobChildrenSummary)
+ *njc = *jc
+ return njc
+}
+
+// TaskGroupSummary summarizes the state of all the allocations of a particular
+// TaskGroup
+type TaskGroupSummary struct {
+ Queued int
+ Complete int
+ Failed int
+ Running int
+ Starting int
+ Lost int
+}
+
+const (
+ // Checks uses any registered health check state in combination with task
+ // states to determine if an allocation is healthy.
+ UpdateStrategyHealthCheck_Checks = "checks"
+
+ // TaskStates uses the task states of an allocation to determine if the
+ // allocation is healthy.
+ UpdateStrategyHealthCheck_TaskStates = "task_states"
+
+ // Manual allows the operator to manually signal to Nomad when an
+ // allocation is healthy. This allows more advanced health checking that is
+ // outside of the scope of Nomad.
+ UpdateStrategyHealthCheck_Manual = "manual"
+)
+
+var (
+ // DefaultUpdateStrategy provides a baseline that can be used to upgrade
+ // jobs with the old policy or for populating field defaults.
+ DefaultUpdateStrategy = &UpdateStrategy{
+ Stagger: 30 * time.Second,
+ MaxParallel: 1,
+ HealthCheck: UpdateStrategyHealthCheck_Checks,
+ MinHealthyTime: 10 * time.Second,
+ HealthyDeadline: 5 * time.Minute,
+ AutoRevert: false,
+ Canary: 0,
+ }
+)
+
+// UpdateStrategy is used to modify how updates are done
+type UpdateStrategy struct {
+ // Stagger is used to determine the rate at which allocations are migrated
+ // due to down or draining nodes.
+ Stagger time.Duration
+
+ // MaxParallel is how many updates can be done in parallel
+ MaxParallel int
+
+ // HealthCheck specifies the mechanism in which allocations are marked
+ // healthy or unhealthy as part of a deployment.
+ HealthCheck string
+
+ // MinHealthyTime is the minimum time an allocation must be in the healthy
+ // state before it is marked as healthy, unblocking more allocations to be
+ // rolled.
+ MinHealthyTime time.Duration
+
+ // HealthyDeadline is the time in which an allocation must be marked as
+ // healthy before it is automatically transitioned to unhealthy. This time
+ // period doesn't count against the MinHealthyTime.
+ HealthyDeadline time.Duration
+
+ // AutoRevert declares that if a deployment fails because of unhealthy
+ // allocations, there should be an attempt to auto-revert the job to a
+ // stable version.
+ AutoRevert bool
+
+ // Canary is the number of canaries to deploy when a change to the task
+ // group is detected.
+ Canary int +} + +func (u *UpdateStrategy) Copy() *UpdateStrategy { + if u == nil { + return nil + } + + copy := new(UpdateStrategy) + *copy = *u + return copy +} + +func (u *UpdateStrategy) Validate() error { + if u == nil { + return nil + } + + var mErr multierror.Error + switch u.HealthCheck { + case UpdateStrategyHealthCheck_Checks, UpdateStrategyHealthCheck_TaskStates, UpdateStrategyHealthCheck_Manual: + default: + multierror.Append(&mErr, fmt.Errorf("Invalid health check given: %q", u.HealthCheck)) + } + + if u.MaxParallel < 1 { + multierror.Append(&mErr, fmt.Errorf("Max parallel can not be less than one: %d < 1", u.MaxParallel)) + } + if u.Canary < 0 { + multierror.Append(&mErr, fmt.Errorf("Canary count can not be less than zero: %d < 0", u.Canary)) + } + if u.MinHealthyTime < 0 { + multierror.Append(&mErr, fmt.Errorf("Minimum healthy time may not be less than zero: %v", u.MinHealthyTime)) + } + if u.HealthyDeadline <= 0 { + multierror.Append(&mErr, fmt.Errorf("Healthy deadline must be greater than zero: %v", u.HealthyDeadline)) + } + if u.MinHealthyTime >= u.HealthyDeadline { + multierror.Append(&mErr, fmt.Errorf("Minimum healthy time must be less than healthy deadline: %v > %v", u.MinHealthyTime, u.HealthyDeadline)) + } + if u.Stagger <= 0 { + multierror.Append(&mErr, fmt.Errorf("Stagger must be greater than zero: %v", u.Stagger)) + } + + return mErr.ErrorOrNil() +} + +// TODO(alexdadgar): Remove once no longer used by the scheduler. +// Rolling returns if a rolling strategy should be used +func (u *UpdateStrategy) Rolling() bool { + return u.Stagger > 0 && u.MaxParallel > 0 +} + +const ( + // PeriodicSpecCron is used for a cron spec. + PeriodicSpecCron = "cron" + + // PeriodicSpecTest is only used by unit tests. It is a sorted, comma + // separated list of unix timestamps at which to launch. + PeriodicSpecTest = "_internal_test" +) + +// Periodic defines the interval a job should be run at. +type PeriodicConfig struct { + // Enabled determines if the job should be run periodically. + Enabled bool + + // Spec specifies the interval the job should be run as. It is parsed based + // on the SpecType. + Spec string + + // SpecType defines the format of the spec. + SpecType string + + // ProhibitOverlap enforces that spawned jobs do not run in parallel. + ProhibitOverlap bool + + // TimeZone is the user specified string that determines the time zone to + // launch against. The time zones must be specified from IANA Time Zone + // database, such as "America/New_York". 
+ // Reference: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
+ // Reference: https://www.iana.org/time-zones
+ TimeZone string
+
+ // location is the time zone to evaluate the launch time against
+ location *time.Location
+}
+
+func (p *PeriodicConfig) Copy() *PeriodicConfig {
+ if p == nil {
+ return nil
+ }
+ np := new(PeriodicConfig)
+ *np = *p
+ return np
+}
+
+func (p *PeriodicConfig) Validate() error {
+ if !p.Enabled {
+ return nil
+ }
+
+ var mErr multierror.Error
+ if p.Spec == "" {
+ multierror.Append(&mErr, fmt.Errorf("Must specify a spec"))
+ }
+
+ // Check if we got a valid time zone
+ if p.TimeZone != "" {
+ if _, err := time.LoadLocation(p.TimeZone); err != nil {
+ multierror.Append(&mErr, fmt.Errorf("Invalid time zone %q: %v", p.TimeZone, err))
+ }
+ }
+
+ switch p.SpecType {
+ case PeriodicSpecCron:
+ // Validate the cron spec
+ if _, err := cronexpr.Parse(p.Spec); err != nil {
+ multierror.Append(&mErr, fmt.Errorf("Invalid cron spec %q: %v", p.Spec, err))
+ }
+ case PeriodicSpecTest:
+ // No-op
+ default:
+ multierror.Append(&mErr, fmt.Errorf("Unknown periodic specification type %q", p.SpecType))
+ }
+
+ return mErr.ErrorOrNil()
+}
+
+func (p *PeriodicConfig) Canonicalize() {
+ // Load the location; fall back to UTC if the configured time zone is
+ // invalid so the fallback is not overwritten below.
+ l, err := time.LoadLocation(p.TimeZone)
+ if err != nil {
+ p.location = time.UTC
+ return
+ }
+
+ p.location = l
+}
+
+// Next returns the closest time instant matching the spec that is after the
+// passed time. If no matching instance exists, the zero value of time.Time is
+// returned. The `time.Location` of the returned value matches that of the
+// passed time.
+func (p *PeriodicConfig) Next(fromTime time.Time) time.Time {
+ switch p.SpecType {
+ case PeriodicSpecCron:
+ if e, err := cronexpr.Parse(p.Spec); err == nil {
+ return e.Next(fromTime)
+ }
+ case PeriodicSpecTest:
+ split := strings.Split(p.Spec, ",")
+ if len(split) == 1 && split[0] == "" {
+ return time.Time{}
+ }
+
+ // Parse the times
+ times := make([]time.Time, len(split))
+ for i, s := range split {
+ unix, err := strconv.Atoi(s)
+ if err != nil {
+ return time.Time{}
+ }
+
+ times[i] = time.Unix(int64(unix), 0)
+ }
+
+ // Find the next match
+ for _, next := range times {
+ if fromTime.Before(next) {
+ return next
+ }
+ }
+ }
+
+ return time.Time{}
+}
+
+// GetLocation returns the location to use for determining the time zone to run
+// the periodic job against.
+func (p *PeriodicConfig) GetLocation() *time.Location {
+ // Jobs pre 0.5.5 will not have this
+ if p.location != nil {
+ return p.location
+ }
+
+ return time.UTC
+}
+
+const (
+ // PeriodicLaunchSuffix is the string appended to the periodic job's ID
+ // when launching derived instances of it.
+ PeriodicLaunchSuffix = "/periodic-"
+)
+
+// PeriodicLaunch tracks the last launch time of a periodic job.
+type PeriodicLaunch struct {
+ ID string // ID of the periodic job.
+ Namespace string // Namespace of the periodic job
+ Launch time.Time // The last launch time.
+
+ // Raft Indexes
+ CreateIndex uint64
+ ModifyIndex uint64
+}
+
+const (
+ DispatchPayloadForbidden = "forbidden"
+ DispatchPayloadOptional = "optional"
+ DispatchPayloadRequired = "required"
+
+ // DispatchLaunchSuffix is the string appended to the parameterized job's ID
+ // when dispatching instances of it.
+ DispatchLaunchSuffix = "/dispatch-" +) + +// ParameterizedJobConfig is used to configure the parameterized job +type ParameterizedJobConfig struct { + // Payload configure the payload requirements + Payload string + + // MetaRequired is metadata keys that must be specified by the dispatcher + MetaRequired []string + + // MetaOptional is metadata keys that may be specified by the dispatcher + MetaOptional []string +} + +func (d *ParameterizedJobConfig) Validate() error { + var mErr multierror.Error + switch d.Payload { + case DispatchPayloadOptional, DispatchPayloadRequired, DispatchPayloadForbidden: + default: + multierror.Append(&mErr, fmt.Errorf("Unknown payload requirement: %q", d.Payload)) + } + + // Check that the meta configurations are disjoint sets + disjoint, offending := helper.SliceSetDisjoint(d.MetaRequired, d.MetaOptional) + if !disjoint { + multierror.Append(&mErr, fmt.Errorf("Required and optional meta keys should be disjoint. Following keys exist in both: %v", offending)) + } + + return mErr.ErrorOrNil() +} + +func (d *ParameterizedJobConfig) Canonicalize() { + if d.Payload == "" { + d.Payload = DispatchPayloadOptional + } +} + +func (d *ParameterizedJobConfig) Copy() *ParameterizedJobConfig { + if d == nil { + return nil + } + nd := new(ParameterizedJobConfig) + *nd = *d + nd.MetaOptional = helper.CopySliceString(nd.MetaOptional) + nd.MetaRequired = helper.CopySliceString(nd.MetaRequired) + return nd +} + +// DispatchedID returns an ID appropriate for a job dispatched against a +// particular parameterized job +func DispatchedID(templateID string, t time.Time) string { + u := GenerateUUID()[:8] + return fmt.Sprintf("%s%s%d-%s", templateID, DispatchLaunchSuffix, t.Unix(), u) +} + +// DispatchPayloadConfig configures how a task gets its input from a job dispatch +type DispatchPayloadConfig struct { + // File specifies a relative path to where the input data should be written + File string +} + +func (d *DispatchPayloadConfig) Copy() *DispatchPayloadConfig { + if d == nil { + return nil + } + nd := new(DispatchPayloadConfig) + *nd = *d + return nd +} + +func (d *DispatchPayloadConfig) Validate() error { + // Verify the destination doesn't escape + escaped, err := PathEscapesAllocDir("task/local/", d.File) + if err != nil { + return fmt.Errorf("invalid destination path: %v", err) + } else if escaped { + return fmt.Errorf("destination escapes allocation directory") + } + + return nil +} + +var ( + defaultServiceJobRestartPolicy = RestartPolicy{ + Delay: 15 * time.Second, + Attempts: 2, + Interval: 1 * time.Minute, + Mode: RestartPolicyModeDelay, + } + defaultBatchJobRestartPolicy = RestartPolicy{ + Delay: 15 * time.Second, + Attempts: 15, + Interval: 7 * 24 * time.Hour, + Mode: RestartPolicyModeDelay, + } +) + +const ( + // RestartPolicyModeDelay causes an artificial delay till the next interval is + // reached when the specified attempts have been reached in the interval. + RestartPolicyModeDelay = "delay" + + // RestartPolicyModeFail causes a job to fail if the specified number of + // attempts are reached within an interval. + RestartPolicyModeFail = "fail" + + // RestartPolicyMinInterval is the minimum interval that is accepted for a + // restart policy. + RestartPolicyMinInterval = 5 * time.Second +) + +// RestartPolicy configures how Tasks are restarted when they crash or fail. +type RestartPolicy struct { + // Attempts is the number of restart that will occur in an interval. 
+ Attempts int + + // Interval is a duration in which we can limit the number of restarts + // within. + Interval time.Duration + + // Delay is the time between a failure and a restart. + Delay time.Duration + + // Mode controls what happens when the task restarts more than attempt times + // in an interval. + Mode string +} + +func (r *RestartPolicy) Copy() *RestartPolicy { + if r == nil { + return nil + } + nrp := new(RestartPolicy) + *nrp = *r + return nrp +} + +func (r *RestartPolicy) Validate() error { + var mErr multierror.Error + switch r.Mode { + case RestartPolicyModeDelay, RestartPolicyModeFail: + default: + multierror.Append(&mErr, fmt.Errorf("Unsupported restart mode: %q", r.Mode)) + } + + // Check for ambiguous/confusing settings + if r.Attempts == 0 && r.Mode != RestartPolicyModeFail { + multierror.Append(&mErr, fmt.Errorf("Restart policy %q with %d attempts is ambiguous", r.Mode, r.Attempts)) + } + + if r.Interval.Nanoseconds() < RestartPolicyMinInterval.Nanoseconds() { + multierror.Append(&mErr, fmt.Errorf("Interval can not be less than %v (got %v)", RestartPolicyMinInterval, r.Interval)) + } + if time.Duration(r.Attempts)*r.Delay > r.Interval { + multierror.Append(&mErr, + fmt.Errorf("Nomad can't restart the TaskGroup %v times in an interval of %v with a delay of %v", r.Attempts, r.Interval, r.Delay)) + } + return mErr.ErrorOrNil() +} + +func NewRestartPolicy(jobType string) *RestartPolicy { + switch jobType { + case JobTypeService, JobTypeSystem: + rp := defaultServiceJobRestartPolicy + return &rp + case JobTypeBatch: + rp := defaultBatchJobRestartPolicy + return &rp + } + return nil +} + +// TaskGroup is an atomic unit of placement. Each task group belongs to +// a job and may contain any number of tasks. A task group support running +// in many replicas using the same configuration.. +type TaskGroup struct { + // Name of the task group + Name string + + // Count is the number of replicas of this task group that should + // be scheduled. + Count int + + // Update is used to control the update strategy for this task group + Update *UpdateStrategy + + // Constraints can be specified at a task group level and apply to + // all the tasks contained. + Constraints []*Constraint + + //RestartPolicy of a TaskGroup + RestartPolicy *RestartPolicy + + // Tasks are the collection of tasks that this task group needs to run + Tasks []*Task + + // EphemeralDisk is the disk resources that the task group requests + EphemeralDisk *EphemeralDisk + + // Meta is used to associate arbitrary metadata with this + // task group. This is opaque to Nomad. + Meta map[string]string +} + +func (tg *TaskGroup) Copy() *TaskGroup { + if tg == nil { + return nil + } + ntg := new(TaskGroup) + *ntg = *tg + ntg.Update = ntg.Update.Copy() + ntg.Constraints = CopySliceConstraints(ntg.Constraints) + ntg.RestartPolicy = ntg.RestartPolicy.Copy() + + if tg.Tasks != nil { + tasks := make([]*Task, len(ntg.Tasks)) + for i, t := range ntg.Tasks { + tasks[i] = t.Copy() + } + ntg.Tasks = tasks + } + + ntg.Meta = helper.CopyMapStringString(ntg.Meta) + + if tg.EphemeralDisk != nil { + ntg.EphemeralDisk = tg.EphemeralDisk.Copy() + } + return ntg +} + +// Canonicalize is used to canonicalize fields in the TaskGroup. +func (tg *TaskGroup) Canonicalize(job *Job) { + // Ensure that an empty and nil map are treated the same to avoid scheduling + // problems since we use reflect DeepEquals. + if len(tg.Meta) == 0 { + tg.Meta = nil + } + + // Set the default restart policy. 
+ if tg.RestartPolicy == nil { + tg.RestartPolicy = NewRestartPolicy(job.Type) + } + + // Set a default ephemeral disk object if the user has not requested for one + if tg.EphemeralDisk == nil { + tg.EphemeralDisk = DefaultEphemeralDisk() + } + + for _, task := range tg.Tasks { + task.Canonicalize(job, tg) + } + + // Add up the disk resources to EphemeralDisk. This is done so that users + // are not required to move their disk attribute from resources to + // EphemeralDisk section of the job spec in Nomad 0.5 + // COMPAT 0.4.1 -> 0.5 + // Remove in 0.6 + var diskMB int + for _, task := range tg.Tasks { + diskMB += task.Resources.DiskMB + } + if diskMB > 0 { + tg.EphemeralDisk.SizeMB = diskMB + } +} + +// Validate is used to sanity check a task group +func (tg *TaskGroup) Validate(j *Job) error { + var mErr multierror.Error + if tg.Name == "" { + mErr.Errors = append(mErr.Errors, errors.New("Missing task group name")) + } + if tg.Count < 0 { + mErr.Errors = append(mErr.Errors, errors.New("Task group count can't be negative")) + } + if len(tg.Tasks) == 0 { + mErr.Errors = append(mErr.Errors, errors.New("Missing tasks for task group")) + } + for idx, constr := range tg.Constraints { + if err := constr.Validate(); err != nil { + outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err) + mErr.Errors = append(mErr.Errors, outer) + } + } + + if tg.RestartPolicy != nil { + if err := tg.RestartPolicy.Validate(); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + } else { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Task Group %v should have a restart policy", tg.Name)) + } + + if tg.EphemeralDisk != nil { + if err := tg.EphemeralDisk.Validate(); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + } else { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Task Group %v should have an ephemeral disk object", tg.Name)) + } + + // Validate the update strategy + if u := tg.Update; u != nil { + switch j.Type { + case JobTypeService, JobTypeSystem: + default: + // COMPAT: Enable in 0.7.0 + //mErr.Errors = append(mErr.Errors, fmt.Errorf("Job type %q does not allow update block", j.Type)) + } + if err := u.Validate(); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + } + + // Check for duplicate tasks, that there is only leader task if any, + // and no duplicated static ports + tasks := make(map[string]int) + staticPorts := make(map[int]string) + leaderTasks := 0 + for idx, task := range tg.Tasks { + if task.Name == "" { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Task %d missing name", idx+1)) + } else if existing, ok := tasks[task.Name]; ok { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Task %d redefines '%s' from task %d", idx+1, task.Name, existing+1)) + } else { + tasks[task.Name] = idx + } + + if task.Leader { + leaderTasks++ + } + + if task.Resources == nil { + continue + } + + for _, net := range task.Resources.Networks { + for _, port := range net.ReservedPorts { + if other, ok := staticPorts[port.Value]; ok { + err := fmt.Errorf("Static port %d already reserved by %s", port.Value, other) + mErr.Errors = append(mErr.Errors, err) + } else { + staticPorts[port.Value] = fmt.Sprintf("%s:%s", task.Name, port.Label) + } + } + } + } + + if leaderTasks > 1 { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Only one task may be marked as leader")) + } + + // Validate the tasks + for _, task := range tg.Tasks { + if err := task.Validate(tg.EphemeralDisk); err != nil { + outer := fmt.Errorf("Task %s validation failed: %v", task.Name, err) + mErr.Errors = 
append(mErr.Errors, outer) + } + } + return mErr.ErrorOrNil() +} + +// Warnings returns a list of warnings that may be from dubious settings or +// deprecation warnings. +func (tg *TaskGroup) Warnings(j *Job) error { + var mErr multierror.Error + + // Validate the update strategy + if u := tg.Update; u != nil { + // Check the counts are appropriate + if u.MaxParallel > tg.Count { + mErr.Errors = append(mErr.Errors, + fmt.Errorf("Update max parallel count is greater than task group count (%d > %d). "+ + "A destructive change would result in the simultaneous replacement of all allocations.", u.MaxParallel, tg.Count)) + } + } + + return mErr.ErrorOrNil() +} + +// LookupTask finds a task by name +func (tg *TaskGroup) LookupTask(name string) *Task { + for _, t := range tg.Tasks { + if t.Name == name { + return t + } + } + return nil +} + +func (tg *TaskGroup) GoString() string { + return fmt.Sprintf("*%#v", *tg) +} + +// CheckRestart describes if and when a task should be restarted based on +// failing health checks. +type CheckRestart struct { + Limit int // Restart task after this many unhealthy intervals + Grace time.Duration // Grace time to give tasks after starting to get healthy + IgnoreWarnings bool // If true treat checks in `warning` as passing +} + +func (c *CheckRestart) Copy() *CheckRestart { + if c == nil { + return nil + } + + nc := new(CheckRestart) + *nc = *c + return nc +} + +func (c *CheckRestart) Validate() error { + if c == nil { + return nil + } + + var mErr multierror.Error + if c.Limit < 0 { + mErr.Errors = append(mErr.Errors, fmt.Errorf("limit must be greater than or equal to 0 but found %d", c.Limit)) + } + + if c.Grace < 0 { + mErr.Errors = append(mErr.Errors, fmt.Errorf("grace period must be greater than or equal to 0 but found %d", c.Grace)) + } + + return mErr.ErrorOrNil() +} + +const ( + ServiceCheckHTTP = "http" + ServiceCheckTCP = "tcp" + ServiceCheckScript = "script" + + // minCheckInterval is the minimum check interval permitted. Consul + // currently has its MinInterval set to 1s. Mirror that here for + // consistency. + minCheckInterval = 1 * time.Second + + // minCheckTimeout is the minimum check timeout permitted for Consul + // script TTL checks. 
+ minCheckTimeout = 1 * time.Second +) + +// The ServiceCheck data model represents the consul health check that +// Nomad registers for a Task +type ServiceCheck struct { + Name string // Name of the check, defaults to id + Type string // Type of the check - tcp, http, docker and script + Command string // Command is the command to run for script checks + Args []string // Args is a list of argumes for script checks + Path string // path of the health check url for http type check + Protocol string // Protocol to use if check is http, defaults to http + PortLabel string // The port to use for tcp/http checks + Interval time.Duration // Interval of the check + Timeout time.Duration // Timeout of the response from the check before consul fails the check + InitialStatus string // Initial status of the check + TLSSkipVerify bool // Skip TLS verification when Protocol=https + Method string // HTTP Method to use (GET by default) + Header map[string][]string // HTTP Headers for Consul to set when making HTTP checks + CheckRestart *CheckRestart // If and when a task should be restarted based on checks +} + +func (sc *ServiceCheck) Copy() *ServiceCheck { + if sc == nil { + return nil + } + nsc := new(ServiceCheck) + *nsc = *sc + nsc.Args = helper.CopySliceString(sc.Args) + nsc.Header = helper.CopyMapStringSliceString(sc.Header) + nsc.CheckRestart = sc.CheckRestart.Copy() + return nsc +} + +func (sc *ServiceCheck) Canonicalize(serviceName string) { + // Ensure empty maps/slices are treated as null to avoid scheduling + // issues when using DeepEquals. + if len(sc.Args) == 0 { + sc.Args = nil + } + + if len(sc.Header) == 0 { + sc.Header = nil + } else { + for k, v := range sc.Header { + if len(v) == 0 { + sc.Header[k] = nil + } + } + } + + if sc.Name == "" { + sc.Name = fmt.Sprintf("service: %q check", serviceName) + } +} + +// validate a Service's ServiceCheck +func (sc *ServiceCheck) validate() error { + switch strings.ToLower(sc.Type) { + case ServiceCheckTCP: + case ServiceCheckHTTP: + if sc.Path == "" { + return fmt.Errorf("http type must have a valid http path") + } + + case ServiceCheckScript: + if sc.Command == "" { + return fmt.Errorf("script type must have a valid script path") + } + default: + return fmt.Errorf(`invalid type (%+q), must be one of "http", "tcp", or "script" type`, sc.Type) + } + + if sc.Interval == 0 { + return fmt.Errorf("missing required value interval. Interval cannot be less than %v", minCheckInterval) + } else if sc.Interval < minCheckInterval { + return fmt.Errorf("interval (%v) cannot be lower than %v", sc.Interval, minCheckInterval) + } + + if sc.Timeout == 0 { + return fmt.Errorf("missing required value timeout. Timeout cannot be less than %v", minCheckInterval) + } else if sc.Timeout < minCheckTimeout { + return fmt.Errorf("timeout (%v) is lower than required minimum timeout %v", sc.Timeout, minCheckInterval) + } + + switch sc.InitialStatus { + case "": + // case api.HealthUnknown: TODO: Add when Consul releases 0.7.1 + case api.HealthPassing: + case api.HealthWarning: + case api.HealthCritical: + default: + return fmt.Errorf(`invalid initial check state (%s), must be one of %q, %q, %q or empty`, sc.InitialStatus, api.HealthPassing, api.HealthWarning, api.HealthCritical) + + } + + return sc.CheckRestart.Validate() +} + +// RequiresPort returns whether the service check requires the task has a port. 
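+// Only the "tcp" and "http" check types dial or query a port; "script" checks do not require one.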
+func (sc *ServiceCheck) RequiresPort() bool { + switch sc.Type { + case ServiceCheckHTTP, ServiceCheckTCP: + return true + default: + return false + } +} + +// TriggersRestarts returns true if this check should be watched and trigger a restart +// on failure. +func (sc *ServiceCheck) TriggersRestarts() bool { + return sc.CheckRestart != nil && sc.CheckRestart.Limit > 0 +} + +// Hash all ServiceCheck fields and the check's corresponding service ID to +// create an identifier. The identifier is not guaranteed to be unique as if +// the PortLabel is blank, the Service's PortLabel will be used after Hash is +// called. +func (sc *ServiceCheck) Hash(serviceID string) string { + h := sha1.New() + io.WriteString(h, serviceID) + io.WriteString(h, sc.Name) + io.WriteString(h, sc.Type) + io.WriteString(h, sc.Command) + io.WriteString(h, strings.Join(sc.Args, "")) + io.WriteString(h, sc.Path) + io.WriteString(h, sc.Protocol) + io.WriteString(h, sc.PortLabel) + io.WriteString(h, sc.Interval.String()) + io.WriteString(h, sc.Timeout.String()) + io.WriteString(h, sc.Method) + // Only include TLSSkipVerify if set to maintain ID stability with Nomad <0.6 + if sc.TLSSkipVerify { + io.WriteString(h, "true") + } + + // Since map iteration order isn't stable we need to write k/v pairs to + // a slice and sort it before hashing. + if len(sc.Header) > 0 { + headers := make([]string, 0, len(sc.Header)) + for k, v := range sc.Header { + headers = append(headers, k+strings.Join(v, "")) + } + sort.Strings(headers) + io.WriteString(h, strings.Join(headers, "")) + } + + return fmt.Sprintf("%x", h.Sum(nil)) +} + +const ( + AddressModeAuto = "auto" + AddressModeHost = "host" + AddressModeDriver = "driver" +) + +// Service represents a Consul service definition in Nomad +type Service struct { + // Name of the service registered with Consul. Consul defaults the + // Name to ServiceID if not specified. The Name if specified is used + // as one of the seed values when generating a Consul ServiceID. + Name string + + // PortLabel is either the numeric port number or the `host:port`. + // To specify the port number using the host's Consul Advertise + // address, specify an empty host in the PortLabel (e.g. `:port`). + PortLabel string + + // AddressMode specifies whether or not to use the host ip:port for + // this service. + AddressMode string + + Tags []string // List of tags for the service + Checks []*ServiceCheck // List of checks associated with the service +} + +func (s *Service) Copy() *Service { + if s == nil { + return nil + } + ns := new(Service) + *ns = *s + ns.Tags = helper.CopySliceString(ns.Tags) + + if s.Checks != nil { + checks := make([]*ServiceCheck, len(ns.Checks)) + for i, c := range ns.Checks { + checks[i] = c.Copy() + } + ns.Checks = checks + } + + return ns +} + +// Canonicalize interpolates values of Job, Task Group and Task in the Service +// Name. This also generates check names, service id and check ids. 
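+// For example, a service named "cache-${TASKGROUP}" in task group "web" of job "example"
+// would interpolate to "cache-web" (illustrative names, assuming the ${...} syntax handled
+// by args.ReplaceEnv).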
+func (s *Service) Canonicalize(job string, taskGroup string, task string) { + // Ensure empty lists are treated as null to avoid scheduler issues when + // using DeepEquals + if len(s.Tags) == 0 { + s.Tags = nil + } + if len(s.Checks) == 0 { + s.Checks = nil + } + + s.Name = args.ReplaceEnv(s.Name, map[string]string{ + "JOB": job, + "TASKGROUP": taskGroup, + "TASK": task, + "BASE": fmt.Sprintf("%s-%s-%s", job, taskGroup, task), + }, + ) + + for _, check := range s.Checks { + check.Canonicalize(s.Name) + } +} + +// Validate checks if the Check definition is valid +func (s *Service) Validate() error { + var mErr multierror.Error + + // Ensure the service name is valid per the below RFCs but make an exception + // for our interpolation syntax + // RFC-952 §1 (https://tools.ietf.org/html/rfc952), RFC-1123 §2.1 + // (https://tools.ietf.org/html/rfc1123), and RFC-2782 + // (https://tools.ietf.org/html/rfc2782). + re := regexp.MustCompile(`^(?i:[a-z0-9]|[a-z0-9\$][a-zA-Z0-9\-\$\{\}\_\.]*[a-z0-9\}])$`) + if !re.MatchString(s.Name) { + mErr.Errors = append(mErr.Errors, fmt.Errorf("service name must be valid per RFC 1123 and can contain only alphanumeric characters or dashes: %q", s.Name)) + } + + switch s.AddressMode { + case "", AddressModeAuto, AddressModeHost, AddressModeDriver: + // OK + default: + mErr.Errors = append(mErr.Errors, fmt.Errorf("service address_mode must be %q, %q, or %q; not %q", AddressModeAuto, AddressModeHost, AddressModeDriver, s.AddressMode)) + } + + for _, c := range s.Checks { + if s.PortLabel == "" && c.RequiresPort() { + mErr.Errors = append(mErr.Errors, fmt.Errorf("check %s invalid: check requires a port but the service %+q has no port", c.Name, s.Name)) + continue + } + + if err := c.validate(); err != nil { + mErr.Errors = append(mErr.Errors, fmt.Errorf("check %s invalid: %v", c.Name, err)) + } + } + + return mErr.ErrorOrNil() +} + +// ValidateName checks if the services Name is valid and should be called after +// the name has been interpolated +func (s *Service) ValidateName(name string) error { + // Ensure the service name is valid per RFC-952 §1 + // (https://tools.ietf.org/html/rfc952), RFC-1123 §2.1 + // (https://tools.ietf.org/html/rfc1123), and RFC-2782 + // (https://tools.ietf.org/html/rfc2782). + re := regexp.MustCompile(`^(?i:[a-z0-9]|[a-z0-9][a-z0-9\-]{0,61}[a-z0-9])$`) + if !re.MatchString(name) { + return fmt.Errorf("service name must be valid per RFC 1123 and can contain only alphanumeric characters or dashes and must be no longer than 63 characters: %q", name) + } + return nil +} + +// Hash calculates the hash of the check based on it's content and the service +// which owns it +func (s *Service) Hash() string { + h := sha1.New() + io.WriteString(h, s.Name) + io.WriteString(h, strings.Join(s.Tags, "")) + io.WriteString(h, s.PortLabel) + io.WriteString(h, s.AddressMode) + return fmt.Sprintf("%x", h.Sum(nil)) +} + +const ( + // DefaultKillTimeout is the default timeout between signaling a task it + // will be killed and killing it. + DefaultKillTimeout = 5 * time.Second +) + +// LogConfig provides configuration for log rotation +type LogConfig struct { + MaxFiles int + MaxFileSizeMB int +} + +// DefaultLogConfig returns the default LogConfig values. +func DefaultLogConfig() *LogConfig { + return &LogConfig{ + MaxFiles: 10, + MaxFileSizeMB: 10, + } +} + +// Validate returns an error if the log config specified are less than +// the minimum allowed. 
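+// The minimums are a single file and a 1MB file size.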
+func (l *LogConfig) Validate() error { + var mErr multierror.Error + if l.MaxFiles < 1 { + mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum number of files is 1; got %d", l.MaxFiles)) + } + if l.MaxFileSizeMB < 1 { + mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum file size is 1MB; got %d", l.MaxFileSizeMB)) + } + return mErr.ErrorOrNil() +} + +// Task is a single process typically that is executed as part of a task group. +type Task struct { + // Name of the task + Name string + + // Driver is used to control which driver is used + Driver string + + // User is used to determine which user will run the task. It defaults to + // the same user the Nomad client is being run as. + User string + + // Config is provided to the driver to initialize + Config map[string]interface{} + + // Map of environment variables to be used by the driver + Env map[string]string + + // List of service definitions exposed by the Task + Services []*Service + + // Vault is used to define the set of Vault policies that this task should + // have access to. + Vault *Vault + + // Templates are the set of templates to be rendered for the task. + Templates []*Template + + // Constraints can be specified at a task level and apply only to + // the particular task. + Constraints []*Constraint + + // Resources is the resources needed by this task + Resources *Resources + + // DispatchPayload configures how the task retrieves its input from a dispatch + DispatchPayload *DispatchPayloadConfig + + // Meta is used to associate arbitrary metadata with this + // task. This is opaque to Nomad. + Meta map[string]string + + // KillTimeout is the time between signaling a task that it will be + // killed and killing it. + KillTimeout time.Duration + + // LogConfig provides configuration for log rotation + LogConfig *LogConfig + + // Artifacts is a list of artifacts to download and extract before running + // the task. + Artifacts []*TaskArtifact + + // Leader marks the task as the leader within the group. When the leader + // task exits, other tasks will be gracefully terminated. + Leader bool + + // ShutdownDelay is the duration of the delay between deregistering a + // task from Consul and sending it a signal to shutdown. See #2441 + ShutdownDelay time.Duration +} + +func (t *Task) Copy() *Task { + if t == nil { + return nil + } + nt := new(Task) + *nt = *t + nt.Env = helper.CopyMapStringString(nt.Env) + + if t.Services != nil { + services := make([]*Service, len(nt.Services)) + for i, s := range nt.Services { + services[i] = s.Copy() + } + nt.Services = services + } + + nt.Constraints = CopySliceConstraints(nt.Constraints) + + nt.Vault = nt.Vault.Copy() + nt.Resources = nt.Resources.Copy() + nt.Meta = helper.CopyMapStringString(nt.Meta) + nt.DispatchPayload = nt.DispatchPayload.Copy() + + if t.Artifacts != nil { + artifacts := make([]*TaskArtifact, 0, len(t.Artifacts)) + for _, a := range nt.Artifacts { + artifacts = append(artifacts, a.Copy()) + } + nt.Artifacts = artifacts + } + + if i, err := copystructure.Copy(nt.Config); err != nil { + panic(err.Error()) + } else { + nt.Config = i.(map[string]interface{}) + } + + if t.Templates != nil { + templates := make([]*Template, len(t.Templates)) + for i, tmpl := range nt.Templates { + templates[i] = tmpl.Copy() + } + nt.Templates = templates + } + + return nt +} + +// Canonicalize canonicalizes fields in the task. 
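+// Empty Meta, Config and Env maps are nilled out, missing Resources and KillTimeout
+// get defaults, and Services, Vault and Templates are canonicalized in turn.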
+func (t *Task) Canonicalize(job *Job, tg *TaskGroup) { + // Ensure that an empty and nil map are treated the same to avoid scheduling + // problems since we use reflect DeepEquals. + if len(t.Meta) == 0 { + t.Meta = nil + } + if len(t.Config) == 0 { + t.Config = nil + } + if len(t.Env) == 0 { + t.Env = nil + } + + for _, service := range t.Services { + service.Canonicalize(job.Name, tg.Name, t.Name) + } + + // If Resources are nil initialize them to defaults, otherwise canonicalize + if t.Resources == nil { + t.Resources = DefaultResources() + } else { + t.Resources.Canonicalize() + } + + // Set the default timeout if it is not specified. + if t.KillTimeout == 0 { + t.KillTimeout = DefaultKillTimeout + } + + if t.Vault != nil { + t.Vault.Canonicalize() + } + + for _, template := range t.Templates { + template.Canonicalize() + } +} + +func (t *Task) GoString() string { + return fmt.Sprintf("*%#v", *t) +} + +// Validate is used to sanity check a task +func (t *Task) Validate(ephemeralDisk *EphemeralDisk) error { + var mErr multierror.Error + if t.Name == "" { + mErr.Errors = append(mErr.Errors, errors.New("Missing task name")) + } + if strings.ContainsAny(t.Name, `/\`) { + // We enforce this so that when creating the directory on disk it will + // not have any slashes. + mErr.Errors = append(mErr.Errors, errors.New("Task name cannot include slashes")) + } + if t.Driver == "" { + mErr.Errors = append(mErr.Errors, errors.New("Missing task driver")) + } + if t.KillTimeout < 0 { + mErr.Errors = append(mErr.Errors, errors.New("KillTimeout must be a positive value")) + } + if t.ShutdownDelay < 0 { + mErr.Errors = append(mErr.Errors, errors.New("ShutdownDelay must be a positive value")) + } + + // Validate the resources. + if t.Resources == nil { + mErr.Errors = append(mErr.Errors, errors.New("Missing task resources")) + } else { + if err := t.Resources.MeetsMinResources(); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + + // Ensure the task isn't asking for disk resources + if t.Resources.DiskMB > 0 { + mErr.Errors = append(mErr.Errors, errors.New("Task can't ask for disk resources, they have to be specified at the task group level.")) + } + } + + // Validate the log config + if t.LogConfig == nil { + mErr.Errors = append(mErr.Errors, errors.New("Missing Log Config")) + } else if err := t.LogConfig.Validate(); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + + for idx, constr := range t.Constraints { + if err := constr.Validate(); err != nil { + outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err) + mErr.Errors = append(mErr.Errors, outer) + } + + switch constr.Operand { + case ConstraintDistinctHosts, ConstraintDistinctProperty: + outer := fmt.Errorf("Constraint %d has disallowed Operand at task level: %s", idx+1, constr.Operand) + mErr.Errors = append(mErr.Errors, outer) + } + } + + // Validate Services + if err := validateServices(t); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + + if t.LogConfig != nil && ephemeralDisk != nil { + logUsage := (t.LogConfig.MaxFiles * t.LogConfig.MaxFileSizeMB) + if ephemeralDisk.SizeMB <= logUsage { + mErr.Errors = append(mErr.Errors, + fmt.Errorf("log storage (%d MB) must be less than requested disk capacity (%d MB)", + logUsage, ephemeralDisk.SizeMB)) + } + } + + for idx, artifact := range t.Artifacts { + if err := artifact.Validate(); err != nil { + outer := fmt.Errorf("Artifact %d validation failed: %v", idx+1, err) + mErr.Errors = append(mErr.Errors, outer) + } + } + + if t.Vault != nil { + if err := 
t.Vault.Validate(); err != nil { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Vault validation failed: %v", err)) + } + } + + destinations := make(map[string]int, len(t.Templates)) + for idx, tmpl := range t.Templates { + if err := tmpl.Validate(); err != nil { + outer := fmt.Errorf("Template %d validation failed: %s", idx+1, err) + mErr.Errors = append(mErr.Errors, outer) + } + + if other, ok := destinations[tmpl.DestPath]; ok { + outer := fmt.Errorf("Template %d has same destination as %d", idx+1, other) + mErr.Errors = append(mErr.Errors, outer) + } else { + destinations[tmpl.DestPath] = idx + 1 + } + } + + // Validate the dispatch payload block if there + if t.DispatchPayload != nil { + if err := t.DispatchPayload.Validate(); err != nil { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Dispatch Payload validation failed: %v", err)) + } + } + + return mErr.ErrorOrNil() +} + +// validateServices takes a task and validates the services within it are valid +// and reference ports that exist. +func validateServices(t *Task) error { + var mErr multierror.Error + + // Ensure that services don't ask for non-existent ports and their names are + // unique. + servicePorts := make(map[string][]string) + knownServices := make(map[string]struct{}) + for i, service := range t.Services { + if err := service.Validate(); err != nil { + outer := fmt.Errorf("service[%d] %+q validation failed: %s", i, service.Name, err) + mErr.Errors = append(mErr.Errors, outer) + } + + // Ensure that services with the same name are not being registered for + // the same port + if _, ok := knownServices[service.Name+service.PortLabel]; ok { + mErr.Errors = append(mErr.Errors, fmt.Errorf("service %q is duplicate", service.Name)) + } + knownServices[service.Name+service.PortLabel] = struct{}{} + + if service.PortLabel != "" { + servicePorts[service.PortLabel] = append(servicePorts[service.PortLabel], service.Name) + } + + // Ensure that check names are unique. + knownChecks := make(map[string]struct{}) + for _, check := range service.Checks { + if _, ok := knownChecks[check.Name]; ok { + mErr.Errors = append(mErr.Errors, fmt.Errorf("check %q is duplicate", check.Name)) + } + knownChecks[check.Name] = struct{}{} + } + } + + // Get the set of port labels. + portLabels := make(map[string]struct{}) + if t.Resources != nil { + for _, network := range t.Resources.Networks { + ports := network.PortLabels() + for portLabel, _ := range ports { + portLabels[portLabel] = struct{}{} + } + } + } + + // Ensure all ports referenced in services exist. + for servicePort, services := range servicePorts { + _, ok := portLabels[servicePort] + if !ok { + joined := strings.Join(services, ", ") + err := fmt.Errorf("port label %q referenced by services %v does not exist", servicePort, joined) + mErr.Errors = append(mErr.Errors, err) + } + } + + // Ensure address mode is valid + return mErr.ErrorOrNil() +} + +const ( + // TemplateChangeModeNoop marks that no action should be taken if the + // template is re-rendered + TemplateChangeModeNoop = "noop" + + // TemplateChangeModeSignal marks that the task should be signaled if the + // template is re-rendered + TemplateChangeModeSignal = "signal" + + // TemplateChangeModeRestart marks that the task should be restarted if the + // template is re-rendered + TemplateChangeModeRestart = "restart" +) + +var ( + // TemplateChangeModeInvalidError is the error for when an invalid change + // mode is given + TemplateChangeModeInvalidError = errors.New("Invalid change mode. 
Must be one of the following: noop, signal, restart")
+)
+
+// Template represents a template configuration to be rendered for a given task
+type Template struct {
+	// SourcePath is the path to the template to be rendered
+	SourcePath string
+
+	// DestPath is the path to where the template should be rendered
+	DestPath string
+
+	// EmbeddedTmpl stores the raw template. This is useful for smaller templates
+	// where they are embedded in the job file rather than sent as an artifact
+	EmbeddedTmpl string
+
+	// ChangeMode indicates what should be done if the template is re-rendered
+	ChangeMode string
+
+	// ChangeSignal is the signal that should be sent if the change mode
+	// requires it.
+	ChangeSignal string
+
+	// Splay is used to avoid coordinated restarts of processes by applying a
+	// random wait between 0 and the given splay value before signalling the
+	// application of a change
+	Splay time.Duration
+
+	// Perms is the permission the file should be written out with.
+	Perms string
+
+	// LeftDelim and RightDelim are optional configurations to control what
+	// delimiter is utilized when parsing the template.
+	LeftDelim string
+	RightDelim string
+
+	// Envvars enables exposing the template as environment variables
+	// instead of as a file. The template must be of the form:
+	//
+	//	VAR_NAME_1={{ key service/my-key }}
+	//	VAR_NAME_2=raw string and {{ env "attr.kernel.name" }}
+	//
+	// Lines will be split on the initial "=" with the first part being the
+	// key name and the second part the value.
+	// Empty lines and lines starting with # will be ignored, but to avoid
+	// escaping issues #s within lines will not be treated as comments.
+	Envvars bool
+
+	// VaultGrace is the grace duration between lease renewal and reacquiring a
+	// secret. If the lease of a secret is less than the grace, a new secret is
+	// acquired.
+	VaultGrace time.Duration
+}
+
+// DefaultTemplate returns a default template.
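+// The defaults are the "restart" change mode, a 5 second splay and 0644 file permissions.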
+func DefaultTemplate() *Template { + return &Template{ + ChangeMode: TemplateChangeModeRestart, + Splay: 5 * time.Second, + Perms: "0644", + } +} + +func (t *Template) Copy() *Template { + if t == nil { + return nil + } + copy := new(Template) + *copy = *t + return copy +} + +func (t *Template) Canonicalize() { + if t.ChangeSignal != "" { + t.ChangeSignal = strings.ToUpper(t.ChangeSignal) + } +} + +func (t *Template) Validate() error { + var mErr multierror.Error + + // Verify we have something to render + if t.SourcePath == "" && t.EmbeddedTmpl == "" { + multierror.Append(&mErr, fmt.Errorf("Must specify a source path or have an embedded template")) + } + + // Verify we can render somewhere + if t.DestPath == "" { + multierror.Append(&mErr, fmt.Errorf("Must specify a destination for the template")) + } + + // Verify the destination doesn't escape + escaped, err := PathEscapesAllocDir("task", t.DestPath) + if err != nil { + mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid destination path: %v", err)) + } else if escaped { + mErr.Errors = append(mErr.Errors, fmt.Errorf("destination escapes allocation directory")) + } + + // Verify a proper change mode + switch t.ChangeMode { + case TemplateChangeModeNoop, TemplateChangeModeRestart: + case TemplateChangeModeSignal: + if t.ChangeSignal == "" { + multierror.Append(&mErr, fmt.Errorf("Must specify signal value when change mode is signal")) + } + if t.Envvars { + multierror.Append(&mErr, fmt.Errorf("cannot use signals with env var templates")) + } + default: + multierror.Append(&mErr, TemplateChangeModeInvalidError) + } + + // Verify the splay is positive + if t.Splay < 0 { + multierror.Append(&mErr, fmt.Errorf("Must specify positive splay value")) + } + + // Verify the permissions + if t.Perms != "" { + if _, err := strconv.ParseUint(t.Perms, 8, 12); err != nil { + multierror.Append(&mErr, fmt.Errorf("Failed to parse %q as octal: %v", t.Perms, err)) + } + } + + if t.VaultGrace.Nanoseconds() < 0 { + multierror.Append(&mErr, fmt.Errorf("Vault grace must be greater than zero: %v < 0", t.VaultGrace)) + } + + return mErr.ErrorOrNil() +} + +// Set of possible states for a task. +const ( + TaskStatePending = "pending" // The task is waiting to be run. + TaskStateRunning = "running" // The task is currently running. + TaskStateDead = "dead" // Terminal state of task. +) + +// TaskState tracks the current state of a task and events that caused state +// transitions. +type TaskState struct { + // The current state of the task. + State string + + // Failed marks a task as having failed + Failed bool + + // Restarts is the number of times the task has restarted + Restarts uint64 + + // LastRestart is the time the task last restarted. It is updated each time the + // task restarts + LastRestart time.Time + + // StartedAt is the time the task is started. It is updated each time the + // task starts + StartedAt time.Time + + // FinishedAt is the time at which the task transistioned to dead and will + // not be started again. + FinishedAt time.Time + + // Series of task events that transition the state of the task. + Events []*TaskEvent +} + +func (ts *TaskState) Copy() *TaskState { + if ts == nil { + return nil + } + copy := new(TaskState) + *copy = *ts + + if ts.Events != nil { + copy.Events = make([]*TaskEvent, len(ts.Events)) + for i, e := range ts.Events { + copy.Events[i] = e.Copy() + } + } + return copy +} + +// Successful returns whether a task finished successfully. 
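+// A task is considered successful when it is dead and its final event is a
+// Terminated event with an exit code of zero.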
+func (ts *TaskState) Successful() bool {
+	l := len(ts.Events)
+	if ts.State != TaskStateDead || l == 0 {
+		return false
+	}
+
+	e := ts.Events[l-1]
+	if e.Type != TaskTerminated {
+		return false
+	}
+
+	return e.ExitCode == 0
+}
+
+const (
+	// TaskSetupFailure indicates that the task could not be started due to a
+	// setup failure.
+	TaskSetupFailure = "Setup Failure"
+
+	// TaskDriverFailure indicates that the task could not be started due to a
+	// failure in the driver.
+	TaskDriverFailure = "Driver Failure"
+
+	// TaskReceived signals that the task has been pulled by the client at the
+	// given timestamp.
+	TaskReceived = "Received"
+
+	// TaskFailedValidation indicates the task was invalid and as such was not
+	// run.
+	TaskFailedValidation = "Failed Validation"
+
+	// TaskStarted signals that the task was started and its timestamp can be
+	// used to determine the running length of the task.
+	TaskStarted = "Started"
+
+	// TaskTerminated indicates that the task was started and exited.
+	TaskTerminated = "Terminated"
+
+	// TaskKilling indicates a kill signal has been sent to the task.
+	TaskKilling = "Killing"
+
+	// TaskKilled indicates a user has killed the task.
+	TaskKilled = "Killed"
+
+	// TaskRestarting indicates that the task terminated and is being restarted.
+	TaskRestarting = "Restarting"
+
+	// TaskNotRestarting indicates that the task has failed and is not being
+	// restarted because it has exceeded its restart policy.
+	TaskNotRestarting = "Not Restarting"
+
+	// TaskRestartSignal indicates that the task has been signalled to be
+	// restarted
+	TaskRestartSignal = "Restart Signaled"
+
+	// TaskSignaling indicates that the task is being signalled.
+	TaskSignaling = "Signaling"
+
+	// TaskDownloadingArtifacts means the task is downloading the artifacts
+	// specified in the task.
+	TaskDownloadingArtifacts = "Downloading Artifacts"
+
+	// TaskArtifactDownloadFailed indicates that downloading the artifacts
+	// failed.
+	TaskArtifactDownloadFailed = "Failed Artifact Download"
+
+	// TaskBuildingTaskDir indicates that the task directory/chroot is being
+	// built.
+	TaskBuildingTaskDir = "Building Task Directory"
+
+	// TaskSetup indicates the task runner is setting up the task environment
+	TaskSetup = "Task Setup"
+
+	// TaskDiskExceeded indicates that one of the tasks in a taskgroup has
+	// exceeded the requested disk resources.
+	TaskDiskExceeded = "Disk Resources Exceeded"
+
+	// TaskSiblingFailed indicates that a sibling task in the task group has
+	// failed.
+	TaskSiblingFailed = "Sibling Task Failed"
+
+	// TaskDriverMessage is an informational event message emitted by
+	// drivers such as when they're performing a long running action like
+	// downloading an image.
+	TaskDriverMessage = "Driver"
+
+	// TaskLeaderDead indicates that the leader task within the task group has finished.
+	TaskLeaderDead = "Leader Task Dead"
+
+	// TaskGenericMessage is used by various subsystems to emit a message.
+	TaskGenericMessage = "Generic"
+)
+
+// TaskEvent is an event that affects the state of a task and contains meta-data
+// appropriate to the event's type.
+type TaskEvent struct {
+	Type string
+	Time int64 // Unix Nanosecond timestamp
+
+	// FailsTask marks whether this event fails the task
+	FailsTask bool
+
+	// Restart fields.
+	RestartReason string
+
+	// Setup Failure fields.
+	SetupError string
+
+	// Driver Failure fields.
+	DriverError string // A driver error occurred while starting the task.
+
+	// Task Terminated Fields.
+	ExitCode int // The exit code of the task.
+ Signal int // The signal that terminated the task. + Message string // A possible message explaining the termination of the task. + + // Killing fields + KillTimeout time.Duration + + // Task Killed Fields. + KillError string // Error killing the task. + + // KillReason is the reason the task was killed + KillReason string + + // TaskRestarting fields. + StartDelay int64 // The sleep period before restarting the task in unix nanoseconds. + + // Artifact Download fields + DownloadError string // Error downloading artifacts + + // Validation fields + ValidationError string // Validation error + + // The maximum allowed task disk size. + DiskLimit int64 + + // Name of the sibling task that caused termination of the task that + // the TaskEvent refers to. + FailedSibling string + + // VaultError is the error from token renewal + VaultError string + + // TaskSignalReason indicates the reason the task is being signalled. + TaskSignalReason string + + // TaskSignal is the signal that was sent to the task + TaskSignal string + + // DriverMessage indicates a driver action being taken. + DriverMessage string + + // GenericSource is the source of a message. + GenericSource string +} + +func (te *TaskEvent) GoString() string { + return fmt.Sprintf("%v - %v", te.Time, te.Type) +} + +// SetMessage sets the message of TaskEvent +func (te *TaskEvent) SetMessage(msg string) *TaskEvent { + te.Message = msg + return te +} + +func (te *TaskEvent) Copy() *TaskEvent { + if te == nil { + return nil + } + copy := new(TaskEvent) + *copy = *te + return copy +} + +func NewTaskEvent(event string) *TaskEvent { + return &TaskEvent{ + Type: event, + Time: time.Now().UnixNano(), + } +} + +// SetSetupError is used to store an error that occurred while setting up the +// task +func (e *TaskEvent) SetSetupError(err error) *TaskEvent { + if err != nil { + e.SetupError = err.Error() + } + return e +} + +func (e *TaskEvent) SetFailsTask() *TaskEvent { + e.FailsTask = true + return e +} + +func (e *TaskEvent) SetDriverError(err error) *TaskEvent { + if err != nil { + e.DriverError = err.Error() + } + return e +} + +func (e *TaskEvent) SetExitCode(c int) *TaskEvent { + e.ExitCode = c + return e +} + +func (e *TaskEvent) SetSignal(s int) *TaskEvent { + e.Signal = s + return e +} + +func (e *TaskEvent) SetExitMessage(err error) *TaskEvent { + if err != nil { + e.Message = err.Error() + } + return e +} + +func (e *TaskEvent) SetKillError(err error) *TaskEvent { + if err != nil { + e.KillError = err.Error() + } + return e +} + +func (e *TaskEvent) SetKillReason(r string) *TaskEvent { + e.KillReason = r + return e +} + +func (e *TaskEvent) SetRestartDelay(delay time.Duration) *TaskEvent { + e.StartDelay = int64(delay) + return e +} + +func (e *TaskEvent) SetRestartReason(reason string) *TaskEvent { + e.RestartReason = reason + return e +} + +func (e *TaskEvent) SetTaskSignalReason(r string) *TaskEvent { + e.TaskSignalReason = r + return e +} + +func (e *TaskEvent) SetTaskSignal(s os.Signal) *TaskEvent { + e.TaskSignal = s.String() + return e +} + +func (e *TaskEvent) SetDownloadError(err error) *TaskEvent { + if err != nil { + e.DownloadError = err.Error() + } + return e +} + +func (e *TaskEvent) SetValidationError(err error) *TaskEvent { + if err != nil { + e.ValidationError = err.Error() + } + return e +} + +func (e *TaskEvent) SetKillTimeout(timeout time.Duration) *TaskEvent { + e.KillTimeout = timeout + return e +} + +func (e *TaskEvent) SetDiskLimit(limit int64) *TaskEvent { + e.DiskLimit = limit + return e +} + +func (e 
*TaskEvent) SetFailedSibling(sibling string) *TaskEvent { + e.FailedSibling = sibling + return e +} + +func (e *TaskEvent) SetVaultRenewalError(err error) *TaskEvent { + if err != nil { + e.VaultError = err.Error() + } + return e +} + +func (e *TaskEvent) SetDriverMessage(m string) *TaskEvent { + e.DriverMessage = m + return e +} + +func (e *TaskEvent) SetGenericSource(s string) *TaskEvent { + e.GenericSource = s + return e +} + +// TaskArtifact is an artifact to download before running the task. +type TaskArtifact struct { + // GetterSource is the source to download an artifact using go-getter + GetterSource string + + // GetterOptions are options to use when downloading the artifact using + // go-getter. + GetterOptions map[string]string + + // GetterMode is the go-getter.ClientMode for fetching resources. + // Defaults to "any" but can be set to "file" or "dir". + GetterMode string + + // RelativeDest is the download destination given relative to the task's + // directory. + RelativeDest string +} + +func (ta *TaskArtifact) Copy() *TaskArtifact { + if ta == nil { + return nil + } + nta := new(TaskArtifact) + *nta = *ta + nta.GetterOptions = helper.CopyMapStringString(ta.GetterOptions) + return nta +} + +func (ta *TaskArtifact) GoString() string { + return fmt.Sprintf("%+v", ta) +} + +// PathEscapesAllocDir returns if the given path escapes the allocation +// directory. The prefix allows adding a prefix if the path will be joined, for +// example a "task/local" prefix may be provided if the path will be joined +// against that prefix. +func PathEscapesAllocDir(prefix, path string) (bool, error) { + // Verify the destination doesn't escape the tasks directory + alloc, err := filepath.Abs(filepath.Join("/", "alloc-dir/", "alloc-id/")) + if err != nil { + return false, err + } + abs, err := filepath.Abs(filepath.Join(alloc, prefix, path)) + if err != nil { + return false, err + } + rel, err := filepath.Rel(alloc, abs) + if err != nil { + return false, err + } + + return strings.HasPrefix(rel, ".."), nil +} + +func (ta *TaskArtifact) Validate() error { + // Verify the source + var mErr multierror.Error + if ta.GetterSource == "" { + mErr.Errors = append(mErr.Errors, fmt.Errorf("source must be specified")) + } + + switch ta.GetterMode { + case "": + // Default to any + ta.GetterMode = GetterModeAny + case GetterModeAny, GetterModeFile, GetterModeDir: + // Ok + default: + mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid artifact mode %q; must be one of: %s, %s, %s", + ta.GetterMode, GetterModeAny, GetterModeFile, GetterModeDir)) + } + + escaped, err := PathEscapesAllocDir("task", ta.RelativeDest) + if err != nil { + mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid destination path: %v", err)) + } else if escaped { + mErr.Errors = append(mErr.Errors, fmt.Errorf("destination escapes allocation directory")) + } + + // Verify the checksum + if check, ok := ta.GetterOptions["checksum"]; ok { + check = strings.TrimSpace(check) + if check == "" { + mErr.Errors = append(mErr.Errors, fmt.Errorf("checksum value cannot be empty")) + return mErr.ErrorOrNil() + } + + parts := strings.Split(check, ":") + if l := len(parts); l != 2 { + mErr.Errors = append(mErr.Errors, fmt.Errorf(`checksum must be given as "type:value"; got %q`, check)) + return mErr.ErrorOrNil() + } + + checksumVal := parts[1] + checksumBytes, err := hex.DecodeString(checksumVal) + if err != nil { + mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid checksum: %v", err)) + return mErr.ErrorOrNil() + } + + checksumType := 
parts[0] + expectedLength := 0 + switch checksumType { + case "md5": + expectedLength = md5.Size + case "sha1": + expectedLength = sha1.Size + case "sha256": + expectedLength = sha256.Size + case "sha512": + expectedLength = sha512.Size + default: + mErr.Errors = append(mErr.Errors, fmt.Errorf("unsupported checksum type: %s", checksumType)) + return mErr.ErrorOrNil() + } + + if len(checksumBytes) != expectedLength { + mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid %s checksum: %v", checksumType, checksumVal)) + return mErr.ErrorOrNil() + } + } + + return mErr.ErrorOrNil() +} + +const ( + ConstraintDistinctProperty = "distinct_property" + ConstraintDistinctHosts = "distinct_hosts" + ConstraintRegex = "regexp" + ConstraintVersion = "version" + ConstraintSetContains = "set_contains" +) + +// Constraints are used to restrict placement options. +type Constraint struct { + LTarget string // Left-hand target + RTarget string // Right-hand target + Operand string // Constraint operand (<=, <, =, !=, >, >=), contains, near + str string // Memoized string +} + +// Equal checks if two constraints are equal +func (c *Constraint) Equal(o *Constraint) bool { + return c.LTarget == o.LTarget && + c.RTarget == o.RTarget && + c.Operand == o.Operand +} + +func (c *Constraint) Copy() *Constraint { + if c == nil { + return nil + } + nc := new(Constraint) + *nc = *c + return nc +} + +func (c *Constraint) String() string { + if c.str != "" { + return c.str + } + c.str = fmt.Sprintf("%s %s %s", c.LTarget, c.Operand, c.RTarget) + return c.str +} + +func (c *Constraint) Validate() error { + var mErr multierror.Error + if c.Operand == "" { + mErr.Errors = append(mErr.Errors, errors.New("Missing constraint operand")) + } + + // requireLtarget specifies whether the constraint requires an LTarget to be + // provided. 
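+	// distinct_hosts is the only operand for which no LTarget is required.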
+ requireLtarget := true + + // Perform additional validation based on operand + switch c.Operand { + case ConstraintDistinctHosts: + requireLtarget = false + case ConstraintSetContains: + if c.RTarget == "" { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Set contains constraint requires an RTarget")) + } + case ConstraintRegex: + if _, err := regexp.Compile(c.RTarget); err != nil { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Regular expression failed to compile: %v", err)) + } + case ConstraintVersion: + if _, err := version.NewConstraint(c.RTarget); err != nil { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Version constraint is invalid: %v", err)) + } + case ConstraintDistinctProperty: + // If a count is set, make sure it is convertible to a uint64 + if c.RTarget != "" { + count, err := strconv.ParseUint(c.RTarget, 10, 64) + if err != nil { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Failed to convert RTarget %q to uint64: %v", c.RTarget, err)) + } else if count < 1 { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Distinct Property must have an allowed count of 1 or greater: %d < 1", count)) + } + } + case "=", "==", "is", "!=", "not", "<", "<=", ">", ">=": + if c.RTarget == "" { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Operator %q requires an RTarget", c.Operand)) + } + default: + mErr.Errors = append(mErr.Errors, fmt.Errorf("Unknown constraint type %q", c.Operand)) + } + + // Ensure we have an LTarget for the constraints that need one + if requireLtarget && c.LTarget == "" { + mErr.Errors = append(mErr.Errors, fmt.Errorf("No LTarget provided but is required by constraint")) + } + + return mErr.ErrorOrNil() +} + +// EphemeralDisk is an ephemeral disk object +type EphemeralDisk struct { + // Sticky indicates whether the allocation is sticky to a node + Sticky bool + + // SizeMB is the size of the local disk + SizeMB int + + // Migrate determines if Nomad client should migrate the allocation dir for + // sticky allocations + Migrate bool +} + +// DefaultEphemeralDisk returns a EphemeralDisk with default configurations +func DefaultEphemeralDisk() *EphemeralDisk { + return &EphemeralDisk{ + SizeMB: 300, + } +} + +// Validate validates EphemeralDisk +func (d *EphemeralDisk) Validate() error { + if d.SizeMB < 10 { + return fmt.Errorf("minimum DiskMB value is 10; got %d", d.SizeMB) + } + return nil +} + +// Copy copies the EphemeralDisk struct and returns a new one +func (d *EphemeralDisk) Copy() *EphemeralDisk { + ld := new(EphemeralDisk) + *ld = *d + return ld +} + +const ( + // VaultChangeModeNoop takes no action when a new token is retrieved. + VaultChangeModeNoop = "noop" + + // VaultChangeModeSignal signals the task when a new token is retrieved. + VaultChangeModeSignal = "signal" + + // VaultChangeModeRestart restarts the task when a new token is retrieved. + VaultChangeModeRestart = "restart" +) + +// Vault stores the set of permissions a task needs access to from Vault. +type Vault struct { + // Policies is the set of policies that the task needs access to + Policies []string + + // Env marks whether the Vault Token should be exposed as an environment + // variable + Env bool + + // ChangeMode is used to configure the task's behavior when the Vault + // token changes because the original token could not be renewed in time. + ChangeMode string + + // ChangeSignal is the signal sent to the task when a new token is + // retrieved. This is only valid when using the signal change mode. 
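+	// The value is upper-cased during canonicalization (for example "sighup" becomes "SIGHUP").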
+ ChangeSignal string +} + +func DefaultVaultBlock() *Vault { + return &Vault{ + Env: true, + ChangeMode: VaultChangeModeRestart, + } +} + +// Copy returns a copy of this Vault block. +func (v *Vault) Copy() *Vault { + if v == nil { + return nil + } + + nv := new(Vault) + *nv = *v + return nv +} + +func (v *Vault) Canonicalize() { + if v.ChangeSignal != "" { + v.ChangeSignal = strings.ToUpper(v.ChangeSignal) + } +} + +// Validate returns if the Vault block is valid. +func (v *Vault) Validate() error { + if v == nil { + return nil + } + + var mErr multierror.Error + if len(v.Policies) == 0 { + multierror.Append(&mErr, fmt.Errorf("Policy list cannot be empty")) + } + + for _, p := range v.Policies { + if p == "root" { + multierror.Append(&mErr, fmt.Errorf("Can not specify \"root\" policy")) + } + } + + switch v.ChangeMode { + case VaultChangeModeSignal: + if v.ChangeSignal == "" { + multierror.Append(&mErr, fmt.Errorf("Signal must be specified when using change mode %q", VaultChangeModeSignal)) + } + case VaultChangeModeNoop, VaultChangeModeRestart: + default: + multierror.Append(&mErr, fmt.Errorf("Unknown change mode %q", v.ChangeMode)) + } + + return mErr.ErrorOrNil() +} + +const ( + // DeploymentStatuses are the various states a deployment can be be in + DeploymentStatusRunning = "running" + DeploymentStatusPaused = "paused" + DeploymentStatusFailed = "failed" + DeploymentStatusSuccessful = "successful" + DeploymentStatusCancelled = "cancelled" + + // DeploymentStatusDescriptions are the various descriptions of the states a + // deployment can be in. + DeploymentStatusDescriptionRunning = "Deployment is running" + DeploymentStatusDescriptionRunningNeedsPromotion = "Deployment is running but requires promotion" + DeploymentStatusDescriptionPaused = "Deployment is paused" + DeploymentStatusDescriptionSuccessful = "Deployment completed successfully" + DeploymentStatusDescriptionStoppedJob = "Cancelled because job is stopped" + DeploymentStatusDescriptionNewerJob = "Cancelled due to newer version of job" + DeploymentStatusDescriptionFailedAllocations = "Failed due to unhealthy allocations" + DeploymentStatusDescriptionFailedByUser = "Deployment marked as failed" +) + +// DeploymentStatusDescriptionRollback is used to get the status description of +// a deployment when rolling back to an older job. +func DeploymentStatusDescriptionRollback(baseDescription string, jobVersion uint64) string { + return fmt.Sprintf("%s - rolling back to job version %d", baseDescription, jobVersion) +} + +// DeploymentStatusDescriptionNoRollbackTarget is used to get the status description of +// a deployment when there is no target to rollback to but autorevet is desired. +func DeploymentStatusDescriptionNoRollbackTarget(baseDescription string) string { + return fmt.Sprintf("%s - no stable job version to auto revert to", baseDescription) +} + +// Deployment is the object that represents a job deployment which is used to +// transition a job between versions. +type Deployment struct { + // ID is a generated UUID for the deployment + ID string + + // Namespace is the namespace the deployment is created in + Namespace string + + // JobID is the job the deployment is created for + JobID string + + // JobVersion is the version of the job at which the deployment is tracking + JobVersion uint64 + + // JobModifyIndex is the modify index of the job at which the deployment is tracking + JobModifyIndex uint64 + + // JobCreateIndex is the create index of the job which the deployment is + // tracking. 
It is needed so that if the job gets stopped and reran we can + // present the correct list of deployments for the job and not old ones. + JobCreateIndex uint64 + + // TaskGroups is the set of task groups effected by the deployment and their + // current deployment status. + TaskGroups map[string]*DeploymentState + + // The status of the deployment + Status string + + // StatusDescription allows a human readable description of the deployment + // status. + StatusDescription string + + CreateIndex uint64 + ModifyIndex uint64 +} + +// NewDeployment creates a new deployment given the job. +func NewDeployment(job *Job) *Deployment { + return &Deployment{ + ID: GenerateUUID(), + Namespace: job.Namespace, + JobID: job.ID, + JobVersion: job.Version, + JobModifyIndex: job.ModifyIndex, + JobCreateIndex: job.CreateIndex, + Status: DeploymentStatusRunning, + StatusDescription: DeploymentStatusDescriptionRunning, + TaskGroups: make(map[string]*DeploymentState, len(job.TaskGroups)), + } +} + +func (d *Deployment) Copy() *Deployment { + if d == nil { + return nil + } + + c := &Deployment{} + *c = *d + + c.TaskGroups = nil + if l := len(d.TaskGroups); d.TaskGroups != nil { + c.TaskGroups = make(map[string]*DeploymentState, l) + for tg, s := range d.TaskGroups { + c.TaskGroups[tg] = s.Copy() + } + } + + return c +} + +// Active returns whether the deployment is active or terminal. +func (d *Deployment) Active() bool { + switch d.Status { + case DeploymentStatusRunning, DeploymentStatusPaused: + return true + default: + return false + } +} + +// GetID is a helper for getting the ID when the object may be nil +func (d *Deployment) GetID() string { + if d == nil { + return "" + } + return d.ID +} + +// HasPlacedCanaries returns whether the deployment has placed canaries +func (d *Deployment) HasPlacedCanaries() bool { + if d == nil || len(d.TaskGroups) == 0 { + return false + } + for _, group := range d.TaskGroups { + if len(group.PlacedCanaries) != 0 { + return true + } + } + return false +} + +// RequiresPromotion returns whether the deployment requires promotion to +// continue +func (d *Deployment) RequiresPromotion() bool { + if d == nil || len(d.TaskGroups) == 0 || d.Status != DeploymentStatusRunning { + return false + } + for _, group := range d.TaskGroups { + if group.DesiredCanaries > 0 && !group.Promoted { + return true + } + } + return false +} + +func (d *Deployment) GoString() string { + base := fmt.Sprintf("Deployment ID %q for job %q has status %q (%v):", d.ID, d.JobID, d.Status, d.StatusDescription) + for group, state := range d.TaskGroups { + base += fmt.Sprintf("\nTask Group %q has state:\n%#v", group, state) + } + return base +} + +// DeploymentState tracks the state of a deployment for a given task group. +type DeploymentState struct { + // AutoRevert marks whether the task group has indicated the job should be + // reverted on failure + AutoRevert bool + + // Promoted marks whether the canaries have been promoted + Promoted bool + + // PlacedCanaries is the set of placed canary allocations + PlacedCanaries []string + + // DesiredCanaries is the number of canaries that should be created. + DesiredCanaries int + + // DesiredTotal is the total number of allocations that should be created as + // part of the deployment. + DesiredTotal int + + // PlacedAllocs is the number of allocations that have been placed + PlacedAllocs int + + // HealthyAllocs is the number of allocations that have been marked healthy. 
+ HealthyAllocs int + + // UnhealthyAllocs are allocations that have been marked as unhealthy. + UnhealthyAllocs int +} + +func (d *DeploymentState) GoString() string { + base := fmt.Sprintf("\tDesired Total: %d", d.DesiredTotal) + base += fmt.Sprintf("\n\tDesired Canaries: %d", d.DesiredCanaries) + base += fmt.Sprintf("\n\tPlaced Canaries: %#v", d.PlacedCanaries) + base += fmt.Sprintf("\n\tPromoted: %v", d.Promoted) + base += fmt.Sprintf("\n\tPlaced: %d", d.PlacedAllocs) + base += fmt.Sprintf("\n\tHealthy: %d", d.HealthyAllocs) + base += fmt.Sprintf("\n\tUnhealthy: %d", d.UnhealthyAllocs) + base += fmt.Sprintf("\n\tAutoRevert: %v", d.AutoRevert) + return base +} + +func (d *DeploymentState) Copy() *DeploymentState { + c := &DeploymentState{} + *c = *d + c.PlacedCanaries = helper.CopySliceString(d.PlacedCanaries) + return c +} + +// DeploymentStatusUpdate is used to update the status of a given deployment +type DeploymentStatusUpdate struct { + // DeploymentID is the ID of the deployment to update + DeploymentID string + + // Status is the new status of the deployment. + Status string + + // StatusDescription is the new status description of the deployment. + StatusDescription string +} + +const ( + AllocDesiredStatusRun = "run" // Allocation should run + AllocDesiredStatusStop = "stop" // Allocation should stop + AllocDesiredStatusEvict = "evict" // Allocation should stop, and was evicted +) + +const ( + AllocClientStatusPending = "pending" + AllocClientStatusRunning = "running" + AllocClientStatusComplete = "complete" + AllocClientStatusFailed = "failed" + AllocClientStatusLost = "lost" +) + +// Allocation is used to allocate the placement of a task group to a node. +type Allocation struct { + // ID of the allocation (UUID) + ID string + + // Namespace is the namespace the allocation is created in + Namespace string + + // ID of the evaluation that generated this allocation + EvalID string + + // Name is a logical name of the allocation. + Name string + + // NodeID is the node this is being placed on + NodeID string + + // Job is the parent job of the task group being allocated. + // This is copied at allocation time to avoid issues if the job + // definition is updated. + JobID string + Job *Job + + // TaskGroup is the name of the task group that should be run + TaskGroup string + + // Resources is the total set of resources allocated as part + // of this allocation of the task group. + Resources *Resources + + // SharedResources are the resources that are shared by all the tasks in an + // allocation + SharedResources *Resources + + // TaskResources is the set of resources allocated to each + // task. These should sum to the total Resources. 
+ TaskResources map[string]*Resources + + // Metrics associated with this allocation + Metrics *AllocMetric + + // Desired Status of the allocation on the client + DesiredStatus string + + // DesiredStatusDescription is meant to provide more human useful information + DesiredDescription string + + // Status of the allocation on the client + ClientStatus string + + // ClientStatusDescription is meant to provide more human useful information + ClientDescription string + + // TaskStates stores the state of each task, + TaskStates map[string]*TaskState + + // PreviousAllocation is the allocation that this allocation is replacing + PreviousAllocation string + + // DeploymentID identifies an allocation as being created from a + // particular deployment + DeploymentID string + + // DeploymentStatus captures the status of the allocation as part of the + // given deployment + DeploymentStatus *AllocDeploymentStatus + + // Raft Indexes + CreateIndex uint64 + ModifyIndex uint64 + + // AllocModifyIndex is not updated when the client updates allocations. This + // lets the client pull only the allocs updated by the server. + AllocModifyIndex uint64 + + // CreateTime is the time the allocation has finished scheduling and been + // verified by the plan applier. + CreateTime int64 +} + +// Index returns the index of the allocation. If the allocation is from a task +// group with count greater than 1, there will be multiple allocations for it. +func (a *Allocation) Index() uint { + l := len(a.Name) + prefix := len(a.JobID) + len(a.TaskGroup) + 2 + if l <= 3 || l <= prefix { + return uint(0) + } + + strNum := a.Name[prefix : len(a.Name)-1] + num, _ := strconv.Atoi(strNum) + return uint(num) +} + +func (a *Allocation) Copy() *Allocation { + return a.copyImpl(true) +} + +// Copy provides a copy of the allocation but doesn't deep copy the job +func (a *Allocation) CopySkipJob() *Allocation { + return a.copyImpl(false) +} + +func (a *Allocation) copyImpl(job bool) *Allocation { + if a == nil { + return nil + } + na := new(Allocation) + *na = *a + + if job { + na.Job = na.Job.Copy() + } + + na.Resources = na.Resources.Copy() + na.SharedResources = na.SharedResources.Copy() + + if a.TaskResources != nil { + tr := make(map[string]*Resources, len(na.TaskResources)) + for task, resource := range na.TaskResources { + tr[task] = resource.Copy() + } + na.TaskResources = tr + } + + na.Metrics = na.Metrics.Copy() + na.DeploymentStatus = na.DeploymentStatus.Copy() + + if a.TaskStates != nil { + ts := make(map[string]*TaskState, len(na.TaskStates)) + for task, state := range na.TaskStates { + ts[task] = state.Copy() + } + na.TaskStates = ts + } + return na +} + +// TerminalStatus returns if the desired or actual status is terminal and +// will no longer transition. +func (a *Allocation) TerminalStatus() bool { + // First check the desired state and if that isn't terminal, check client + // state. + switch a.DesiredStatus { + case AllocDesiredStatusStop, AllocDesiredStatusEvict: + return true + default: + } + + switch a.ClientStatus { + case AllocClientStatusComplete, AllocClientStatusFailed, AllocClientStatusLost: + return true + default: + return false + } +} + +// Terminated returns if the allocation is in a terminal state on a client. 
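+// The terminal client statuses are "complete", "failed" and "lost".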
+func (a *Allocation) Terminated() bool {
+	if a.ClientStatus == AllocClientStatusFailed ||
+		a.ClientStatus == AllocClientStatusComplete ||
+		a.ClientStatus == AllocClientStatusLost {
+		return true
+	}
+	return false
+}
+
+// RanSuccessfully returns whether the client has run the allocation and all
+// tasks finished successfully
+func (a *Allocation) RanSuccessfully() bool {
+	return a.ClientStatus == AllocClientStatusComplete
+}
+
+// ShouldMigrate returns if the allocation needs data migration
+func (a *Allocation) ShouldMigrate() bool {
+	if a.DesiredStatus == AllocDesiredStatusStop || a.DesiredStatus == AllocDesiredStatusEvict {
+		return false
+	}
+
+	tg := a.Job.LookupTaskGroup(a.TaskGroup)
+
+	// if the task group is nil or the ephemeral disk block isn't present then
+	// we won't migrate
+	if tg == nil || tg.EphemeralDisk == nil {
+		return false
+	}
+
+	// We won't migrate any data if the user hasn't enabled migration or the
+	// disk is not marked as sticky
+	if !tg.EphemeralDisk.Migrate || !tg.EphemeralDisk.Sticky {
+		return false
+	}
+
+	return true
+}
+
+// Stub returns a list stub for the allocation
+func (a *Allocation) Stub() *AllocListStub {
+	return &AllocListStub{
+		ID: a.ID,
+		EvalID: a.EvalID,
+		Name: a.Name,
+		NodeID: a.NodeID,
+		JobID: a.JobID,
+		JobVersion: a.Job.Version,
+		TaskGroup: a.TaskGroup,
+		DesiredStatus: a.DesiredStatus,
+		DesiredDescription: a.DesiredDescription,
+		ClientStatus: a.ClientStatus,
+		ClientDescription: a.ClientDescription,
+		TaskStates: a.TaskStates,
+		DeploymentStatus: a.DeploymentStatus,
+		CreateIndex: a.CreateIndex,
+		ModifyIndex: a.ModifyIndex,
+		CreateTime: a.CreateTime,
+	}
+}
+
+// AllocListStub is used to return a subset of alloc information
+type AllocListStub struct {
+	ID string
+	EvalID string
+	Name string
+	NodeID string
+	JobID string
+	JobVersion uint64
+	TaskGroup string
+	DesiredStatus string
+	DesiredDescription string
+	ClientStatus string
+	ClientDescription string
+	TaskStates map[string]*TaskState
+	DeploymentStatus *AllocDeploymentStatus
+	CreateIndex uint64
+	ModifyIndex uint64
+	CreateTime int64
+}
+
+// AllocMetric is used to track various metrics while attempting
+// to make an allocation. These are used to debug a job, or to better
+// understand the pressure within the system.
+type AllocMetric struct {
+	// NodesEvaluated is the number of nodes that were evaluated
+	NodesEvaluated int
+
+	// NodesFiltered is the number of nodes filtered due to a constraint
+	NodesFiltered int
+
+	// NodesAvailable is the number of nodes available for evaluation per DC.
+	NodesAvailable map[string]int
+
+	// ClassFiltered is the number of nodes filtered by class
+	ClassFiltered map[string]int
+
+	// ConstraintFiltered is the number of failures caused by constraint
+	ConstraintFiltered map[string]int
+
+	// NodesExhausted is the number of nodes skipped due to being
+	// exhausted of at least one resource
+	NodesExhausted int
+
+	// ClassExhausted is the number of nodes exhausted by class
+	ClassExhausted map[string]int
+
+	// DimensionExhausted provides the count by dimension or reason
+	DimensionExhausted map[string]int
+
+	// Scores is the scores of the final few nodes remaining
+	// for placement. The top score is typically selected.
+	Scores map[string]float64
+
+	// AllocationTime is a measure of how long the allocation
+	// attempt took. This can affect performance and SLAs.
+	AllocationTime time.Duration
+
+	// CoalescedFailures indicates the number of other
+	// allocations that were coalesced into this failed allocation.
+ // This is to prevent creating many failed allocations for a + // single task group. + CoalescedFailures int +} + +func (a *AllocMetric) Copy() *AllocMetric { + if a == nil { + return nil + } + na := new(AllocMetric) + *na = *a + na.NodesAvailable = helper.CopyMapStringInt(na.NodesAvailable) + na.ClassFiltered = helper.CopyMapStringInt(na.ClassFiltered) + na.ConstraintFiltered = helper.CopyMapStringInt(na.ConstraintFiltered) + na.ClassExhausted = helper.CopyMapStringInt(na.ClassExhausted) + na.DimensionExhausted = helper.CopyMapStringInt(na.DimensionExhausted) + na.Scores = helper.CopyMapStringFloat64(na.Scores) + return na +} + +func (a *AllocMetric) EvaluateNode() { + a.NodesEvaluated += 1 +} + +func (a *AllocMetric) FilterNode(node *Node, constraint string) { + a.NodesFiltered += 1 + if node != nil && node.NodeClass != "" { + if a.ClassFiltered == nil { + a.ClassFiltered = make(map[string]int) + } + a.ClassFiltered[node.NodeClass] += 1 + } + if constraint != "" { + if a.ConstraintFiltered == nil { + a.ConstraintFiltered = make(map[string]int) + } + a.ConstraintFiltered[constraint] += 1 + } +} + +func (a *AllocMetric) ExhaustedNode(node *Node, dimension string) { + a.NodesExhausted += 1 + if node != nil && node.NodeClass != "" { + if a.ClassExhausted == nil { + a.ClassExhausted = make(map[string]int) + } + a.ClassExhausted[node.NodeClass] += 1 + } + if dimension != "" { + if a.DimensionExhausted == nil { + a.DimensionExhausted = make(map[string]int) + } + a.DimensionExhausted[dimension] += 1 + } +} + +func (a *AllocMetric) ScoreNode(node *Node, name string, score float64) { + if a.Scores == nil { + a.Scores = make(map[string]float64) + } + key := fmt.Sprintf("%s.%s", node.ID, name) + a.Scores[key] = score +} + +// AllocDeploymentStatus captures the status of the allocation as part of the +// deployment. This can include things like if the allocation has been marked as +// heatlhy. +type AllocDeploymentStatus struct { + // Healthy marks whether the allocation has been marked healthy or unhealthy + // as part of a deployment. It can be unset if it has neither been marked + // healthy or unhealthy. + Healthy *bool + + // ModifyIndex is the raft index in which the deployment status was last + // changed. 
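As a rough usage sketch (not part of the vendored source), the counters above are populated while the scheduler walks candidate nodes; the datacenter constraint and the score value here are purely illustrative:

// trackCandidates is an illustrative pass over candidate nodes that records
// why nodes were rejected and how the survivors scored.
func trackCandidates(m *AllocMetric, nodes []*Node, wantDC string) {
    for _, node := range nodes {
        m.EvaluateNode()

        // Hypothetical constraint: the node must sit in the requested
        // datacenter. The constraint string becomes the key in
        // ConstraintFiltered.
        if node.Datacenter != wantDC {
            m.FilterNode(node, "${node.datacenter} = "+wantDC)
            continue
        }

        // Nodes that pass all filters are scored; the top score is
        // typically the one selected for placement.
        m.ScoreNode(node, "binpack", 0.83)
    }
}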
+ ModifyIndex uint64 +} + +// IsHealthy returns if the allocation is marked as healthy as part of a +// deployment +func (a *AllocDeploymentStatus) IsHealthy() bool { + if a == nil { + return false + } + + return a.Healthy != nil && *a.Healthy +} + +// IsUnhealthy returns if the allocation is marked as unhealthy as part of a +// deployment +func (a *AllocDeploymentStatus) IsUnhealthy() bool { + if a == nil { + return false + } + + return a.Healthy != nil && !*a.Healthy +} + +func (a *AllocDeploymentStatus) Copy() *AllocDeploymentStatus { + if a == nil { + return nil + } + + c := new(AllocDeploymentStatus) + *c = *a + + if a.Healthy != nil { + c.Healthy = helper.BoolToPtr(*a.Healthy) + } + + return c +} + +const ( + EvalStatusBlocked = "blocked" + EvalStatusPending = "pending" + EvalStatusComplete = "complete" + EvalStatusFailed = "failed" + EvalStatusCancelled = "canceled" +) + +const ( + EvalTriggerJobRegister = "job-register" + EvalTriggerJobDeregister = "job-deregister" + EvalTriggerPeriodicJob = "periodic-job" + EvalTriggerNodeUpdate = "node-update" + EvalTriggerScheduled = "scheduled" + EvalTriggerRollingUpdate = "rolling-update" + EvalTriggerDeploymentWatcher = "deployment-watcher" + EvalTriggerFailedFollowUp = "failed-follow-up" + EvalTriggerMaxPlans = "max-plan-attempts" +) + +const ( + // CoreJobEvalGC is used for the garbage collection of evaluations + // and allocations. We periodically scan evaluations in a terminal state, + // in which all the corresponding allocations are also terminal. We + // delete these out of the system to bound the state. + CoreJobEvalGC = "eval-gc" + + // CoreJobNodeGC is used for the garbage collection of failed nodes. + // We periodically scan nodes in a terminal state, and if they have no + // corresponding allocations we delete these out of the system. + CoreJobNodeGC = "node-gc" + + // CoreJobJobGC is used for the garbage collection of eligible jobs. We + // periodically scan garbage collectible jobs and check if both their + // evaluations and allocations are terminal. If so, we delete these out of + // the system. + CoreJobJobGC = "job-gc" + + // CoreJobDeploymentGC is used for the garbage collection of eligible + // deployments. We periodically scan garbage collectible deployments and + // check if they are terminal. If so, we delete these out of the system. + CoreJobDeploymentGC = "deployment-gc" + + // CoreJobForceGC is used to force garbage collection of all GCable objects. + CoreJobForceGC = "force-gc" +) + +// Evaluation is used anytime we need to apply business logic as a result +// of a change to our desired state (job specification) or the emergent state +// (registered nodes). When the inputs change, we need to "evaluate" them, +// potentially taking action (allocation of work) or doing nothing if the state +// of the world does not require it. +type Evaluation struct { + // ID is a randonly generated UUID used for this evaluation. This + // is assigned upon the creation of the evaluation. + ID string + + // Namespace is the namespace the evaluation is created in + Namespace string + + // Priority is used to control scheduling importance and if this job + // can preempt other jobs. + Priority int + + // Type is used to control which schedulers are available to handle + // this evaluation. + Type string + + // TriggeredBy is used to give some insight into why this Eval + // was created. (Job change, node failure, alloc failure, etc). + TriggeredBy string + + // JobID is the job this evaluation is scoped to. 
Evaluations cannot + // be run in parallel for a given JobID, so we serialize on this. + JobID string + + // JobModifyIndex is the modify index of the job at the time + // the evaluation was created + JobModifyIndex uint64 + + // NodeID is the node that was affected triggering the evaluation. + NodeID string + + // NodeModifyIndex is the modify index of the node at the time + // the evaluation was created + NodeModifyIndex uint64 + + // DeploymentID is the ID of the deployment that triggered the evaluation. + DeploymentID string + + // Status of the evaluation + Status string + + // StatusDescription is meant to provide more human useful information + StatusDescription string + + // Wait is a minimum wait time for running the eval. This is used to + // support a rolling upgrade. + Wait time.Duration + + // NextEval is the evaluation ID for the eval created to do a followup. + // This is used to support rolling upgrades, where we need a chain of evaluations. + NextEval string + + // PreviousEval is the evaluation ID for the eval creating this one to do a followup. + // This is used to support rolling upgrades, where we need a chain of evaluations. + PreviousEval string + + // BlockedEval is the evaluation ID for a created blocked eval. A + // blocked eval will be created if all allocations could not be placed due + // to constraints or lacking resources. + BlockedEval string + + // FailedTGAllocs are task groups which have allocations that could not be + // made, but the metrics are persisted so that the user can use the feedback + // to determine the cause. + FailedTGAllocs map[string]*AllocMetric + + // ClassEligibility tracks computed node classes that have been explicitly + // marked as eligible or ineligible. + ClassEligibility map[string]bool + + // EscapedComputedClass marks whether the job has constraints that are not + // captured by computed node classes. + EscapedComputedClass bool + + // AnnotatePlan triggers the scheduler to provide additional annotations + // during the evaluation. This should not be set during normal operations. + AnnotatePlan bool + + // QueuedAllocations is the number of unplaced allocations at the time the + // evaluation was processed. The map is keyed by Task Group names. + QueuedAllocations map[string]int + + // SnapshotIndex is the Raft index of the snapshot used to process the + // evaluation. As such it will only be set once it has gone through the + // scheduler. + SnapshotIndex uint64 + + // Raft Indexes + CreateIndex uint64 + ModifyIndex uint64 +} + +// TerminalStatus returns if the current status is terminal and +// will no longer transition. 
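A minimal sketch of constructing one of these evaluations for a job registration, using the trigger and status constants defined above; the Job fields referenced are assumed for illustration:

// newRegisterEval builds a pending evaluation for a freshly registered job.
func newRegisterEval(job *Job) *Evaluation {
    return &Evaluation{
        ID:             GenerateUUID(),
        Namespace:      job.Namespace,
        Priority:       job.Priority,
        Type:           job.Type,
        TriggeredBy:    EvalTriggerJobRegister,
        JobID:          job.ID,
        JobModifyIndex: job.ModifyIndex,
        Status:         EvalStatusPending,
    }
}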
+func (e *Evaluation) TerminalStatus() bool { + switch e.Status { + case EvalStatusComplete, EvalStatusFailed, EvalStatusCancelled: + return true + default: + return false + } +} + +func (e *Evaluation) GoString() string { + return fmt.Sprintf("", e.ID, e.JobID, e.Namespace) +} + +func (e *Evaluation) Copy() *Evaluation { + if e == nil { + return nil + } + ne := new(Evaluation) + *ne = *e + + // Copy ClassEligibility + if e.ClassEligibility != nil { + classes := make(map[string]bool, len(e.ClassEligibility)) + for class, elig := range e.ClassEligibility { + classes[class] = elig + } + ne.ClassEligibility = classes + } + + // Copy FailedTGAllocs + if e.FailedTGAllocs != nil { + failedTGs := make(map[string]*AllocMetric, len(e.FailedTGAllocs)) + for tg, metric := range e.FailedTGAllocs { + failedTGs[tg] = metric.Copy() + } + ne.FailedTGAllocs = failedTGs + } + + // Copy queued allocations + if e.QueuedAllocations != nil { + queuedAllocations := make(map[string]int, len(e.QueuedAllocations)) + for tg, num := range e.QueuedAllocations { + queuedAllocations[tg] = num + } + ne.QueuedAllocations = queuedAllocations + } + + return ne +} + +// ShouldEnqueue checks if a given evaluation should be enqueued into the +// eval_broker +func (e *Evaluation) ShouldEnqueue() bool { + switch e.Status { + case EvalStatusPending: + return true + case EvalStatusComplete, EvalStatusFailed, EvalStatusBlocked, EvalStatusCancelled: + return false + default: + panic(fmt.Sprintf("unhandled evaluation (%s) status %s", e.ID, e.Status)) + } +} + +// ShouldBlock checks if a given evaluation should be entered into the blocked +// eval tracker. +func (e *Evaluation) ShouldBlock() bool { + switch e.Status { + case EvalStatusBlocked: + return true + case EvalStatusComplete, EvalStatusFailed, EvalStatusPending, EvalStatusCancelled: + return false + default: + panic(fmt.Sprintf("unhandled evaluation (%s) status %s", e.ID, e.Status)) + } +} + +// MakePlan is used to make a plan from the given evaluation +// for a given Job +func (e *Evaluation) MakePlan(j *Job) *Plan { + p := &Plan{ + EvalID: e.ID, + Priority: e.Priority, + Job: j, + NodeUpdate: make(map[string][]*Allocation), + NodeAllocation: make(map[string][]*Allocation), + } + if j != nil { + p.AllAtOnce = j.AllAtOnce + } + return p +} + +// NextRollingEval creates an evaluation to followup this eval for rolling updates +func (e *Evaluation) NextRollingEval(wait time.Duration) *Evaluation { + return &Evaluation{ + ID: GenerateUUID(), + Namespace: e.Namespace, + Priority: e.Priority, + Type: e.Type, + TriggeredBy: EvalTriggerRollingUpdate, + JobID: e.JobID, + JobModifyIndex: e.JobModifyIndex, + Status: EvalStatusPending, + Wait: wait, + PreviousEval: e.ID, + } +} + +// CreateBlockedEval creates a blocked evaluation to followup this eval to place any +// failed allocations. It takes the classes marked explicitly eligible or +// ineligible and whether the job has escaped computed node classes. 
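To show how the helpers above are meant to be combined, here is a sketch (not the server's actual routing code) of handing an evaluation to either the broker or the blocked-eval tracker, and then chaining a rolling-update follow-up:

// routeEval sketches the broker/blocked split plus rolling-update chaining.
func routeEval(e *Evaluation) *Evaluation {
    switch {
    case e.ShouldEnqueue():
        // hand off to the eval broker for scheduling
    case e.ShouldBlock():
        // park in the blocked-eval tracker until resources free up
    }

    // A rolling update chains a follow-up eval after the configured stagger;
    // its PreviousEval points back at e.ID.
    return e.NextRollingEval(30 * time.Second)
}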
+func (e *Evaluation) CreateBlockedEval(classEligibility map[string]bool, escaped bool) *Evaluation { + return &Evaluation{ + ID: GenerateUUID(), + Namespace: e.Namespace, + Priority: e.Priority, + Type: e.Type, + TriggeredBy: e.TriggeredBy, + JobID: e.JobID, + JobModifyIndex: e.JobModifyIndex, + Status: EvalStatusBlocked, + PreviousEval: e.ID, + ClassEligibility: classEligibility, + EscapedComputedClass: escaped, + } +} + +// CreateFailedFollowUpEval creates a follow up evaluation when the current one +// has been marked as failed because it has hit the delivery limit and will not +// be retried by the eval_broker. +func (e *Evaluation) CreateFailedFollowUpEval(wait time.Duration) *Evaluation { + return &Evaluation{ + ID: GenerateUUID(), + Namespace: e.Namespace, + Priority: e.Priority, + Type: e.Type, + TriggeredBy: EvalTriggerFailedFollowUp, + JobID: e.JobID, + JobModifyIndex: e.JobModifyIndex, + Status: EvalStatusPending, + Wait: wait, + PreviousEval: e.ID, + } +} + +// Plan is used to submit a commit plan for task allocations. These +// are submitted to the leader which verifies that resources have +// not been overcommitted before admiting the plan. +type Plan struct { + // EvalID is the evaluation ID this plan is associated with + EvalID string + + // EvalToken is used to prevent a split-brain processing of + // an evaluation. There should only be a single scheduler running + // an Eval at a time, but this could be violated after a leadership + // transition. This unique token is used to reject plans that are + // being submitted from a different leader. + EvalToken string + + // Priority is the priority of the upstream job + Priority int + + // AllAtOnce is used to control if incremental scheduling of task groups + // is allowed or if we must do a gang scheduling of the entire job. + // If this is false, a plan may be partially applied. Otherwise, the + // entire plan must be able to make progress. + AllAtOnce bool + + // Job is the parent job of all the allocations in the Plan. + // Since a Plan only involves a single Job, we can reduce the size + // of the plan by only including it once. + Job *Job + + // NodeUpdate contains all the allocations for each node. For each node, + // this is a list of the allocations to update to either stop or evict. + NodeUpdate map[string][]*Allocation + + // NodeAllocation contains all the allocations for each node. + // The evicts must be considered prior to the allocations. + NodeAllocation map[string][]*Allocation + + // Annotations contains annotations by the scheduler to be used by operators + // to understand the decisions made by the scheduler. + Annotations *PlanAnnotations + + // Deployment is the deployment created or updated by the scheduler that + // should be applied by the planner. + Deployment *Deployment + + // DeploymentUpdates is a set of status updates to apply to the given + // deployments. This allows the scheduler to cancel any unneeded deployment + // because the job is stopped or the update block is removed. + DeploymentUpdates []*DeploymentStatusUpdate +} + +// AppendUpdate marks the allocation for eviction. The clientStatus of the +// allocation may be optionally set by passing in a non-empty value. +func (p *Plan) AppendUpdate(alloc *Allocation, desiredStatus, desiredDesc, clientStatus string) { + newAlloc := new(Allocation) + *newAlloc = *alloc + + // If the job is not set in the plan we are deregistering a job so we + // extract the job from the allocation. 
+ if p.Job == nil && newAlloc.Job != nil { + p.Job = newAlloc.Job + } + + // Normalize the job + newAlloc.Job = nil + + // Strip the resources as it can be rebuilt. + newAlloc.Resources = nil + + newAlloc.DesiredStatus = desiredStatus + newAlloc.DesiredDescription = desiredDesc + + if clientStatus != "" { + newAlloc.ClientStatus = clientStatus + } + + node := alloc.NodeID + existing := p.NodeUpdate[node] + p.NodeUpdate[node] = append(existing, newAlloc) +} + +func (p *Plan) PopUpdate(alloc *Allocation) { + existing := p.NodeUpdate[alloc.NodeID] + n := len(existing) + if n > 0 && existing[n-1].ID == alloc.ID { + existing = existing[:n-1] + if len(existing) > 0 { + p.NodeUpdate[alloc.NodeID] = existing + } else { + delete(p.NodeUpdate, alloc.NodeID) + } + } +} + +func (p *Plan) AppendAlloc(alloc *Allocation) { + node := alloc.NodeID + existing := p.NodeAllocation[node] + p.NodeAllocation[node] = append(existing, alloc) +} + +// IsNoOp checks if this plan would do nothing +func (p *Plan) IsNoOp() bool { + return len(p.NodeUpdate) == 0 && + len(p.NodeAllocation) == 0 && + p.Deployment == nil && + len(p.DeploymentUpdates) == 0 +} + +// PlanResult is the result of a plan submitted to the leader. +type PlanResult struct { + // NodeUpdate contains all the updates that were committed. + NodeUpdate map[string][]*Allocation + + // NodeAllocation contains all the allocations that were committed. + NodeAllocation map[string][]*Allocation + + // Deployment is the deployment that was committed. + Deployment *Deployment + + // DeploymentUpdates is the set of deployment updates that were committed. + DeploymentUpdates []*DeploymentStatusUpdate + + // RefreshIndex is the index the worker should refresh state up to. + // This allows all evictions and allocations to be materialized. + // If any allocations were rejected due to stale data (node state, + // over committed) this can be used to force a worker refresh. + RefreshIndex uint64 + + // AllocIndex is the Raft index in which the evictions and + // allocations took place. This is used for the write index. + AllocIndex uint64 +} + +// IsNoOp checks if this plan result would do nothing +func (p *PlanResult) IsNoOp() bool { + return len(p.NodeUpdate) == 0 && len(p.NodeAllocation) == 0 && + len(p.DeploymentUpdates) == 0 && p.Deployment == nil +} + +// FullCommit is used to check if all the allocations in a plan +// were committed as part of the result. Returns if there was +// a match, and the number of expected and actual allocations. +func (p *PlanResult) FullCommit(plan *Plan) (bool, int, int) { + expected := 0 + actual := 0 + for name, allocList := range plan.NodeAllocation { + didAlloc, _ := p.NodeAllocation[name] + expected += len(allocList) + actual += len(didAlloc) + } + return actual == expected, expected, actual +} + +// PlanAnnotations holds annotations made by the scheduler to give further debug +// information to operators. +type PlanAnnotations struct { + // DesiredTGUpdates is the set of desired updates per task group. + DesiredTGUpdates map[string]*DesiredUpdates +} + +// DesiredUpdates is the set of changes the scheduler would like to make given +// sufficient resources and cluster capacity. 
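Putting the plan helpers above together, a short sketch of the scheduler-side lifecycle: build a plan from the evaluation, append placements per node, and skip submission when nothing changed; the placements slice is a placeholder:

// buildPlan sketches turning placement decisions into a submittable Plan.
func buildPlan(eval *Evaluation, job *Job, placements []*Allocation) *Plan {
    plan := eval.MakePlan(job)

    // Placements are grouped per node inside NodeAllocation.
    for _, alloc := range placements {
        plan.AppendAlloc(alloc)
    }

    if plan.IsNoOp() {
        // Nothing to evict or place, so there is nothing to submit.
        return nil
    }
    return plan
}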
+type DesiredUpdates struct { + Ignore uint64 + Place uint64 + Migrate uint64 + Stop uint64 + InPlaceUpdate uint64 + DestructiveUpdate uint64 + Canary uint64 +} + +func (d *DesiredUpdates) GoString() string { + return fmt.Sprintf("(place %d) (inplace %d) (destructive %d) (stop %d) (migrate %d) (ignore %d) (canary %d)", + d.Place, d.InPlaceUpdate, d.DestructiveUpdate, d.Stop, d.Migrate, d.Ignore, d.Canary) +} + +// msgpackHandle is a shared handle for encoding/decoding of structs +var MsgpackHandle = func() *codec.MsgpackHandle { + h := &codec.MsgpackHandle{RawToString: true} + + // Sets the default type for decoding a map into a nil interface{}. + // This is necessary in particular because we store the driver configs as a + // nil interface{}. + h.MapType = reflect.TypeOf(map[string]interface{}(nil)) + return h +}() + +var ( + // JsonHandle and JsonHandlePretty are the codec handles to JSON encode + // structs. The pretty handle will add indents for easier human consumption. + JsonHandle = &codec.JsonHandle{ + HTMLCharsAsIs: true, + } + JsonHandlePretty = &codec.JsonHandle{ + HTMLCharsAsIs: true, + Indent: 4, + } +) + +var HashiMsgpackHandle = func() *hcodec.MsgpackHandle { + h := &hcodec.MsgpackHandle{RawToString: true} + + // Sets the default type for decoding a map into a nil interface{}. + // This is necessary in particular because we store the driver configs as a + // nil interface{}. + h.MapType = reflect.TypeOf(map[string]interface{}(nil)) + return h +}() + +// Decode is used to decode a MsgPack encoded object +func Decode(buf []byte, out interface{}) error { + return codec.NewDecoder(bytes.NewReader(buf), MsgpackHandle).Decode(out) +} + +// Encode is used to encode a MsgPack object with type prefix +func Encode(t MessageType, msg interface{}) ([]byte, error) { + var buf bytes.Buffer + buf.WriteByte(uint8(t)) + err := codec.NewEncoder(&buf, MsgpackHandle).Encode(msg) + return buf.Bytes(), err +} + +// KeyringResponse is a unified key response and can be used for install, +// remove, use, as well as listing key queries. +type KeyringResponse struct { + Messages map[string]string + Keys map[string]int + NumNodes int +} + +// KeyringRequest is request objects for serf key operations. +type KeyringRequest struct { + Key string +} + +// RecoverableError wraps an error and marks whether it is recoverable and could +// be retried or it is fatal. +type RecoverableError struct { + Err string + Recoverable bool +} + +// NewRecoverableError is used to wrap an error and mark it as recoverable or +// not. +func NewRecoverableError(e error, recoverable bool) error { + if e == nil { + return nil + } + + return &RecoverableError{ + Err: e.Error(), + Recoverable: recoverable, + } +} + +// WrapRecoverable wraps an existing error in a new RecoverableError with a new +// message. If the error was recoverable before the returned error is as well; +// otherwise it is unrecoverable. +func WrapRecoverable(msg string, err error) error { + return &RecoverableError{Err: msg, Recoverable: IsRecoverable(err)} +} + +func (r *RecoverableError) Error() string { + return r.Err +} + +func (r *RecoverableError) IsRecoverable() bool { + return r.Recoverable +} + +// Recoverable is an interface for errors to implement to indicate whether or +// not they are fatal or recoverable. +type Recoverable interface { + error + IsRecoverable() bool +} + +// IsRecoverable returns true if error is a RecoverableError with +// Recoverable=true. Otherwise false is returned. 
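A rough round trip through the Encode/Decode helpers above; MessageType is defined elsewhere in this package, and the leading type byte written by Encode is stripped before decoding:

// roundTrip encodes a request with its type prefix and decodes it back.
func roundTrip(t MessageType, req *KeyringRequest) (*KeyringRequest, error) {
    buf, err := Encode(t, req)
    if err != nil {
        return nil, err
    }

    var out KeyringRequest
    // buf[0] is the MessageType prefix; Decode expects the raw msgpack
    // payload that follows it.
    if err := Decode(buf[1:], &out); err != nil {
        return nil, err
    }
    return &out, nil
}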
+func IsRecoverable(e error) bool { + if re, ok := e.(Recoverable); ok { + return re.IsRecoverable() + } + return false +} + +// ACLPolicy is used to represent an ACL policy +type ACLPolicy struct { + Name string // Unique name + Description string // Human readable + Rules string // HCL or JSON format + Hash []byte + CreateIndex uint64 + ModifyIndex uint64 +} + +// SetHash is used to compute and set the hash of the ACL policy +func (c *ACLPolicy) SetHash() []byte { + // Initialize a 256bit Blake2 hash (32 bytes) + hash, err := blake2b.New256(nil) + if err != nil { + panic(err) + } + + // Write all the user set fields + hash.Write([]byte(c.Name)) + hash.Write([]byte(c.Description)) + hash.Write([]byte(c.Rules)) + + // Finalize the hash + hashVal := hash.Sum(nil) + + // Set and return the hash + c.Hash = hashVal + return hashVal +} + +func (a *ACLPolicy) Stub() *ACLPolicyListStub { + return &ACLPolicyListStub{ + Name: a.Name, + Description: a.Description, + Hash: a.Hash, + CreateIndex: a.CreateIndex, + ModifyIndex: a.ModifyIndex, + } +} + +func (a *ACLPolicy) Validate() error { + var mErr multierror.Error + if !validPolicyName.MatchString(a.Name) { + err := fmt.Errorf("invalid name '%s'", a.Name) + mErr.Errors = append(mErr.Errors, err) + } + if _, err := acl.Parse(a.Rules); err != nil { + err = fmt.Errorf("failed to parse rules: %v", err) + mErr.Errors = append(mErr.Errors, err) + } + if len(a.Description) > maxPolicyDescriptionLength { + err := fmt.Errorf("description longer than %d", maxPolicyDescriptionLength) + mErr.Errors = append(mErr.Errors, err) + } + return mErr.ErrorOrNil() +} + +// ACLPolicyListStub is used to for listing ACL policies +type ACLPolicyListStub struct { + Name string + Description string + Hash []byte + CreateIndex uint64 + ModifyIndex uint64 +} + +// ACLPolicyListRequest is used to request a list of policies +type ACLPolicyListRequest struct { + QueryOptions +} + +// ACLPolicySpecificRequest is used to query a specific policy +type ACLPolicySpecificRequest struct { + Name string + QueryOptions +} + +// ACLPolicySetRequest is used to query a set of policies +type ACLPolicySetRequest struct { + Names []string + QueryOptions +} + +// ACLPolicyListResponse is used for a list request +type ACLPolicyListResponse struct { + Policies []*ACLPolicyListStub + QueryMeta +} + +// SingleACLPolicyResponse is used to return a single policy +type SingleACLPolicyResponse struct { + Policy *ACLPolicy + QueryMeta +} + +// ACLPolicySetResponse is used to return a set of policies +type ACLPolicySetResponse struct { + Policies map[string]*ACLPolicy + QueryMeta +} + +// ACLPolicyDeleteRequest is used to delete a set of policies +type ACLPolicyDeleteRequest struct { + Names []string + WriteRequest +} + +// ACLPolicyUpsertRequest is used to upsert a set of policies +type ACLPolicyUpsertRequest struct { + Policies []*ACLPolicy + WriteRequest +} + +// ACLToken represents a client token which is used to Authenticate +type ACLToken struct { + AccessorID string // Public Accessor ID (UUID) + SecretID string // Secret ID, private (UUID) + Name string // Human friendly name + Type string // Client or Management + Policies []string // Policies this token ties to + Global bool // Global or Region local + Hash []byte + CreateTime time.Time // Time of creation + CreateIndex uint64 + ModifyIndex uint64 +} + +var ( + // AnonymousACLToken is used no SecretID is provided, and the + // request is made anonymously. 
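A brief sketch of preparing a policy with the helpers above, validating it and computing its hash before an upsert; the rules string is only an example of the HCL format:

// preparePolicy validates a policy and sets its hash prior to storage.
func preparePolicy(name, rules string) (*ACLPolicy, error) {
    p := &ACLPolicy{
        Name:        name,
        Description: "example policy",
        // Rules are HCL, e.g. `namespace "default" { policy = "read" }`.
        Rules: rules,
    }
    if err := p.Validate(); err != nil {
        return nil, err
    }
    p.SetHash()
    return p, nil
}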
+ AnonymousACLToken = &ACLToken{ + AccessorID: "anonymous", + Name: "Anonymous Token", + Type: ACLClientToken, + Policies: []string{"anonymous"}, + Global: false, + } +) + +type ACLTokenListStub struct { + AccessorID string + Name string + Type string + Policies []string + Global bool + Hash []byte + CreateTime time.Time + CreateIndex uint64 + ModifyIndex uint64 +} + +// SetHash is used to compute and set the hash of the ACL token +func (a *ACLToken) SetHash() []byte { + // Initialize a 256bit Blake2 hash (32 bytes) + hash, err := blake2b.New256(nil) + if err != nil { + panic(err) + } + + // Write all the user set fields + hash.Write([]byte(a.Name)) + hash.Write([]byte(a.Type)) + for _, policyName := range a.Policies { + hash.Write([]byte(policyName)) + } + if a.Global { + hash.Write([]byte("global")) + } else { + hash.Write([]byte("local")) + } + + // Finalize the hash + hashVal := hash.Sum(nil) + + // Set and return the hash + a.Hash = hashVal + return hashVal +} + +func (a *ACLToken) Stub() *ACLTokenListStub { + return &ACLTokenListStub{ + AccessorID: a.AccessorID, + Name: a.Name, + Type: a.Type, + Policies: a.Policies, + Global: a.Global, + Hash: a.Hash, + CreateTime: a.CreateTime, + CreateIndex: a.CreateIndex, + ModifyIndex: a.ModifyIndex, + } +} + +// Validate is used to sanity check a token +func (a *ACLToken) Validate() error { + var mErr multierror.Error + if len(a.Name) > maxTokenNameLength { + mErr.Errors = append(mErr.Errors, fmt.Errorf("token name too long")) + } + switch a.Type { + case ACLClientToken: + if len(a.Policies) == 0 { + mErr.Errors = append(mErr.Errors, fmt.Errorf("client token missing policies")) + } + case ACLManagementToken: + if len(a.Policies) != 0 { + mErr.Errors = append(mErr.Errors, fmt.Errorf("management token cannot be associated with policies")) + } + default: + mErr.Errors = append(mErr.Errors, fmt.Errorf("token type must be client or management")) + } + return mErr.ErrorOrNil() +} + +// PolicySubset checks if a given set of policies is a subset of the token +func (a *ACLToken) PolicySubset(policies []string) bool { + // Hot-path the management tokens, superset of all policies. 
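For reference, a sketch of assembling a client token and running it through the validation and hashing helpers above; UUID generation and the timestamp are illustrative:

// newClientToken builds a client token tied to a set of policies.
func newClientToken(name string, policies []string) (*ACLToken, error) {
    tok := &ACLToken{
        AccessorID: GenerateUUID(),
        SecretID:   GenerateUUID(),
        Name:       name,
        Type:       ACLClientToken,
        Policies:   policies,
        Global:     false,
        CreateTime: time.Now().UTC(),
    }
    if err := tok.Validate(); err != nil {
        return nil, err
    }
    tok.SetHash()
    return tok, nil
}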
+ if a.Type == ACLManagementToken { + return true + } + associatedPolicies := make(map[string]struct{}, len(a.Policies)) + for _, policy := range a.Policies { + associatedPolicies[policy] = struct{}{} + } + for _, policy := range policies { + if _, ok := associatedPolicies[policy]; !ok { + return false + } + } + return true +} + +// ACLTokenListRequest is used to request a list of tokens +type ACLTokenListRequest struct { + GlobalOnly bool + QueryOptions +} + +// ACLTokenSpecificRequest is used to query a specific token +type ACLTokenSpecificRequest struct { + AccessorID string + QueryOptions +} + +// ACLTokenSetRequest is used to query a set of tokens +type ACLTokenSetRequest struct { + AccessorIDS []string + QueryOptions +} + +// ACLTokenListResponse is used for a list request +type ACLTokenListResponse struct { + Tokens []*ACLTokenListStub + QueryMeta +} + +// SingleACLTokenResponse is used to return a single token +type SingleACLTokenResponse struct { + Token *ACLToken + QueryMeta +} + +// ACLTokenSetResponse is used to return a set of token +type ACLTokenSetResponse struct { + Tokens map[string]*ACLToken // Keyed by Accessor ID + QueryMeta +} + +// ResolveACLTokenRequest is used to resolve a specific token +type ResolveACLTokenRequest struct { + SecretID string + QueryOptions +} + +// ResolveACLTokenResponse is used to resolve a single token +type ResolveACLTokenResponse struct { + Token *ACLToken + QueryMeta +} + +// ACLTokenDeleteRequest is used to delete a set of tokens +type ACLTokenDeleteRequest struct { + AccessorIDs []string + WriteRequest +} + +// ACLTokenBootstrapRequest is used to bootstrap ACLs +type ACLTokenBootstrapRequest struct { + Token *ACLToken // Not client specifiable + ResetIndex uint64 // Reset index is used to clear the bootstrap token + WriteRequest +} + +// ACLTokenUpsertRequest is used to upsert a set of tokens +type ACLTokenUpsertRequest struct { + Tokens []*ACLToken + WriteRequest +} + +// ACLTokenUpsertResponse is used to return from an ACLTokenUpsertRequest +type ACLTokenUpsertResponse struct { + Tokens []*ACLToken + WriteMeta +} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/structs_codegen.go b/vendor/github.com/hashicorp/nomad/nomad/structs/structs_codegen.go new file mode 100644 index 0000000000..bdc324bb5c --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/structs_codegen.go @@ -0,0 +1,3 @@ +package structs + +//go:generate codecgen -d 100 -o structs.generated.go structs.go diff --git a/vendor/vendor.json b/vendor/vendor.json index ce23fa2198..2551f9e614 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -990,6 +990,12 @@ "revision": "317e0006254c44a0ac427cc52a0e083ff0b9622f", "revisionTime": "2017-09-15T02:47:31Z" }, + { + "checksumSHA1": "5DBIm/bJOKLR3CbQH6wIELQDLlQ=", + "path": "github.com/gorhill/cronexpr", + "revision": "d520615e531a6bf3fb69406b9eba718261285ec8", + "revisionTime": "2016-12-05T14:13:22Z" + }, { "checksumSHA1": "O0r0hj4YL+jSRNjnshkeH4GY+4s=", "path": "github.com/hailocab/go-hostpool", @@ -1146,6 +1152,24 @@ "revision": "a1e08240a1cff7d1bd10c89d722110a053f8dc6e", "revisionTime": "2017-09-20T19:48:06Z" }, + { + "checksumSHA1": "Is7OvHxCEEkKpdQnW8olCxL0444=", + "path": "github.com/hashicorp/nomad/api/contexts", + "revision": "a1e08240a1cff7d1bd10c89d722110a053f8dc6e", + "revisionTime": "2017-09-20T19:48:06Z" + }, + { + "checksumSHA1": "GpikwcF9oi5Rrs/58xDSfiMy/I8=", + "path": "github.com/hashicorp/nomad/helper", + "revision": "a1e08240a1cff7d1bd10c89d722110a053f8dc6e", + "revisionTime": 
"2017-09-20T19:48:06Z" + }, + { + "checksumSHA1": "hrzGvgMsH9p6MKOu3zYS8fooL3g=", + "path": "github.com/hashicorp/nomad/nomad/structs", + "revision": "a1e08240a1cff7d1bd10c89d722110a053f8dc6e", + "revisionTime": "2017-09-20T19:48:06Z" + }, { "checksumSHA1": "/oss17GO4hXGM7QnUdI3VzcAHzA=", "path": "github.com/hashicorp/serf/coordinate", From 44aed8f71a633923bb2d3d61ae4d1a1c302f6bc0 Mon Sep 17 00:00:00 2001 From: Chris Hoffman Date: Mon, 2 Oct 2017 13:46:42 -0400 Subject: [PATCH 015/329] fixing dependencies --- vendor/github.com/hashicorp/nomad/api/jobs.go | 31 +- .../hashicorp/nomad/api/jobs_testing.go | 12 +- .../github.com/hashicorp/nomad/api/nodes.go | 2 +- .../hashicorp/nomad/api/operator.go | 2 +- .../github.com/hashicorp/nomad/api/tasks.go | 2 +- .../hashicorp/nomad/helper/funcs.go | 2 +- .../hashicorp/nomad/nomad/structs/bitmap.go | 78 - .../hashicorp/nomad/nomad/structs/diff.go | 1231 ---- .../hashicorp/nomad/nomad/structs/funcs.go | 310 - .../hashicorp/nomad/nomad/structs/network.go | 326 - .../nomad/nomad/structs/node_class.go | 94 - .../hashicorp/nomad/nomad/structs/operator.go | 49 - .../hashicorp/nomad/nomad/structs/structs.go | 5783 ----------------- .../nomad/nomad/structs/structs_codegen.go | 3 - vendor/vendor.json | 24 +- 15 files changed, 44 insertions(+), 7905 deletions(-) delete mode 100644 vendor/github.com/hashicorp/nomad/nomad/structs/bitmap.go delete mode 100644 vendor/github.com/hashicorp/nomad/nomad/structs/diff.go delete mode 100644 vendor/github.com/hashicorp/nomad/nomad/structs/funcs.go delete mode 100644 vendor/github.com/hashicorp/nomad/nomad/structs/network.go delete mode 100644 vendor/github.com/hashicorp/nomad/nomad/structs/node_class.go delete mode 100644 vendor/github.com/hashicorp/nomad/nomad/structs/operator.go delete mode 100644 vendor/github.com/hashicorp/nomad/nomad/structs/structs.go delete mode 100644 vendor/github.com/hashicorp/nomad/nomad/structs/structs_codegen.go diff --git a/vendor/github.com/hashicorp/nomad/api/jobs.go b/vendor/github.com/hashicorp/nomad/api/jobs.go index 4ec71af4ae..e68bef1e7a 100644 --- a/vendor/github.com/hashicorp/nomad/api/jobs.go +++ b/vendor/github.com/hashicorp/nomad/api/jobs.go @@ -9,7 +9,6 @@ import ( "github.com/gorhill/cronexpr" "github.com/hashicorp/nomad/helper" - "github.com/hashicorp/nomad/nomad/structs" ) const ( @@ -330,6 +329,20 @@ type UpdateStrategy struct { Canary *int `mapstructure:"canary"` } +// DefaultUpdateStrategy provides a baseline that can be used to upgrade +// jobs with the old policy or for populating field defaults. 
+func DefaultUpdateStrategy() *UpdateStrategy { + return &UpdateStrategy{ + Stagger: helper.TimeToPtr(30 * time.Second), + MaxParallel: helper.IntToPtr(1), + HealthCheck: helper.StringToPtr("checks"), + MinHealthyTime: helper.TimeToPtr(10 * time.Second), + HealthyDeadline: helper.TimeToPtr(5 * time.Minute), + AutoRevert: helper.BoolToPtr(false), + Canary: helper.IntToPtr(0), + } +} + func (u *UpdateStrategy) Copy() *UpdateStrategy { if u == nil { return nil @@ -403,34 +416,34 @@ func (u *UpdateStrategy) Merge(o *UpdateStrategy) { } func (u *UpdateStrategy) Canonicalize() { - d := structs.DefaultUpdateStrategy + d := DefaultUpdateStrategy() if u.MaxParallel == nil { - u.MaxParallel = helper.IntToPtr(d.MaxParallel) + u.MaxParallel = d.MaxParallel } if u.Stagger == nil { - u.Stagger = helper.TimeToPtr(d.Stagger) + u.Stagger = d.Stagger } if u.HealthCheck == nil { - u.HealthCheck = helper.StringToPtr(d.HealthCheck) + u.HealthCheck = d.HealthCheck } if u.HealthyDeadline == nil { - u.HealthyDeadline = helper.TimeToPtr(d.HealthyDeadline) + u.HealthyDeadline = d.HealthyDeadline } if u.MinHealthyTime == nil { - u.MinHealthyTime = helper.TimeToPtr(d.MinHealthyTime) + u.MinHealthyTime = d.MinHealthyTime } if u.AutoRevert == nil { - u.AutoRevert = helper.BoolToPtr(d.AutoRevert) + u.AutoRevert = d.AutoRevert } if u.Canary == nil { - u.Canary = helper.IntToPtr(d.Canary) + u.Canary = d.Canary } } diff --git a/vendor/github.com/hashicorp/nomad/api/jobs_testing.go b/vendor/github.com/hashicorp/nomad/api/jobs_testing.go index bed9ac4748..1bd47496cc 100644 --- a/vendor/github.com/hashicorp/nomad/api/jobs_testing.go +++ b/vendor/github.com/hashicorp/nomad/api/jobs_testing.go @@ -4,27 +4,27 @@ import ( "time" "github.com/hashicorp/nomad/helper" - "github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/helper/uuid" ) func MockJob() *Job { job := &Job{ Region: helper.StringToPtr("global"), - ID: helper.StringToPtr(structs.GenerateUUID()), + ID: helper.StringToPtr(uuid.Generate()), Name: helper.StringToPtr("my-job"), Type: helper.StringToPtr("service"), Priority: helper.IntToPtr(50), AllAtOnce: helper.BoolToPtr(false), Datacenters: []string{"dc1"}, Constraints: []*Constraint{ - &Constraint{ + { LTarget: "${attr.kernel.name}", RTarget: "linux", Operand: "=", }, }, TaskGroups: []*TaskGroup{ - &TaskGroup{ + { Name: helper.StringToPtr("web"), Count: helper.IntToPtr(10), EphemeralDisk: &EphemeralDisk{ @@ -37,7 +37,7 @@ func MockJob() *Job { Mode: helper.StringToPtr("delay"), }, Tasks: []*Task{ - &Task{ + { Name: "web", Driver: "exec", Config: map[string]interface{}{ @@ -72,7 +72,7 @@ func MockJob() *Job { CPU: helper.IntToPtr(500), MemoryMB: helper.IntToPtr(256), Networks: []*NetworkResource{ - &NetworkResource{ + { MBits: helper.IntToPtr(50), DynamicPorts: []Port{{Label: "http"}, {Label: "admin"}}, }, diff --git a/vendor/github.com/hashicorp/nomad/api/nodes.go b/vendor/github.com/hashicorp/nomad/api/nodes.go index 50a159628a..e1ef5e2aaa 100644 --- a/vendor/github.com/hashicorp/nomad/api/nodes.go +++ b/vendor/github.com/hashicorp/nomad/api/nodes.go @@ -22,7 +22,7 @@ func (n *Nodes) List(q *QueryOptions) ([]*NodeListStub, *QueryMeta, error) { if err != nil { return nil, nil, err } - sort.Sort(NodeIndexSort(resp)) + sort.Sort(resp) return resp, qm, nil } diff --git a/vendor/github.com/hashicorp/nomad/api/operator.go b/vendor/github.com/hashicorp/nomad/api/operator.go index a10648a295..a83d54cb37 100644 --- a/vendor/github.com/hashicorp/nomad/api/operator.go +++ 
b/vendor/github.com/hashicorp/nomad/api/operator.go @@ -75,7 +75,7 @@ func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) err // TODO (alexdadgar) Currently we made address a query parameter. Once // IDs are in place this will be DELETE /v1/operator/raft/peer/. - r.params.Set("address", string(address)) + r.params.Set("address", address) _, resp, err := requireOK(op.c.doRequest(r)) if err != nil { diff --git a/vendor/github.com/hashicorp/nomad/api/tasks.go b/vendor/github.com/hashicorp/nomad/api/tasks.go index a3d10831e8..3233c99638 100644 --- a/vendor/github.com/hashicorp/nomad/api/tasks.go +++ b/vendor/github.com/hashicorp/nomad/api/tasks.go @@ -83,7 +83,7 @@ func (r *RestartPolicy) Merge(rp *RestartPolicy) { // failing health checks. type CheckRestart struct { Limit int `mapstructure:"limit"` - Grace *time.Duration `mapstructure:"grace_period"` + Grace *time.Duration `mapstructure:"grace"` IgnoreWarnings bool `mapstructure:"ignore_warnings"` } diff --git a/vendor/github.com/hashicorp/nomad/helper/funcs.go b/vendor/github.com/hashicorp/nomad/helper/funcs.go index 0b07960591..19911941fe 100644 --- a/vendor/github.com/hashicorp/nomad/helper/funcs.go +++ b/vendor/github.com/hashicorp/nomad/helper/funcs.go @@ -180,7 +180,7 @@ func CopyMapStringStruct(m map[string]struct{}) map[string]struct{} { } c := make(map[string]struct{}, l) - for k, _ := range m { + for k := range m { c[k] = struct{}{} } return c diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/bitmap.go b/vendor/github.com/hashicorp/nomad/nomad/structs/bitmap.go deleted file mode 100644 index 63758a0be6..0000000000 --- a/vendor/github.com/hashicorp/nomad/nomad/structs/bitmap.go +++ /dev/null @@ -1,78 +0,0 @@ -package structs - -import "fmt" - -// Bitmap is a simple uncompressed bitmap -type Bitmap []byte - -// NewBitmap returns a bitmap with up to size indexes -func NewBitmap(size uint) (Bitmap, error) { - if size == 0 { - return nil, fmt.Errorf("bitmap must be positive size") - } - if size&7 != 0 { - return nil, fmt.Errorf("bitmap must be byte aligned") - } - b := make([]byte, size>>3) - return Bitmap(b), nil -} - -// Copy returns a copy of the Bitmap -func (b Bitmap) Copy() (Bitmap, error) { - if b == nil { - return nil, fmt.Errorf("can't copy nil Bitmap") - } - - raw := make([]byte, len(b)) - copy(raw, b) - return Bitmap(raw), nil -} - -// Size returns the size of the bitmap -func (b Bitmap) Size() uint { - return uint(len(b) << 3) -} - -// Set is used to set the given index of the bitmap -func (b Bitmap) Set(idx uint) { - bucket := idx >> 3 - mask := byte(1 << (idx & 7)) - b[bucket] |= mask -} - -// Unset is used to unset the given index of the bitmap -func (b Bitmap) Unset(idx uint) { - bucket := idx >> 3 - // Mask should be all ones minus the idx position - offset := 1 << (idx & 7) - mask := byte(offset ^ 0xff) - b[bucket] &= mask -} - -// Check is used to check the given index of the bitmap -func (b Bitmap) Check(idx uint) bool { - bucket := idx >> 3 - mask := byte(1 << (idx & 7)) - return (b[bucket] & mask) != 0 -} - -// Clear is used to efficiently clear the bitmap -func (b Bitmap) Clear() { - for i := range b { - b[i] = 0 - } -} - -// IndexesInRange returns the indexes in which the values are either set or unset based -// on the passed parameter in the passed range -func (b Bitmap) IndexesInRange(set bool, from, to uint) []int { - var indexes []int - for i := from; i <= to && i < b.Size(); i++ { - c := b.Check(i) - if c && set || !c && !set { - indexes = append(indexes, int(i)) - } - } - 
- return indexes -} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/diff.go b/vendor/github.com/hashicorp/nomad/nomad/structs/diff.go deleted file mode 100644 index b8ced9180f..0000000000 --- a/vendor/github.com/hashicorp/nomad/nomad/structs/diff.go +++ /dev/null @@ -1,1231 +0,0 @@ -package structs - -import ( - "fmt" - "reflect" - "sort" - "strings" - - "github.com/hashicorp/nomad/helper/flatmap" - "github.com/mitchellh/hashstructure" -) - -// DiffType denotes the type of a diff object. -type DiffType string - -var ( - DiffTypeNone DiffType = "None" - DiffTypeAdded DiffType = "Added" - DiffTypeDeleted DiffType = "Deleted" - DiffTypeEdited DiffType = "Edited" -) - -func (d DiffType) Less(other DiffType) bool { - // Edited > Added > Deleted > None - // But we do a reverse sort - if d == other { - return false - } - - if d == DiffTypeEdited { - return true - } else if other == DiffTypeEdited { - return false - } else if d == DiffTypeAdded { - return true - } else if other == DiffTypeAdded { - return false - } else if d == DiffTypeDeleted { - return true - } else if other == DiffTypeDeleted { - return false - } - - return true -} - -// JobDiff contains the diff of two jobs. -type JobDiff struct { - Type DiffType - ID string - Fields []*FieldDiff - Objects []*ObjectDiff - TaskGroups []*TaskGroupDiff -} - -// Diff returns a diff of two jobs and a potential error if the Jobs are not -// diffable. If contextual diff is enabled, objects within the job will contain -// field information even if unchanged. -func (j *Job) Diff(other *Job, contextual bool) (*JobDiff, error) { - // COMPAT: Remove "Update" in 0.7.0. Update pushed down to task groups - // in 0.6.0 - diff := &JobDiff{Type: DiffTypeNone} - var oldPrimitiveFlat, newPrimitiveFlat map[string]string - filter := []string{"ID", "Status", "StatusDescription", "Version", "Stable", "CreateIndex", - "ModifyIndex", "JobModifyIndex", "Update", "SubmitTime"} - - if j == nil && other == nil { - return diff, nil - } else if j == nil { - j = &Job{} - diff.Type = DiffTypeAdded - newPrimitiveFlat = flatmap.Flatten(other, filter, true) - diff.ID = other.ID - } else if other == nil { - other = &Job{} - diff.Type = DiffTypeDeleted - oldPrimitiveFlat = flatmap.Flatten(j, filter, true) - diff.ID = j.ID - } else { - if j.ID != other.ID { - return nil, fmt.Errorf("can not diff jobs with different IDs: %q and %q", j.ID, other.ID) - } - - oldPrimitiveFlat = flatmap.Flatten(j, filter, true) - newPrimitiveFlat = flatmap.Flatten(other, filter, true) - diff.ID = other.ID - } - - // Diff the primitive fields. - diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, false) - - // Datacenters diff - if setDiff := stringSetDiff(j.Datacenters, other.Datacenters, "Datacenters", contextual); setDiff != nil && setDiff.Type != DiffTypeNone { - diff.Objects = append(diff.Objects, setDiff) - } - - // Constraints diff - conDiff := primitiveObjectSetDiff( - interfaceSlice(j.Constraints), - interfaceSlice(other.Constraints), - []string{"str"}, - "Constraint", - contextual) - if conDiff != nil { - diff.Objects = append(diff.Objects, conDiff...) 
- } - - // Task groups diff - tgs, err := taskGroupDiffs(j.TaskGroups, other.TaskGroups, contextual) - if err != nil { - return nil, err - } - diff.TaskGroups = tgs - - // Periodic diff - if pDiff := primitiveObjectDiff(j.Periodic, other.Periodic, nil, "Periodic", contextual); pDiff != nil { - diff.Objects = append(diff.Objects, pDiff) - } - - // ParameterizedJob diff - if cDiff := parameterizedJobDiff(j.ParameterizedJob, other.ParameterizedJob, contextual); cDiff != nil { - diff.Objects = append(diff.Objects, cDiff) - } - - // Check to see if there is a diff. We don't use reflect because we are - // filtering quite a few fields that will change on each diff. - if diff.Type == DiffTypeNone { - for _, fd := range diff.Fields { - if fd.Type != DiffTypeNone { - diff.Type = DiffTypeEdited - break - } - } - } - - if diff.Type == DiffTypeNone { - for _, od := range diff.Objects { - if od.Type != DiffTypeNone { - diff.Type = DiffTypeEdited - break - } - } - } - - if diff.Type == DiffTypeNone { - for _, tg := range diff.TaskGroups { - if tg.Type != DiffTypeNone { - diff.Type = DiffTypeEdited - break - } - } - } - - return diff, nil -} - -func (j *JobDiff) GoString() string { - out := fmt.Sprintf("Job %q (%s):\n", j.ID, j.Type) - - for _, f := range j.Fields { - out += fmt.Sprintf("%#v\n", f) - } - - for _, o := range j.Objects { - out += fmt.Sprintf("%#v\n", o) - } - - for _, tg := range j.TaskGroups { - out += fmt.Sprintf("%#v\n", tg) - } - - return out -} - -// TaskGroupDiff contains the diff of two task groups. -type TaskGroupDiff struct { - Type DiffType - Name string - Fields []*FieldDiff - Objects []*ObjectDiff - Tasks []*TaskDiff - Updates map[string]uint64 -} - -// Diff returns a diff of two task groups. If contextual diff is enabled, -// objects' fields will be stored even if no diff occurred as long as one field -// changed. -func (tg *TaskGroup) Diff(other *TaskGroup, contextual bool) (*TaskGroupDiff, error) { - diff := &TaskGroupDiff{Type: DiffTypeNone} - var oldPrimitiveFlat, newPrimitiveFlat map[string]string - filter := []string{"Name"} - - if tg == nil && other == nil { - return diff, nil - } else if tg == nil { - tg = &TaskGroup{} - diff.Type = DiffTypeAdded - diff.Name = other.Name - newPrimitiveFlat = flatmap.Flatten(other, filter, true) - } else if other == nil { - other = &TaskGroup{} - diff.Type = DiffTypeDeleted - diff.Name = tg.Name - oldPrimitiveFlat = flatmap.Flatten(tg, filter, true) - } else { - if !reflect.DeepEqual(tg, other) { - diff.Type = DiffTypeEdited - } - if tg.Name != other.Name { - return nil, fmt.Errorf("can not diff task groups with different names: %q and %q", tg.Name, other.Name) - } - diff.Name = other.Name - oldPrimitiveFlat = flatmap.Flatten(tg, filter, true) - newPrimitiveFlat = flatmap.Flatten(other, filter, true) - } - - // Diff the primitive fields. - diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, false) - - // Constraints diff - conDiff := primitiveObjectSetDiff( - interfaceSlice(tg.Constraints), - interfaceSlice(other.Constraints), - []string{"str"}, - "Constraint", - contextual) - if conDiff != nil { - diff.Objects = append(diff.Objects, conDiff...) 
- } - - // Restart policy diff - rDiff := primitiveObjectDiff(tg.RestartPolicy, other.RestartPolicy, nil, "RestartPolicy", contextual) - if rDiff != nil { - diff.Objects = append(diff.Objects, rDiff) - } - - // EphemeralDisk diff - diskDiff := primitiveObjectDiff(tg.EphemeralDisk, other.EphemeralDisk, nil, "EphemeralDisk", contextual) - if diskDiff != nil { - diff.Objects = append(diff.Objects, diskDiff) - } - - // Update diff - // COMPAT: Remove "Stagger" in 0.7.0. - if uDiff := primitiveObjectDiff(tg.Update, other.Update, []string{"Stagger"}, "Update", contextual); uDiff != nil { - diff.Objects = append(diff.Objects, uDiff) - } - - // Tasks diff - tasks, err := taskDiffs(tg.Tasks, other.Tasks, contextual) - if err != nil { - return nil, err - } - diff.Tasks = tasks - - return diff, nil -} - -func (tg *TaskGroupDiff) GoString() string { - out := fmt.Sprintf("Group %q (%s):\n", tg.Name, tg.Type) - - if len(tg.Updates) != 0 { - out += "Updates {\n" - for update, count := range tg.Updates { - out += fmt.Sprintf("%d %s\n", count, update) - } - out += "}\n" - } - - for _, f := range tg.Fields { - out += fmt.Sprintf("%#v\n", f) - } - - for _, o := range tg.Objects { - out += fmt.Sprintf("%#v\n", o) - } - - for _, t := range tg.Tasks { - out += fmt.Sprintf("%#v\n", t) - } - - return out -} - -// TaskGroupDiffs diffs two sets of task groups. If contextual diff is enabled, -// objects' fields will be stored even if no diff occurred as long as one field -// changed. -func taskGroupDiffs(old, new []*TaskGroup, contextual bool) ([]*TaskGroupDiff, error) { - oldMap := make(map[string]*TaskGroup, len(old)) - newMap := make(map[string]*TaskGroup, len(new)) - for _, o := range old { - oldMap[o.Name] = o - } - for _, n := range new { - newMap[n.Name] = n - } - - var diffs []*TaskGroupDiff - for name, oldGroup := range oldMap { - // Diff the same, deleted and edited - diff, err := oldGroup.Diff(newMap[name], contextual) - if err != nil { - return nil, err - } - diffs = append(diffs, diff) - } - - for name, newGroup := range newMap { - // Diff the added - if old, ok := oldMap[name]; !ok { - diff, err := old.Diff(newGroup, contextual) - if err != nil { - return nil, err - } - diffs = append(diffs, diff) - } - } - - sort.Sort(TaskGroupDiffs(diffs)) - return diffs, nil -} - -// For sorting TaskGroupDiffs -type TaskGroupDiffs []*TaskGroupDiff - -func (tg TaskGroupDiffs) Len() int { return len(tg) } -func (tg TaskGroupDiffs) Swap(i, j int) { tg[i], tg[j] = tg[j], tg[i] } -func (tg TaskGroupDiffs) Less(i, j int) bool { return tg[i].Name < tg[j].Name } - -// TaskDiff contains the diff of two Tasks -type TaskDiff struct { - Type DiffType - Name string - Fields []*FieldDiff - Objects []*ObjectDiff - Annotations []string -} - -// Diff returns a diff of two tasks. If contextual diff is enabled, objects -// within the task will contain field information even if unchanged. 
-func (t *Task) Diff(other *Task, contextual bool) (*TaskDiff, error) { - diff := &TaskDiff{Type: DiffTypeNone} - var oldPrimitiveFlat, newPrimitiveFlat map[string]string - filter := []string{"Name", "Config"} - - if t == nil && other == nil { - return diff, nil - } else if t == nil { - t = &Task{} - diff.Type = DiffTypeAdded - diff.Name = other.Name - newPrimitiveFlat = flatmap.Flatten(other, filter, true) - } else if other == nil { - other = &Task{} - diff.Type = DiffTypeDeleted - diff.Name = t.Name - oldPrimitiveFlat = flatmap.Flatten(t, filter, true) - } else { - if !reflect.DeepEqual(t, other) { - diff.Type = DiffTypeEdited - } - if t.Name != other.Name { - return nil, fmt.Errorf("can not diff tasks with different names: %q and %q", t.Name, other.Name) - } - diff.Name = other.Name - oldPrimitiveFlat = flatmap.Flatten(t, filter, true) - newPrimitiveFlat = flatmap.Flatten(other, filter, true) - } - - // Diff the primitive fields. - diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, false) - - // Constraints diff - conDiff := primitiveObjectSetDiff( - interfaceSlice(t.Constraints), - interfaceSlice(other.Constraints), - []string{"str"}, - "Constraint", - contextual) - if conDiff != nil { - diff.Objects = append(diff.Objects, conDiff...) - } - - // Config diff - if cDiff := configDiff(t.Config, other.Config, contextual); cDiff != nil { - diff.Objects = append(diff.Objects, cDiff) - } - - // Resources diff - if rDiff := t.Resources.Diff(other.Resources, contextual); rDiff != nil { - diff.Objects = append(diff.Objects, rDiff) - } - - // LogConfig diff - lDiff := primitiveObjectDiff(t.LogConfig, other.LogConfig, nil, "LogConfig", contextual) - if lDiff != nil { - diff.Objects = append(diff.Objects, lDiff) - } - - // Dispatch payload diff - dDiff := primitiveObjectDiff(t.DispatchPayload, other.DispatchPayload, nil, "DispatchPayload", contextual) - if dDiff != nil { - diff.Objects = append(diff.Objects, dDiff) - } - - // Artifacts diff - diffs := primitiveObjectSetDiff( - interfaceSlice(t.Artifacts), - interfaceSlice(other.Artifacts), - nil, - "Artifact", - contextual) - if diffs != nil { - diff.Objects = append(diff.Objects, diffs...) - } - - // Services diff - if sDiffs := serviceDiffs(t.Services, other.Services, contextual); sDiffs != nil { - diff.Objects = append(diff.Objects, sDiffs...) - } - - // Vault diff - vDiff := vaultDiff(t.Vault, other.Vault, contextual) - if vDiff != nil { - diff.Objects = append(diff.Objects, vDiff) - } - - // Template diff - tmplDiffs := primitiveObjectSetDiff( - interfaceSlice(t.Templates), - interfaceSlice(other.Templates), - nil, - "Template", - contextual) - if tmplDiffs != nil { - diff.Objects = append(diff.Objects, tmplDiffs...) - } - - return diff, nil -} - -func (t *TaskDiff) GoString() string { - var out string - if len(t.Annotations) == 0 { - out = fmt.Sprintf("Task %q (%s):\n", t.Name, t.Type) - } else { - out = fmt.Sprintf("Task %q (%s) (%s):\n", t.Name, t.Type, strings.Join(t.Annotations, ",")) - } - - for _, f := range t.Fields { - out += fmt.Sprintf("%#v\n", f) - } - - for _, o := range t.Objects { - out += fmt.Sprintf("%#v\n", o) - } - - return out -} - -// taskDiffs diffs a set of tasks. If contextual diff is enabled, unchanged -// fields within objects nested in the tasks will be returned. 
-func taskDiffs(old, new []*Task, contextual bool) ([]*TaskDiff, error) { - oldMap := make(map[string]*Task, len(old)) - newMap := make(map[string]*Task, len(new)) - for _, o := range old { - oldMap[o.Name] = o - } - for _, n := range new { - newMap[n.Name] = n - } - - var diffs []*TaskDiff - for name, oldGroup := range oldMap { - // Diff the same, deleted and edited - diff, err := oldGroup.Diff(newMap[name], contextual) - if err != nil { - return nil, err - } - diffs = append(diffs, diff) - } - - for name, newGroup := range newMap { - // Diff the added - if old, ok := oldMap[name]; !ok { - diff, err := old.Diff(newGroup, contextual) - if err != nil { - return nil, err - } - diffs = append(diffs, diff) - } - } - - sort.Sort(TaskDiffs(diffs)) - return diffs, nil -} - -// For sorting TaskDiffs -type TaskDiffs []*TaskDiff - -func (t TaskDiffs) Len() int { return len(t) } -func (t TaskDiffs) Swap(i, j int) { t[i], t[j] = t[j], t[i] } -func (t TaskDiffs) Less(i, j int) bool { return t[i].Name < t[j].Name } - -// serviceDiff returns the diff of two service objects. If contextual diff is -// enabled, all fields will be returned, even if no diff occurred. -func serviceDiff(old, new *Service, contextual bool) *ObjectDiff { - diff := &ObjectDiff{Type: DiffTypeNone, Name: "Service"} - var oldPrimitiveFlat, newPrimitiveFlat map[string]string - - if reflect.DeepEqual(old, new) { - return nil - } else if old == nil { - old = &Service{} - diff.Type = DiffTypeAdded - newPrimitiveFlat = flatmap.Flatten(new, nil, true) - } else if new == nil { - new = &Service{} - diff.Type = DiffTypeDeleted - oldPrimitiveFlat = flatmap.Flatten(old, nil, true) - } else { - diff.Type = DiffTypeEdited - oldPrimitiveFlat = flatmap.Flatten(old, nil, true) - newPrimitiveFlat = flatmap.Flatten(new, nil, true) - } - - // Diff the primitive fields. - diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) - - // Checks diffs - if cDiffs := serviceCheckDiffs(old.Checks, new.Checks, contextual); cDiffs != nil { - diff.Objects = append(diff.Objects, cDiffs...) - } - - return diff -} - -// serviceDiffs diffs a set of services. If contextual diff is enabled, unchanged -// fields within objects nested in the tasks will be returned. -func serviceDiffs(old, new []*Service, contextual bool) []*ObjectDiff { - oldMap := make(map[string]*Service, len(old)) - newMap := make(map[string]*Service, len(new)) - for _, o := range old { - oldMap[o.Name] = o - } - for _, n := range new { - newMap[n.Name] = n - } - - var diffs []*ObjectDiff - for name, oldService := range oldMap { - // Diff the same, deleted and edited - if diff := serviceDiff(oldService, newMap[name], contextual); diff != nil { - diffs = append(diffs, diff) - } - } - - for name, newService := range newMap { - // Diff the added - if old, ok := oldMap[name]; !ok { - if diff := serviceDiff(old, newService, contextual); diff != nil { - diffs = append(diffs, diff) - } - } - } - - sort.Sort(ObjectDiffs(diffs)) - return diffs -} - -// serviceCheckDiff returns the diff of two service check objects. If contextual -// diff is enabled, all fields will be returned, even if no diff occurred. 
-func serviceCheckDiff(old, new *ServiceCheck, contextual bool) *ObjectDiff { - diff := &ObjectDiff{Type: DiffTypeNone, Name: "Check"} - var oldPrimitiveFlat, newPrimitiveFlat map[string]string - - if reflect.DeepEqual(old, new) { - return nil - } else if old == nil { - old = &ServiceCheck{} - diff.Type = DiffTypeAdded - newPrimitiveFlat = flatmap.Flatten(new, nil, true) - } else if new == nil { - new = &ServiceCheck{} - diff.Type = DiffTypeDeleted - oldPrimitiveFlat = flatmap.Flatten(old, nil, true) - } else { - diff.Type = DiffTypeEdited - oldPrimitiveFlat = flatmap.Flatten(old, nil, true) - newPrimitiveFlat = flatmap.Flatten(new, nil, true) - } - - // Diff the primitive fields. - diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) - - // Diff Header - if headerDiff := checkHeaderDiff(old.Header, new.Header, contextual); headerDiff != nil { - diff.Objects = append(diff.Objects, headerDiff) - } - - return diff -} - -// checkHeaderDiff returns the diff of two service check header objects. If -// contextual diff is enabled, all fields will be returned, even if no diff -// occurred. -func checkHeaderDiff(old, new map[string][]string, contextual bool) *ObjectDiff { - diff := &ObjectDiff{Type: DiffTypeNone, Name: "Header"} - if reflect.DeepEqual(old, new) { - return nil - } else if len(old) == 0 { - diff.Type = DiffTypeAdded - } else if len(new) == 0 { - diff.Type = DiffTypeDeleted - } else { - diff.Type = DiffTypeEdited - } - oldFlat := flatmap.Flatten(old, nil, false) - newFlat := flatmap.Flatten(new, nil, false) - diff.Fields = fieldDiffs(oldFlat, newFlat, contextual) - return diff -} - -// serviceCheckDiffs diffs a set of service checks. If contextual diff is -// enabled, unchanged fields within objects nested in the tasks will be -// returned. -func serviceCheckDiffs(old, new []*ServiceCheck, contextual bool) []*ObjectDiff { - oldMap := make(map[string]*ServiceCheck, len(old)) - newMap := make(map[string]*ServiceCheck, len(new)) - for _, o := range old { - oldMap[o.Name] = o - } - for _, n := range new { - newMap[n.Name] = n - } - - var diffs []*ObjectDiff - for name, oldCheck := range oldMap { - // Diff the same, deleted and edited - if diff := serviceCheckDiff(oldCheck, newMap[name], contextual); diff != nil { - diffs = append(diffs, diff) - } - } - - for name, newCheck := range newMap { - // Diff the added - if old, ok := oldMap[name]; !ok { - if diff := serviceCheckDiff(old, newCheck, contextual); diff != nil { - diffs = append(diffs, diff) - } - } - } - - sort.Sort(ObjectDiffs(diffs)) - return diffs -} - -// vaultDiff returns the diff of two vault objects. If contextual diff is -// enabled, all fields will be returned, even if no diff occurred. -func vaultDiff(old, new *Vault, contextual bool) *ObjectDiff { - diff := &ObjectDiff{Type: DiffTypeNone, Name: "Vault"} - var oldPrimitiveFlat, newPrimitiveFlat map[string]string - - if reflect.DeepEqual(old, new) { - return nil - } else if old == nil { - old = &Vault{} - diff.Type = DiffTypeAdded - newPrimitiveFlat = flatmap.Flatten(new, nil, true) - } else if new == nil { - new = &Vault{} - diff.Type = DiffTypeDeleted - oldPrimitiveFlat = flatmap.Flatten(old, nil, true) - } else { - diff.Type = DiffTypeEdited - oldPrimitiveFlat = flatmap.Flatten(old, nil, true) - newPrimitiveFlat = flatmap.Flatten(new, nil, true) - } - - // Diff the primitive fields. 
- diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) - - // Policies diffs - if setDiff := stringSetDiff(old.Policies, new.Policies, "Policies", contextual); setDiff != nil { - diff.Objects = append(diff.Objects, setDiff) - } - - return diff -} - -// parameterizedJobDiff returns the diff of two parameterized job objects. If -// contextual diff is enabled, all fields will be returned, even if no diff -// occurred. -func parameterizedJobDiff(old, new *ParameterizedJobConfig, contextual bool) *ObjectDiff { - diff := &ObjectDiff{Type: DiffTypeNone, Name: "ParameterizedJob"} - var oldPrimitiveFlat, newPrimitiveFlat map[string]string - - if reflect.DeepEqual(old, new) { - return nil - } else if old == nil { - old = &ParameterizedJobConfig{} - diff.Type = DiffTypeAdded - newPrimitiveFlat = flatmap.Flatten(new, nil, true) - } else if new == nil { - new = &ParameterizedJobConfig{} - diff.Type = DiffTypeDeleted - oldPrimitiveFlat = flatmap.Flatten(old, nil, true) - } else { - diff.Type = DiffTypeEdited - oldPrimitiveFlat = flatmap.Flatten(old, nil, true) - newPrimitiveFlat = flatmap.Flatten(new, nil, true) - } - - // Diff the primitive fields. - diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) - - // Meta diffs - if optionalDiff := stringSetDiff(old.MetaOptional, new.MetaOptional, "MetaOptional", contextual); optionalDiff != nil { - diff.Objects = append(diff.Objects, optionalDiff) - } - - if requiredDiff := stringSetDiff(old.MetaRequired, new.MetaRequired, "MetaRequired", contextual); requiredDiff != nil { - diff.Objects = append(diff.Objects, requiredDiff) - } - - return diff -} - -// Diff returns a diff of two resource objects. If contextual diff is enabled, -// non-changed fields will still be returned. -func (r *Resources) Diff(other *Resources, contextual bool) *ObjectDiff { - diff := &ObjectDiff{Type: DiffTypeNone, Name: "Resources"} - var oldPrimitiveFlat, newPrimitiveFlat map[string]string - - if reflect.DeepEqual(r, other) { - return nil - } else if r == nil { - r = &Resources{} - diff.Type = DiffTypeAdded - newPrimitiveFlat = flatmap.Flatten(other, nil, true) - } else if other == nil { - other = &Resources{} - diff.Type = DiffTypeDeleted - oldPrimitiveFlat = flatmap.Flatten(r, nil, true) - } else { - diff.Type = DiffTypeEdited - oldPrimitiveFlat = flatmap.Flatten(r, nil, true) - newPrimitiveFlat = flatmap.Flatten(other, nil, true) - } - - // Diff the primitive fields. - diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) - - // Network Resources diff - if nDiffs := networkResourceDiffs(r.Networks, other.Networks, contextual); nDiffs != nil { - diff.Objects = append(diff.Objects, nDiffs...) - } - - return diff -} - -// Diff returns a diff of two network resources. If contextual diff is enabled, -// non-changed fields will still be returned. 
-func (r *NetworkResource) Diff(other *NetworkResource, contextual bool) *ObjectDiff { - diff := &ObjectDiff{Type: DiffTypeNone, Name: "Network"} - var oldPrimitiveFlat, newPrimitiveFlat map[string]string - filter := []string{"Device", "CIDR", "IP"} - - if reflect.DeepEqual(r, other) { - return nil - } else if r == nil { - r = &NetworkResource{} - diff.Type = DiffTypeAdded - newPrimitiveFlat = flatmap.Flatten(other, filter, true) - } else if other == nil { - other = &NetworkResource{} - diff.Type = DiffTypeDeleted - oldPrimitiveFlat = flatmap.Flatten(r, filter, true) - } else { - diff.Type = DiffTypeEdited - oldPrimitiveFlat = flatmap.Flatten(r, filter, true) - newPrimitiveFlat = flatmap.Flatten(other, filter, true) - } - - // Diff the primitive fields. - diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) - - // Port diffs - resPorts := portDiffs(r.ReservedPorts, other.ReservedPorts, false, contextual) - dynPorts := portDiffs(r.DynamicPorts, other.DynamicPorts, true, contextual) - if resPorts != nil { - diff.Objects = append(diff.Objects, resPorts...) - } - if dynPorts != nil { - diff.Objects = append(diff.Objects, dynPorts...) - } - - return diff -} - -// networkResourceDiffs diffs a set of NetworkResources. If contextual diff is enabled, -// non-changed fields will still be returned. -func networkResourceDiffs(old, new []*NetworkResource, contextual bool) []*ObjectDiff { - makeSet := func(objects []*NetworkResource) map[string]*NetworkResource { - objMap := make(map[string]*NetworkResource, len(objects)) - for _, obj := range objects { - hash, err := hashstructure.Hash(obj, nil) - if err != nil { - panic(err) - } - objMap[fmt.Sprintf("%d", hash)] = obj - } - - return objMap - } - - oldSet := makeSet(old) - newSet := makeSet(new) - - var diffs []*ObjectDiff - for k, oldV := range oldSet { - if newV, ok := newSet[k]; !ok { - if diff := oldV.Diff(newV, contextual); diff != nil { - diffs = append(diffs, diff) - } - } - } - for k, newV := range newSet { - if oldV, ok := oldSet[k]; !ok { - if diff := oldV.Diff(newV, contextual); diff != nil { - diffs = append(diffs, diff) - } - } - } - - sort.Sort(ObjectDiffs(diffs)) - return diffs - -} - -// portDiffs returns the diff of two sets of ports. The dynamic flag marks the -// set of ports as being Dynamic ports versus Static ports. If contextual diff is enabled, -// non-changed fields will still be returned. 
-func portDiffs(old, new []Port, dynamic bool, contextual bool) []*ObjectDiff { - makeSet := func(ports []Port) map[string]Port { - portMap := make(map[string]Port, len(ports)) - for _, port := range ports { - portMap[port.Label] = port - } - - return portMap - } - - oldPorts := makeSet(old) - newPorts := makeSet(new) - - var filter []string - name := "Static Port" - if dynamic { - filter = []string{"Value"} - name = "Dynamic Port" - } - - var diffs []*ObjectDiff - for portLabel, oldPort := range oldPorts { - // Diff the same, deleted and edited - if newPort, ok := newPorts[portLabel]; ok { - diff := primitiveObjectDiff(oldPort, newPort, filter, name, contextual) - if diff != nil { - diffs = append(diffs, diff) - } - } else { - diff := primitiveObjectDiff(oldPort, nil, filter, name, contextual) - if diff != nil { - diffs = append(diffs, diff) - } - } - } - for label, newPort := range newPorts { - // Diff the added - if _, ok := oldPorts[label]; !ok { - diff := primitiveObjectDiff(nil, newPort, filter, name, contextual) - if diff != nil { - diffs = append(diffs, diff) - } - } - } - - sort.Sort(ObjectDiffs(diffs)) - return diffs - -} - -// configDiff returns the diff of two Task Config objects. If contextual diff is -// enabled, all fields will be returned, even if no diff occurred. -func configDiff(old, new map[string]interface{}, contextual bool) *ObjectDiff { - diff := &ObjectDiff{Type: DiffTypeNone, Name: "Config"} - if reflect.DeepEqual(old, new) { - return nil - } else if len(old) == 0 { - diff.Type = DiffTypeAdded - } else if len(new) == 0 { - diff.Type = DiffTypeDeleted - } else { - diff.Type = DiffTypeEdited - } - - // Diff the primitive fields. - oldPrimitiveFlat := flatmap.Flatten(old, nil, false) - newPrimitiveFlat := flatmap.Flatten(new, nil, false) - diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) - return diff -} - -// ObjectDiff contains the diff of two generic objects. 
-type ObjectDiff struct { - Type DiffType - Name string - Fields []*FieldDiff - Objects []*ObjectDiff -} - -func (o *ObjectDiff) GoString() string { - out := fmt.Sprintf("\n%q (%s) {\n", o.Name, o.Type) - for _, f := range o.Fields { - out += fmt.Sprintf("%#v\n", f) - } - for _, o := range o.Objects { - out += fmt.Sprintf("%#v\n", o) - } - out += "}" - return out -} - -func (o *ObjectDiff) Less(other *ObjectDiff) bool { - if reflect.DeepEqual(o, other) { - return false - } else if other == nil { - return false - } else if o == nil { - return true - } - - if o.Name != other.Name { - return o.Name < other.Name - } - - if o.Type != other.Type { - return o.Type.Less(other.Type) - } - - if lO, lOther := len(o.Fields), len(other.Fields); lO != lOther { - return lO < lOther - } - - if lO, lOther := len(o.Objects), len(other.Objects); lO != lOther { - return lO < lOther - } - - // Check each field - sort.Sort(FieldDiffs(o.Fields)) - sort.Sort(FieldDiffs(other.Fields)) - - for i, oV := range o.Fields { - if oV.Less(other.Fields[i]) { - return true - } - } - - // Check each object - sort.Sort(ObjectDiffs(o.Objects)) - sort.Sort(ObjectDiffs(other.Objects)) - for i, oV := range o.Objects { - if oV.Less(other.Objects[i]) { - return true - } - } - - return false -} - -// For sorting ObjectDiffs -type ObjectDiffs []*ObjectDiff - -func (o ObjectDiffs) Len() int { return len(o) } -func (o ObjectDiffs) Swap(i, j int) { o[i], o[j] = o[j], o[i] } -func (o ObjectDiffs) Less(i, j int) bool { return o[i].Less(o[j]) } - -type FieldDiff struct { - Type DiffType - Name string - Old, New string - Annotations []string -} - -// fieldDiff returns a FieldDiff if old and new are different otherwise, it -// returns nil. If contextual diff is enabled, even non-changed fields will be -// returned. -func fieldDiff(old, new, name string, contextual bool) *FieldDiff { - diff := &FieldDiff{Name: name, Type: DiffTypeNone} - if old == new { - if !contextual { - return nil - } - diff.Old, diff.New = old, new - return diff - } - - if old == "" { - diff.Type = DiffTypeAdded - diff.New = new - } else if new == "" { - diff.Type = DiffTypeDeleted - diff.Old = old - } else { - diff.Type = DiffTypeEdited - diff.Old = old - diff.New = new - } - return diff -} - -func (f *FieldDiff) GoString() string { - out := fmt.Sprintf("%q (%s): %q => %q", f.Name, f.Type, f.Old, f.New) - if len(f.Annotations) != 0 { - out += fmt.Sprintf(" (%s)", strings.Join(f.Annotations, ", ")) - } - - return out -} - -func (f *FieldDiff) Less(other *FieldDiff) bool { - if reflect.DeepEqual(f, other) { - return false - } else if other == nil { - return false - } else if f == nil { - return true - } - - if f.Name != other.Name { - return f.Name < other.Name - } else if f.Old != other.Old { - return f.Old < other.Old - } - - return f.New < other.New -} - -// For sorting FieldDiffs -type FieldDiffs []*FieldDiff - -func (f FieldDiffs) Len() int { return len(f) } -func (f FieldDiffs) Swap(i, j int) { f[i], f[j] = f[j], f[i] } -func (f FieldDiffs) Less(i, j int) bool { return f[i].Less(f[j]) } - -// fieldDiffs takes a map of field names to their values and returns a set of -// field diffs. If contextual diff is enabled, even non-changed fields will be -// returned. 
-func fieldDiffs(old, new map[string]string, contextual bool) []*FieldDiff { - var diffs []*FieldDiff - visited := make(map[string]struct{}) - for k, oldV := range old { - visited[k] = struct{}{} - newV := new[k] - if diff := fieldDiff(oldV, newV, k, contextual); diff != nil { - diffs = append(diffs, diff) - } - } - - for k, newV := range new { - if _, ok := visited[k]; !ok { - if diff := fieldDiff("", newV, k, contextual); diff != nil { - diffs = append(diffs, diff) - } - } - } - - sort.Sort(FieldDiffs(diffs)) - return diffs -} - -// stringSetDiff diffs two sets of strings with the given name. -func stringSetDiff(old, new []string, name string, contextual bool) *ObjectDiff { - oldMap := make(map[string]struct{}, len(old)) - newMap := make(map[string]struct{}, len(new)) - for _, o := range old { - oldMap[o] = struct{}{} - } - for _, n := range new { - newMap[n] = struct{}{} - } - if reflect.DeepEqual(oldMap, newMap) && !contextual { - return nil - } - - diff := &ObjectDiff{Name: name} - var added, removed bool - for k := range oldMap { - if _, ok := newMap[k]; !ok { - diff.Fields = append(diff.Fields, fieldDiff(k, "", name, contextual)) - removed = true - } else if contextual { - diff.Fields = append(diff.Fields, fieldDiff(k, k, name, contextual)) - } - } - - for k := range newMap { - if _, ok := oldMap[k]; !ok { - diff.Fields = append(diff.Fields, fieldDiff("", k, name, contextual)) - added = true - } - } - - sort.Sort(FieldDiffs(diff.Fields)) - - // Determine the type - if added && removed { - diff.Type = DiffTypeEdited - } else if added { - diff.Type = DiffTypeAdded - } else if removed { - diff.Type = DiffTypeDeleted - } else { - // Diff of an empty set - if len(diff.Fields) == 0 { - return nil - } - - diff.Type = DiffTypeNone - } - - return diff -} - -// primitiveObjectDiff returns a diff of the passed objects' primitive fields. -// The filter field can be used to exclude fields from the diff. The name is the -// name of the objects. If contextual is set, non-changed fields will also be -// stored in the object diff. -func primitiveObjectDiff(old, new interface{}, filter []string, name string, contextual bool) *ObjectDiff { - oldPrimitiveFlat := flatmap.Flatten(old, filter, true) - newPrimitiveFlat := flatmap.Flatten(new, filter, true) - delete(oldPrimitiveFlat, "") - delete(newPrimitiveFlat, "") - - diff := &ObjectDiff{Name: name} - diff.Fields = fieldDiffs(oldPrimitiveFlat, newPrimitiveFlat, contextual) - - var added, deleted, edited bool - for _, f := range diff.Fields { - switch f.Type { - case DiffTypeEdited: - edited = true - break - case DiffTypeDeleted: - deleted = true - case DiffTypeAdded: - added = true - } - } - - if edited || added && deleted { - diff.Type = DiffTypeEdited - } else if added { - diff.Type = DiffTypeAdded - } else if deleted { - diff.Type = DiffTypeDeleted - } else { - return nil - } - - return diff -} - -// primitiveObjectSetDiff does a set difference of the old and new sets. The -// filter parameter can be used to filter a set of primitive fields in the -// passed structs. The name corresponds to the name of the passed objects. If -// contextual diff is enabled, objects' primtive fields will be returned even if -// no diff exists. 
-func primitiveObjectSetDiff(old, new []interface{}, filter []string, name string, contextual bool) []*ObjectDiff { - makeSet := func(objects []interface{}) map[string]interface{} { - objMap := make(map[string]interface{}, len(objects)) - for _, obj := range objects { - hash, err := hashstructure.Hash(obj, nil) - if err != nil { - panic(err) - } - objMap[fmt.Sprintf("%d", hash)] = obj - } - - return objMap - } - - oldSet := makeSet(old) - newSet := makeSet(new) - - var diffs []*ObjectDiff - for k, v := range oldSet { - // Deleted - if _, ok := newSet[k]; !ok { - diffs = append(diffs, primitiveObjectDiff(v, nil, filter, name, contextual)) - } - } - for k, v := range newSet { - // Added - if _, ok := oldSet[k]; !ok { - diffs = append(diffs, primitiveObjectDiff(nil, v, filter, name, contextual)) - } - } - - sort.Sort(ObjectDiffs(diffs)) - return diffs -} - -// interfaceSlice is a helper method that takes a slice of typed elements and -// returns a slice of interface. This method will panic if given a non-slice -// input. -func interfaceSlice(slice interface{}) []interface{} { - s := reflect.ValueOf(slice) - if s.Kind() != reflect.Slice { - panic("InterfaceSlice() given a non-slice type") - } - - ret := make([]interface{}, s.Len()) - - for i := 0; i < s.Len(); i++ { - ret[i] = s.Index(i).Interface() - } - - return ret -} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/funcs.go b/vendor/github.com/hashicorp/nomad/nomad/structs/funcs.go deleted file mode 100644 index 751befd65d..0000000000 --- a/vendor/github.com/hashicorp/nomad/nomad/structs/funcs.go +++ /dev/null @@ -1,310 +0,0 @@ -package structs - -import ( - crand "crypto/rand" - "encoding/binary" - "fmt" - "math" - "sort" - "strings" - - "golang.org/x/crypto/blake2b" - - multierror "github.com/hashicorp/go-multierror" - lru "github.com/hashicorp/golang-lru" - "github.com/hashicorp/nomad/acl" -) - -// MergeMultierrorWarnings takes job warnings and canonicalize warnings and -// merges them into a returnable string. Both the errors may be nil. 
-func MergeMultierrorWarnings(warnings ...error) string { - var warningMsg multierror.Error - for _, warn := range warnings { - if warn != nil { - multierror.Append(&warningMsg, warn) - } - } - - if len(warningMsg.Errors) == 0 { - return "" - } - - // Set the formatter - warningMsg.ErrorFormat = warningsFormatter - return warningMsg.Error() -} - -// warningsFormatter is used to format job warnings -func warningsFormatter(es []error) string { - points := make([]string, len(es)) - for i, err := range es { - points[i] = fmt.Sprintf("* %s", err) - } - - return fmt.Sprintf( - "%d warning(s):\n\n%s", - len(es), strings.Join(points, "\n")) -} - -// RemoveAllocs is used to remove any allocs with the given IDs -// from the list of allocations -func RemoveAllocs(alloc []*Allocation, remove []*Allocation) []*Allocation { - // Convert remove into a set - removeSet := make(map[string]struct{}) - for _, remove := range remove { - removeSet[remove.ID] = struct{}{} - } - - n := len(alloc) - for i := 0; i < n; i++ { - if _, ok := removeSet[alloc[i].ID]; ok { - alloc[i], alloc[n-1] = alloc[n-1], nil - i-- - n-- - } - } - - alloc = alloc[:n] - return alloc -} - -// FilterTerminalAllocs filters out all allocations in a terminal state and -// returns the latest terminal allocations -func FilterTerminalAllocs(allocs []*Allocation) ([]*Allocation, map[string]*Allocation) { - terminalAllocsByName := make(map[string]*Allocation) - n := len(allocs) - for i := 0; i < n; i++ { - if allocs[i].TerminalStatus() { - - // Add the allocation to the terminal allocs map if it's not already - // added or has a higher create index than the one which is - // currently present. - alloc, ok := terminalAllocsByName[allocs[i].Name] - if !ok || alloc.CreateIndex < allocs[i].CreateIndex { - terminalAllocsByName[allocs[i].Name] = allocs[i] - } - - // Remove the allocation - allocs[i], allocs[n-1] = allocs[n-1], nil - i-- - n-- - } - } - return allocs[:n], terminalAllocsByName -} - -// AllocsFit checks if a given set of allocations will fit on a node. -// The netIdx can optionally be provided if its already been computed. -// If the netIdx is provided, it is assumed that the client has already -// ensured there are no collisions. -func AllocsFit(node *Node, allocs []*Allocation, netIdx *NetworkIndex) (bool, string, *Resources, error) { - // Compute the utilization from zero - used := new(Resources) - - // Add the reserved resources of the node - if node.Reserved != nil { - if err := used.Add(node.Reserved); err != nil { - return false, "", nil, err - } - } - - // For each alloc, add the resources - for _, alloc := range allocs { - if alloc.Resources != nil { - if err := used.Add(alloc.Resources); err != nil { - return false, "", nil, err - } - } else if alloc.TaskResources != nil { - - // Adding the shared resource asks for the allocation to the used - // resources - if err := used.Add(alloc.SharedResources); err != nil { - return false, "", nil, err - } - // Allocations within the plan have the combined resources stripped - // to save space, so sum up the individual task resources. 
- for _, taskResource := range alloc.TaskResources { - if err := used.Add(taskResource); err != nil { - return false, "", nil, err - } - } - } else { - return false, "", nil, fmt.Errorf("allocation %q has no resources set", alloc.ID) - } - } - - // Check that the node resources are a super set of those - // that are being allocated - if superset, dimension := node.Resources.Superset(used); !superset { - return false, dimension, used, nil - } - - // Create the network index if missing - if netIdx == nil { - netIdx = NewNetworkIndex() - defer netIdx.Release() - if netIdx.SetNode(node) || netIdx.AddAllocs(allocs) { - return false, "reserved port collision", used, nil - } - } - - // Check if the network is overcommitted - if netIdx.Overcommitted() { - return false, "bandwidth exceeded", used, nil - } - - // Allocations fit! - return true, "", used, nil -} - -// ScoreFit is used to score the fit based on the Google work published here: -// http://www.columbia.edu/~cs2035/courses/ieor4405.S13/datacenter_scheduling.ppt -// This is equivalent to their BestFit v3 -func ScoreFit(node *Node, util *Resources) float64 { - // Determine the node availability - nodeCpu := float64(node.Resources.CPU) - if node.Reserved != nil { - nodeCpu -= float64(node.Reserved.CPU) - } - nodeMem := float64(node.Resources.MemoryMB) - if node.Reserved != nil { - nodeMem -= float64(node.Reserved.MemoryMB) - } - - // Compute the free percentage - freePctCpu := 1 - (float64(util.CPU) / nodeCpu) - freePctRam := 1 - (float64(util.MemoryMB) / nodeMem) - - // Total will be "maximized" the smaller the value is. - // At 100% utilization, the total is 2, while at 0% util it is 20. - total := math.Pow(10, freePctCpu) + math.Pow(10, freePctRam) - - // Invert so that the "maximized" total represents a high-value - // score. Because the floor is 20, we simply use that as an anchor. - // This means at a perfect fit, we return 18 as the score. - score := 20.0 - total - - // Bound the score, just in case - // If the score is over 18, that means we've overfit the node. - if score > 18.0 { - score = 18.0 - } else if score < 0 { - score = 0 - } - return score -} - -// GenerateUUID is used to generate a random UUID -func GenerateUUID() string { - buf := make([]byte, 16) - if _, err := crand.Read(buf); err != nil { - panic(fmt.Errorf("failed to read random bytes: %v", err)) - } - - return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x", - buf[0:4], - buf[4:6], - buf[6:8], - buf[8:10], - buf[10:16]) -} - -func CopySliceConstraints(s []*Constraint) []*Constraint { - l := len(s) - if l == 0 { - return nil - } - - c := make([]*Constraint, l) - for i, v := range s { - c[i] = v.Copy() - } - return c -} - -// VaultPoliciesSet takes the structure returned by VaultPolicies and returns -// the set of required policies -func VaultPoliciesSet(policies map[string]map[string]*Vault) []string { - set := make(map[string]struct{}) - - for _, tgp := range policies { - for _, tp := range tgp { - for _, p := range tp.Policies { - set[p] = struct{}{} - } - } - } - - flattened := make([]string, 0, len(set)) - for p := range set { - flattened = append(flattened, p) - } - return flattened -} - -// DenormalizeAllocationJobs is used to attach a job to all allocations that are -// non-terminal and do not have a job already. This is useful in cases where the -// job is normalized. 
-func DenormalizeAllocationJobs(job *Job, allocs []*Allocation) { - if job != nil { - for _, alloc := range allocs { - if alloc.Job == nil && !alloc.TerminalStatus() { - alloc.Job = job - } - } - } -} - -// AllocName returns the name of the allocation given the input. -func AllocName(job, group string, idx uint) string { - return fmt.Sprintf("%s.%s[%d]", job, group, idx) -} - -// ACLPolicyListHash returns a consistent hash for a set of policies. -func ACLPolicyListHash(policies []*ACLPolicy) string { - cacheKeyHash, err := blake2b.New256(nil) - if err != nil { - panic(err) - } - for _, policy := range policies { - cacheKeyHash.Write([]byte(policy.Name)) - binary.Write(cacheKeyHash, binary.BigEndian, policy.ModifyIndex) - } - cacheKey := string(cacheKeyHash.Sum(nil)) - return cacheKey -} - -// CompileACLObject compiles a set of ACL policies into an ACL object with a cache -func CompileACLObject(cache *lru.TwoQueueCache, policies []*ACLPolicy) (*acl.ACL, error) { - // Sort the policies to ensure consistent ordering - sort.Slice(policies, func(i, j int) bool { - return policies[i].Name < policies[j].Name - }) - - // Determine the cache key - cacheKey := ACLPolicyListHash(policies) - aclRaw, ok := cache.Get(cacheKey) - if ok { - return aclRaw.(*acl.ACL), nil - } - - // Parse the policies - parsed := make([]*acl.Policy, 0, len(policies)) - for _, policy := range policies { - p, err := acl.Parse(policy.Rules) - if err != nil { - return nil, fmt.Errorf("failed to parse %q: %v", policy.Name, err) - } - parsed = append(parsed, p) - } - - // Create the ACL object - aclObj, err := acl.NewACL(false, parsed) - if err != nil { - return nil, fmt.Errorf("failed to construct ACL: %v", err) - } - - // Update the cache - cache.Add(cacheKey, aclObj) - return aclObj, nil -} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/network.go b/vendor/github.com/hashicorp/nomad/nomad/structs/network.go deleted file mode 100644 index 3f0ebff4f0..0000000000 --- a/vendor/github.com/hashicorp/nomad/nomad/structs/network.go +++ /dev/null @@ -1,326 +0,0 @@ -package structs - -import ( - "fmt" - "math/rand" - "net" - "sync" -) - -const ( - // MinDynamicPort is the smallest dynamic port generated - MinDynamicPort = 20000 - - // MaxDynamicPort is the largest dynamic port generated - MaxDynamicPort = 32000 - - // maxRandPortAttempts is the maximum number of attempt - // to assign a random port - maxRandPortAttempts = 20 - - // maxValidPort is the max valid port number - maxValidPort = 65536 -) - -var ( - // bitmapPool is used to pool the bitmaps used for port collision - // checking. They are fairly large (8K) so we can re-use them to - // avoid GC pressure. Care should be taken to call Clear() on any - // bitmap coming from the pool. 
- bitmapPool = new(sync.Pool) -) - -// NetworkIndex is used to index the available network resources -// and the used network resources on a machine given allocations -type NetworkIndex struct { - AvailNetworks []*NetworkResource // List of available networks - AvailBandwidth map[string]int // Bandwidth by device - UsedPorts map[string]Bitmap // Ports by IP - UsedBandwidth map[string]int // Bandwidth by device -} - -// NewNetworkIndex is used to construct a new network index -func NewNetworkIndex() *NetworkIndex { - return &NetworkIndex{ - AvailBandwidth: make(map[string]int), - UsedPorts: make(map[string]Bitmap), - UsedBandwidth: make(map[string]int), - } -} - -// Release is called when the network index is no longer needed -// to attempt to re-use some of the memory it has allocated -func (idx *NetworkIndex) Release() { - for _, b := range idx.UsedPorts { - bitmapPool.Put(b) - } -} - -// Overcommitted checks if the network is overcommitted -func (idx *NetworkIndex) Overcommitted() bool { - for device, used := range idx.UsedBandwidth { - avail := idx.AvailBandwidth[device] - if used > avail { - return true - } - } - return false -} - -// SetNode is used to setup the available network resources. Returns -// true if there is a collision -func (idx *NetworkIndex) SetNode(node *Node) (collide bool) { - // Add the available CIDR blocks - for _, n := range node.Resources.Networks { - if n.Device != "" { - idx.AvailNetworks = append(idx.AvailNetworks, n) - idx.AvailBandwidth[n.Device] = n.MBits - } - } - - // Add the reserved resources - if r := node.Reserved; r != nil { - for _, n := range r.Networks { - if idx.AddReserved(n) { - collide = true - } - } - } - return -} - -// AddAllocs is used to add the used network resources. Returns -// true if there is a collision -func (idx *NetworkIndex) AddAllocs(allocs []*Allocation) (collide bool) { - for _, alloc := range allocs { - for _, task := range alloc.TaskResources { - if len(task.Networks) == 0 { - continue - } - n := task.Networks[0] - if idx.AddReserved(n) { - collide = true - } - } - } - return -} - -// AddReserved is used to add a reserved network usage, returns true -// if there is a port collision -func (idx *NetworkIndex) AddReserved(n *NetworkResource) (collide bool) { - // Add the port usage - used := idx.UsedPorts[n.IP] - if used == nil { - // Try to get a bitmap from the pool, else create - raw := bitmapPool.Get() - if raw != nil { - used = raw.(Bitmap) - used.Clear() - } else { - used, _ = NewBitmap(maxValidPort) - } - idx.UsedPorts[n.IP] = used - } - - for _, ports := range [][]Port{n.ReservedPorts, n.DynamicPorts} { - for _, port := range ports { - // Guard against invalid port - if port.Value < 0 || port.Value >= maxValidPort { - return true - } - if used.Check(uint(port.Value)) { - collide = true - } else { - used.Set(uint(port.Value)) - } - } - } - - // Add the bandwidth - idx.UsedBandwidth[n.Device] += n.MBits - return -} - -// yieldIP is used to iteratively invoke the callback with -// an available IP -func (idx *NetworkIndex) yieldIP(cb func(net *NetworkResource, ip net.IP) bool) { - inc := func(ip net.IP) { - for j := len(ip) - 1; j >= 0; j-- { - ip[j]++ - if ip[j] > 0 { - break - } - } - } - - for _, n := range idx.AvailNetworks { - ip, ipnet, err := net.ParseCIDR(n.CIDR) - if err != nil { - continue - } - for ip := ip.Mask(ipnet.Mask); ipnet.Contains(ip); inc(ip) { - if cb(n, ip) { - return - } - } - } -} - -// AssignNetwork is used to assign network resources given an ask. 
-// If the ask cannot be satisfied, returns nil -func (idx *NetworkIndex) AssignNetwork(ask *NetworkResource) (out *NetworkResource, err error) { - err = fmt.Errorf("no networks available") - idx.yieldIP(func(n *NetworkResource, ip net.IP) (stop bool) { - // Convert the IP to a string - ipStr := ip.String() - - // Check if we would exceed the bandwidth cap - availBandwidth := idx.AvailBandwidth[n.Device] - usedBandwidth := idx.UsedBandwidth[n.Device] - if usedBandwidth+ask.MBits > availBandwidth { - err = fmt.Errorf("bandwidth exceeded") - return - } - - used := idx.UsedPorts[ipStr] - - // Check if any of the reserved ports are in use - for _, port := range ask.ReservedPorts { - // Guard against invalid port - if port.Value < 0 || port.Value >= maxValidPort { - err = fmt.Errorf("invalid port %d (out of range)", port.Value) - return - } - - // Check if in use - if used != nil && used.Check(uint(port.Value)) { - err = fmt.Errorf("reserved port collision") - return - } - } - - // Create the offer - offer := &NetworkResource{ - Device: n.Device, - IP: ipStr, - MBits: ask.MBits, - ReservedPorts: ask.ReservedPorts, - DynamicPorts: ask.DynamicPorts, - } - - // Try to stochastically pick the dynamic ports as it is faster and - // lower memory usage. - var dynPorts []int - var dynErr error - dynPorts, dynErr = getDynamicPortsStochastic(used, ask) - if dynErr == nil { - goto BUILD_OFFER - } - - // Fall back to the precise method if the random sampling failed. - dynPorts, dynErr = getDynamicPortsPrecise(used, ask) - if dynErr != nil { - err = dynErr - return - } - - BUILD_OFFER: - for i, port := range dynPorts { - offer.DynamicPorts[i].Value = port - } - - // Stop, we have an offer! - out = offer - err = nil - return true - }) - return -} - -// getDynamicPortsPrecise takes the nodes used port bitmap which may be nil if -// no ports have been allocated yet, the network ask and returns a set of unused -// ports to fullfil the ask's DynamicPorts or an error if it failed. An error -// means the ask can not be satisfied as the method does a precise search. -func getDynamicPortsPrecise(nodeUsed Bitmap, ask *NetworkResource) ([]int, error) { - // Create a copy of the used ports and apply the new reserves - var usedSet Bitmap - var err error - if nodeUsed != nil { - usedSet, err = nodeUsed.Copy() - if err != nil { - return nil, err - } - } else { - usedSet, err = NewBitmap(maxValidPort) - if err != nil { - return nil, err - } - } - - for _, port := range ask.ReservedPorts { - usedSet.Set(uint(port.Value)) - } - - // Get the indexes of the unset - availablePorts := usedSet.IndexesInRange(false, MinDynamicPort, MaxDynamicPort) - - // Randomize the amount we need - numDyn := len(ask.DynamicPorts) - if len(availablePorts) < numDyn { - return nil, fmt.Errorf("dynamic port selection failed") - } - - numAvailable := len(availablePorts) - for i := 0; i < numDyn; i++ { - j := rand.Intn(numAvailable) - availablePorts[i], availablePorts[j] = availablePorts[j], availablePorts[i] - } - - return availablePorts[:numDyn], nil -} - -// getDynamicPortsStochastic takes the nodes used port bitmap which may be nil if -// no ports have been allocated yet, the network ask and returns a set of unused -// ports to fullfil the ask's DynamicPorts or an error if it failed. An error -// does not mean the ask can not be satisfied as the method has a fixed amount -// of random probes and if these fail, the search is aborted. 
-func getDynamicPortsStochastic(nodeUsed Bitmap, ask *NetworkResource) ([]int, error) { - var reserved, dynamic []int - for _, port := range ask.ReservedPorts { - reserved = append(reserved, port.Value) - } - - for i := 0; i < len(ask.DynamicPorts); i++ { - attempts := 0 - PICK: - attempts++ - if attempts > maxRandPortAttempts { - return nil, fmt.Errorf("stochastic dynamic port selection failed") - } - - randPort := MinDynamicPort + rand.Intn(MaxDynamicPort-MinDynamicPort) - if nodeUsed != nil && nodeUsed.Check(uint(randPort)) { - goto PICK - } - - for _, ports := range [][]int{reserved, dynamic} { - if isPortReserved(ports, randPort) { - goto PICK - } - } - dynamic = append(dynamic, randPort) - } - - return dynamic, nil -} - -// IntContains scans an integer slice for a value -func isPortReserved(haystack []int, needle int) bool { - for _, item := range haystack { - if item == needle { - return true - } - } - return false -} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/node_class.go b/vendor/github.com/hashicorp/nomad/nomad/structs/node_class.go deleted file mode 100644 index aab0700550..0000000000 --- a/vendor/github.com/hashicorp/nomad/nomad/structs/node_class.go +++ /dev/null @@ -1,94 +0,0 @@ -package structs - -import ( - "fmt" - "strings" - - "github.com/mitchellh/hashstructure" -) - -const ( - // NodeUniqueNamespace is a prefix that can be appended to node meta or - // attribute keys to mark them for exclusion in computed node class. - NodeUniqueNamespace = "unique." -) - -// UniqueNamespace takes a key and returns the key marked under the unique -// namespace. -func UniqueNamespace(key string) string { - return fmt.Sprintf("%s%s", NodeUniqueNamespace, key) -} - -// IsUniqueNamespace returns whether the key is under the unique namespace. -func IsUniqueNamespace(key string) bool { - return strings.HasPrefix(key, NodeUniqueNamespace) -} - -// ComputeClass computes a derived class for the node based on its attributes. -// ComputedClass is a unique id that identifies nodes with a common set of -// attributes and capabilities. Thus, when calculating a node's computed class -// we avoid including any uniquely identifing fields. -func (n *Node) ComputeClass() error { - hash, err := hashstructure.Hash(n, nil) - if err != nil { - return err - } - - n.ComputedClass = fmt.Sprintf("v1:%d", hash) - return nil -} - -// HashInclude is used to blacklist uniquely identifying node fields from being -// included in the computed node class. -func (n Node) HashInclude(field string, v interface{}) (bool, error) { - switch field { - case "Datacenter", "Attributes", "Meta", "NodeClass": - return true, nil - default: - return false, nil - } -} - -// HashIncludeMap is used to blacklist uniquely identifying node map keys from being -// included in the computed node class. -func (n Node) HashIncludeMap(field string, k, v interface{}) (bool, error) { - key, ok := k.(string) - if !ok { - return false, fmt.Errorf("map key %v not a string", k) - } - - switch field { - case "Meta", "Attributes": - return !IsUniqueNamespace(key), nil - default: - return false, fmt.Errorf("unexpected map field: %v", field) - } -} - -// EscapedConstraints takes a set of constraints and returns the set that -// escapes computed node classes. 
-func EscapedConstraints(constraints []*Constraint) []*Constraint { - var escaped []*Constraint - for _, c := range constraints { - if constraintTargetEscapes(c.LTarget) || constraintTargetEscapes(c.RTarget) { - escaped = append(escaped, c) - } - } - - return escaped -} - -// constraintTargetEscapes returns whether the target of a constraint escapes -// computed node class optimization. -func constraintTargetEscapes(target string) bool { - switch { - case strings.HasPrefix(target, "${node.unique."): - return true - case strings.HasPrefix(target, "${attr.unique."): - return true - case strings.HasPrefix(target, "${meta.unique."): - return true - default: - return false - } -} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/operator.go b/vendor/github.com/hashicorp/nomad/nomad/structs/operator.go deleted file mode 100644 index 93b99f6fb7..0000000000 --- a/vendor/github.com/hashicorp/nomad/nomad/structs/operator.go +++ /dev/null @@ -1,49 +0,0 @@ -package structs - -import ( - "github.com/hashicorp/raft" -) - -// RaftServer has information about a server in the Raft configuration. -type RaftServer struct { - // ID is the unique ID for the server. These are currently the same - // as the address, but they will be changed to a real GUID in a future - // release of Nomad. - ID raft.ServerID - - // Node is the node name of the server, as known by Nomad, or this - // will be set to "(unknown)" otherwise. - Node string - - // Address is the IP:port of the server, used for Raft communications. - Address raft.ServerAddress - - // Leader is true if this server is the current cluster leader. - Leader bool - - // Voter is true if this server has a vote in the cluster. This might - // be false if the server is staging and still coming online, or if - // it's a non-voting server, which will be added in a future release of - // Nomad. - Voter bool -} - -// RaftConfigrationResponse is returned when querying for the current Raft -// configuration. -type RaftConfigurationResponse struct { - // Servers has the list of servers in the Raft configuration. - Servers []*RaftServer - - // Index has the Raft index of this configuration. - Index uint64 -} - -// RaftPeerByAddressRequest is used by the Operator endpoint to apply a Raft -// operation on a specific Raft peer by address in the form of "IP:port". -type RaftPeerByAddressRequest struct { - // Address is the peer to remove, in the form "IP:port". - Address raft.ServerAddress - - // WriteRequest holds the Region for this request. 
- WriteRequest -} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/structs.go b/vendor/github.com/hashicorp/nomad/nomad/structs/structs.go deleted file mode 100644 index 1ca19e35d0..0000000000 --- a/vendor/github.com/hashicorp/nomad/nomad/structs/structs.go +++ /dev/null @@ -1,5783 +0,0 @@ -package structs - -import ( - "bytes" - "crypto/md5" - "crypto/sha1" - "crypto/sha256" - "crypto/sha512" - "encoding/hex" - "errors" - "fmt" - "io" - "net" - "os" - "path/filepath" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "time" - - "golang.org/x/crypto/blake2b" - - "github.com/gorhill/cronexpr" - "github.com/hashicorp/consul/api" - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/go-version" - "github.com/hashicorp/nomad/acl" - "github.com/hashicorp/nomad/helper" - "github.com/hashicorp/nomad/helper/args" - "github.com/mitchellh/copystructure" - "github.com/ugorji/go/codec" - - hcodec "github.com/hashicorp/go-msgpack/codec" -) - -var ( - ErrNoLeader = fmt.Errorf("No cluster leader") - ErrNoRegionPath = fmt.Errorf("No path to region") - ErrTokenNotFound = errors.New("ACL token not found") - ErrPermissionDenied = errors.New("Permission denied") - - // validPolicyName is used to validate a policy name - validPolicyName = regexp.MustCompile("^[a-zA-Z0-9-]{1,128}$") -) - -type MessageType uint8 - -const ( - NodeRegisterRequestType MessageType = iota - NodeDeregisterRequestType - NodeUpdateStatusRequestType - NodeUpdateDrainRequestType - JobRegisterRequestType - JobDeregisterRequestType - EvalUpdateRequestType - EvalDeleteRequestType - AllocUpdateRequestType - AllocClientUpdateRequestType - ReconcileJobSummariesRequestType - VaultAccessorRegisterRequestType - VaultAccessorDegisterRequestType - ApplyPlanResultsRequestType - DeploymentStatusUpdateRequestType - DeploymentPromoteRequestType - DeploymentAllocHealthRequestType - DeploymentDeleteRequestType - JobStabilityRequestType - ACLPolicyUpsertRequestType - ACLPolicyDeleteRequestType - ACLTokenUpsertRequestType - ACLTokenDeleteRequestType - ACLTokenBootstrapRequestType -) - -const ( - // IgnoreUnknownTypeFlag is set along with a MessageType - // to indicate that the message type can be safely ignored - // if it is not recognized. This is for future proofing, so - // that new commands can be added in a way that won't cause - // old servers to crash when the FSM attempts to process them. - IgnoreUnknownTypeFlag MessageType = 128 - - // ApiMajorVersion is returned as part of the Status.Version request. - // It should be incremented anytime the APIs are changed in a way - // that would break clients for sane client versioning. - ApiMajorVersion = 1 - - // ApiMinorVersion is returned as part of the Status.Version request. - // It should be incremented anytime the APIs are changed to allow - // for sane client versioning. Minor changes should be compatible - // within the major version. - ApiMinorVersion = 1 - - ProtocolVersion = "protocol" - APIMajorVersion = "api.major" - APIMinorVersion = "api.minor" - - GetterModeAny = "any" - GetterModeFile = "file" - GetterModeDir = "dir" - - // maxPolicyDescriptionLength limits a policy description length - maxPolicyDescriptionLength = 256 - - // maxTokenNameLength limits a ACL token name length - maxTokenNameLength = 64 - - // ACLClientToken and ACLManagementToken are the only types of tokens - ACLClientToken = "client" - ACLManagementToken = "management" - - // DefaultNamespace is the default namespace. 
- DefaultNamespace = "default" - DefaultNamespaceDescription = "Default shared namespace" -) - -// Context defines the scope in which a search for Nomad object operates, and -// is also used to query the matching index value for this context -type Context string - -const ( - Allocs Context = "allocs" - Deployments Context = "deployment" - Evals Context = "evals" - Jobs Context = "jobs" - Nodes Context = "nodes" - Namespaces Context = "namespaces" - All Context = "all" -) - -// NamespacedID is a tuple of an ID and a namespace -type NamespacedID struct { - ID string - Namespace string -} - -// RPCInfo is used to describe common information about query -type RPCInfo interface { - RequestRegion() string - IsRead() bool - AllowStaleRead() bool -} - -// QueryOptions is used to specify various flags for read queries -type QueryOptions struct { - // The target region for this query - Region string - - // Namespace is the target namespace for the query. - Namespace string - - // If set, wait until query exceeds given index. Must be provided - // with MaxQueryTime. - MinQueryIndex uint64 - - // Provided with MinQueryIndex to wait for change. - MaxQueryTime time.Duration - - // If set, any follower can service the request. Results - // may be arbitrarily stale. - AllowStale bool - - // If set, used as prefix for resource list searches - Prefix string - - // SecretID is secret portion of the ACL token used for the request - SecretID string -} - -func (q QueryOptions) RequestRegion() string { - return q.Region -} - -func (q QueryOptions) RequestNamespace() string { - if q.Namespace == "" { - return DefaultNamespace - } - return q.Namespace -} - -// QueryOption only applies to reads, so always true -func (q QueryOptions) IsRead() bool { - return true -} - -func (q QueryOptions) AllowStaleRead() bool { - return q.AllowStale -} - -type WriteRequest struct { - // The target region for this write - Region string - - // Namespace is the target namespace for the write. - Namespace string - - // SecretID is secret portion of the ACL token used for the request - SecretID string -} - -func (w WriteRequest) RequestRegion() string { - // The target region for this request - return w.Region -} - -func (w WriteRequest) RequestNamespace() string { - if w.Namespace == "" { - return DefaultNamespace - } - return w.Namespace -} - -// WriteRequest only applies to writes, always false -func (w WriteRequest) IsRead() bool { - return false -} - -func (w WriteRequest) AllowStaleRead() bool { - return false -} - -// QueryMeta allows a query response to include potentially -// useful metadata about a query -type QueryMeta struct { - // This is the index associated with the read - Index uint64 - - // If AllowStale is used, this is time elapsed since - // last contact between the follower and leader. This - // can be used to gauge staleness. - LastContact time.Duration - - // Used to indicate if there is a known leader node - KnownLeader bool -} - -// WriteMeta allows a write response to include potentially -// useful metadata about the write -type WriteMeta struct { - // This is the index associated with the write - Index uint64 -} - -// NodeRegisterRequest is used for Node.Register endpoint -// to register a node as being a schedulable entity. -type NodeRegisterRequest struct { - Node *Node - WriteRequest -} - -// NodeDeregisterRequest is used for Node.Deregister endpoint -// to deregister a node as being a schedulable entity. 
-type NodeDeregisterRequest struct { - NodeID string - WriteRequest -} - -// NodeServerInfo is used to in NodeUpdateResponse to return Nomad server -// information used in RPC server lists. -type NodeServerInfo struct { - // RPCAdvertiseAddr is the IP endpoint that a Nomad Server wishes to - // be contacted at for RPCs. - RPCAdvertiseAddr string - - // RpcMajorVersion is the major version number the Nomad Server - // supports - RPCMajorVersion int32 - - // RpcMinorVersion is the minor version number the Nomad Server - // supports - RPCMinorVersion int32 - - // Datacenter is the datacenter that a Nomad server belongs to - Datacenter string -} - -// NodeUpdateStatusRequest is used for Node.UpdateStatus endpoint -// to update the status of a node. -type NodeUpdateStatusRequest struct { - NodeID string - Status string - WriteRequest -} - -// NodeUpdateDrainRequest is used for updatin the drain status -type NodeUpdateDrainRequest struct { - NodeID string - Drain bool - WriteRequest -} - -// NodeEvaluateRequest is used to re-evaluate the ndoe -type NodeEvaluateRequest struct { - NodeID string - WriteRequest -} - -// NodeSpecificRequest is used when we just need to specify a target node -type NodeSpecificRequest struct { - NodeID string - SecretID string - QueryOptions -} - -// SearchResponse is used to return matches and information about whether -// the match list is truncated specific to each type of context. -type SearchResponse struct { - // Map of context types to ids which match a specified prefix - Matches map[Context][]string - - // Truncations indicates whether the matches for a particular context have - // been truncated - Truncations map[Context]bool - - QueryMeta -} - -// SearchRequest is used to parameterize a request, and returns a -// list of matches made up of jobs, allocations, evaluations, and/or nodes, -// along with whether or not the information returned is truncated. -type SearchRequest struct { - // Prefix is what ids are matched to. I.e, if the given prefix were - // "a", potential matches might be "abcd" or "aabb" - Prefix string - - // Context is the type that can be matched against. A context can be a job, - // node, evaluation, allocation, or empty (indicated every context should be - // matched) - Context Context - - QueryOptions -} - -// JobRegisterRequest is used for Job.Register endpoint -// to register a job as being a schedulable entity. -type JobRegisterRequest struct { - Job *Job - - // If EnforceIndex is set then the job will only be registered if the passed - // JobModifyIndex matches the current Jobs index. If the index is zero, the - // register only occurs if the job is new. - EnforceIndex bool - JobModifyIndex uint64 - - // PolicyOverride is set when the user is attempting to override any policies - PolicyOverride bool - - WriteRequest -} - -// JobDeregisterRequest is used for Job.Deregister endpoint -// to deregister a job as being a schedulable entity. 
-type JobDeregisterRequest struct { - JobID string - - // Purge controls whether the deregister purges the job from the system or - // whether the job is just marked as stopped and will be removed by the - // garbage collector - Purge bool - - WriteRequest -} - -// JobEvaluateRequest is used when we just need to re-evaluate a target job -type JobEvaluateRequest struct { - JobID string - WriteRequest -} - -// JobSpecificRequest is used when we just need to specify a target job -type JobSpecificRequest struct { - JobID string - AllAllocs bool - QueryOptions -} - -// JobListRequest is used to parameterize a list request -type JobListRequest struct { - QueryOptions -} - -// JobPlanRequest is used for the Job.Plan endpoint to trigger a dry-run -// evaluation of the Job. -type JobPlanRequest struct { - Job *Job - Diff bool // Toggles an annotated diff - // PolicyOverride is set when the user is attempting to override any policies - PolicyOverride bool - WriteRequest -} - -// JobSummaryRequest is used when we just need to get a specific job summary -type JobSummaryRequest struct { - JobID string - QueryOptions -} - -// JobDispatchRequest is used to dispatch a job based on a parameterized job -type JobDispatchRequest struct { - JobID string - Payload []byte - Meta map[string]string - WriteRequest -} - -// JobValidateRequest is used to validate a job -type JobValidateRequest struct { - Job *Job - WriteRequest -} - -// JobRevertRequest is used to revert a job to a prior version. -type JobRevertRequest struct { - // JobID is the ID of the job being reverted - JobID string - - // JobVersion the version to revert to. - JobVersion uint64 - - // EnforcePriorVersion if set will enforce that the job is at the given - // version before reverting. - EnforcePriorVersion *uint64 - - WriteRequest -} - -// JobStabilityRequest is used to marked a job as stable. -type JobStabilityRequest struct { - // Job to set the stability on - JobID string - JobVersion uint64 - - // Set the stability - Stable bool - WriteRequest -} - -// JobStabilityResponse is the response when marking a job as stable. -type JobStabilityResponse struct { - WriteMeta -} - -// NodeListRequest is used to parameterize a list request -type NodeListRequest struct { - QueryOptions -} - -// EvalUpdateRequest is used for upserting evaluations. -type EvalUpdateRequest struct { - Evals []*Evaluation - EvalToken string - WriteRequest -} - -// EvalDeleteRequest is used for deleting an evaluation. -type EvalDeleteRequest struct { - Evals []string - Allocs []string - WriteRequest -} - -// EvalSpecificRequest is used when we just need to specify a target evaluation -type EvalSpecificRequest struct { - EvalID string - QueryOptions -} - -// EvalAckRequest is used to Ack/Nack a specific evaluation -type EvalAckRequest struct { - EvalID string - Token string - WriteRequest -} - -// EvalDequeueRequest is used when we want to dequeue an evaluation -type EvalDequeueRequest struct { - Schedulers []string - Timeout time.Duration - SchedulerVersion uint16 - WriteRequest -} - -// EvalListRequest is used to list the evaluations -type EvalListRequest struct { - QueryOptions -} - -// PlanRequest is used to submit an allocation plan to the leader -type PlanRequest struct { - Plan *Plan - WriteRequest -} - -// ApplyPlanResultsRequest is used by the planner to apply a Raft transaction -// committing the result of a plan. -type ApplyPlanResultsRequest struct { - // AllocUpdateRequest holds the allocation updates to be made by the - // scheduler. 
- AllocUpdateRequest - - // Deployment is the deployment created or updated as a result of a - // scheduling event. - Deployment *Deployment - - // DeploymentUpdates is a set of status updates to apply to the given - // deployments. This allows the scheduler to cancel any unneeded deployment - // because the job is stopped or the update block is removed. - DeploymentUpdates []*DeploymentStatusUpdate -} - -// AllocUpdateRequest is used to submit changes to allocations, either -// to cause evictions or to assign new allocaitons. Both can be done -// within a single transaction -type AllocUpdateRequest struct { - // Alloc is the list of new allocations to assign - Alloc []*Allocation - - // Job is the shared parent job of the allocations. - // It is pulled out since it is common to reduce payload size. - Job *Job - - WriteRequest -} - -// AllocListRequest is used to request a list of allocations -type AllocListRequest struct { - QueryOptions -} - -// AllocSpecificRequest is used to query a specific allocation -type AllocSpecificRequest struct { - AllocID string - QueryOptions -} - -// AllocsGetRequest is used to query a set of allocations -type AllocsGetRequest struct { - AllocIDs []string - QueryOptions -} - -// PeriodicForceReqeuest is used to force a specific periodic job. -type PeriodicForceRequest struct { - JobID string - WriteRequest -} - -// ServerMembersResponse has the list of servers in a cluster -type ServerMembersResponse struct { - ServerName string - ServerRegion string - ServerDC string - Members []*ServerMember -} - -// ServerMember holds information about a Nomad server agent in a cluster -type ServerMember struct { - Name string - Addr net.IP - Port uint16 - Tags map[string]string - Status string - ProtocolMin uint8 - ProtocolMax uint8 - ProtocolCur uint8 - DelegateMin uint8 - DelegateMax uint8 - DelegateCur uint8 -} - -// DeriveVaultTokenRequest is used to request wrapped Vault tokens for the -// following tasks in the given allocation -type DeriveVaultTokenRequest struct { - NodeID string - SecretID string - AllocID string - Tasks []string - QueryOptions -} - -// VaultAccessorsRequest is used to operate on a set of Vault accessors -type VaultAccessorsRequest struct { - Accessors []*VaultAccessor -} - -// VaultAccessor is a reference to a created Vault token on behalf of -// an allocation's task. -type VaultAccessor struct { - AllocID string - Task string - NodeID string - Accessor string - CreationTTL int - - // Raft Indexes - CreateIndex uint64 -} - -// DeriveVaultTokenResponse returns the wrapped tokens for each requested task -type DeriveVaultTokenResponse struct { - // Tasks is a mapping between the task name and the wrapped token - Tasks map[string]string - - // Error stores any error that occurred. Errors are stored here so we can - // communicate whether it is retriable - Error *RecoverableError - - QueryMeta -} - -// GenericRequest is used to request where no -// specific information is needed. -type GenericRequest struct { - QueryOptions -} - -// DeploymentListRequest is used to list the deployments -type DeploymentListRequest struct { - QueryOptions -} - -// DeploymentDeleteRequest is used for deleting deployments. -type DeploymentDeleteRequest struct { - Deployments []string - WriteRequest -} - -// DeploymentStatusUpdateRequest is used to update the status of a deployment as -// well as optionally creating an evaluation atomically. 
-type DeploymentStatusUpdateRequest struct { - // Eval, if set, is used to create an evaluation at the same time as - // updating the status of a deployment. - Eval *Evaluation - - // DeploymentUpdate is a status update to apply to the given - // deployment. - DeploymentUpdate *DeploymentStatusUpdate - - // Job is used to optionally upsert a job. This is used when setting the - // allocation health results in a deployment failure and the deployment - // auto-reverts to the latest stable job. - Job *Job -} - -// DeploymentAllocHealthRequest is used to set the health of a set of -// allocations as part of a deployment. -type DeploymentAllocHealthRequest struct { - DeploymentID string - - // Marks these allocations as healthy, allow further allocations - // to be rolled. - HealthyAllocationIDs []string - - // Any unhealthy allocations fail the deployment - UnhealthyAllocationIDs []string - - WriteRequest -} - -// ApplyDeploymentAllocHealthRequest is used to apply an alloc health request via Raft -type ApplyDeploymentAllocHealthRequest struct { - DeploymentAllocHealthRequest - - // An optional field to update the status of a deployment - DeploymentUpdate *DeploymentStatusUpdate - - // Job is used to optionally upsert a job. This is used when setting the - // allocation health results in a deployment failure and the deployment - // auto-reverts to the latest stable job. - Job *Job - - // An optional evaluation to create after promoting the canaries - Eval *Evaluation -} - -// DeploymentPromoteRequest is used to promote task groups in a deployment -type DeploymentPromoteRequest struct { - DeploymentID string - - // All is to promote all task groups - All bool - - // Groups is used to set the promotion status per task group - Groups []string - - WriteRequest -} - -// ApplyDeploymentPromoteRequest is used to apply a promotion request via Raft -type ApplyDeploymentPromoteRequest struct { - DeploymentPromoteRequest - - // An optional evaluation to create after promoting the canaries - Eval *Evaluation -} - -// DeploymentPauseRequest is used to pause a deployment -type DeploymentPauseRequest struct { - DeploymentID string - - // Pause sets the pause status - Pause bool - - WriteRequest -} - -// DeploymentSpecificRequest is used to make a request specific to a particular -// deployment -type DeploymentSpecificRequest struct { - DeploymentID string - QueryOptions -} - -// DeploymentFailRequest is used to fail a particular deployment -type DeploymentFailRequest struct { - DeploymentID string - WriteRequest -} - -// SingleDeploymentResponse is used to respond with a single deployment -type SingleDeploymentResponse struct { - Deployment *Deployment - QueryMeta -} - -// GenericResponse is used to respond to a request where no -// specific response information is needed. -type GenericResponse struct { - WriteMeta -} - -// VersionResponse is used for the Status.Version reseponse -type VersionResponse struct { - Build string - Versions map[string]int - QueryMeta -} - -// JobRegisterResponse is used to respond to a job registration -type JobRegisterResponse struct { - EvalID string - EvalCreateIndex uint64 - JobModifyIndex uint64 - - // Warnings contains any warnings about the given job. These may include - // deprecation warnings. 
- Warnings string - - QueryMeta -} - -// JobDeregisterResponse is used to respond to a job deregistration -type JobDeregisterResponse struct { - EvalID string - EvalCreateIndex uint64 - JobModifyIndex uint64 - QueryMeta -} - -// JobValidateResponse is the response from validate request -type JobValidateResponse struct { - // DriverConfigValidated indicates whether the agent validated the driver - // config - DriverConfigValidated bool - - // ValidationErrors is a list of validation errors - ValidationErrors []string - - // Error is a string version of any error that may have occurred - Error string - - // Warnings contains any warnings about the given job. These may include - // deprecation warnings. - Warnings string -} - -// NodeUpdateResponse is used to respond to a node update -type NodeUpdateResponse struct { - HeartbeatTTL time.Duration - EvalIDs []string - EvalCreateIndex uint64 - NodeModifyIndex uint64 - - // LeaderRPCAddr is the RPC address of the current Raft Leader. If - // empty, the current Nomad Server is in the minority of a partition. - LeaderRPCAddr string - - // NumNodes is the number of Nomad nodes attached to this quorum of - // Nomad Servers at the time of the response. This value can - // fluctuate based on the health of the cluster between heartbeats. - NumNodes int32 - - // Servers is the full list of known Nomad servers in the local - // region. - Servers []*NodeServerInfo - - QueryMeta -} - -// NodeDrainUpdateResponse is used to respond to a node drain update -type NodeDrainUpdateResponse struct { - EvalIDs []string - EvalCreateIndex uint64 - NodeModifyIndex uint64 - QueryMeta -} - -// NodeAllocsResponse is used to return allocs for a single node -type NodeAllocsResponse struct { - Allocs []*Allocation - QueryMeta -} - -// NodeClientAllocsResponse is used to return allocs meta data for a single node -type NodeClientAllocsResponse struct { - Allocs map[string]uint64 - QueryMeta -} - -// SingleNodeResponse is used to return a single node -type SingleNodeResponse struct { - Node *Node - QueryMeta -} - -// NodeListResponse is used for a list request -type NodeListResponse struct { - Nodes []*NodeListStub - QueryMeta -} - -// SingleJobResponse is used to return a single job -type SingleJobResponse struct { - Job *Job - QueryMeta -} - -// JobSummaryResponse is used to return a single job summary -type JobSummaryResponse struct { - JobSummary *JobSummary - QueryMeta -} - -type JobDispatchResponse struct { - DispatchedJobID string - EvalID string - EvalCreateIndex uint64 - JobCreateIndex uint64 - WriteMeta -} - -// JobListResponse is used for a list request -type JobListResponse struct { - Jobs []*JobListStub - QueryMeta -} - -// JobVersionsRequest is used to get a jobs versions -type JobVersionsRequest struct { - JobID string - Diffs bool - QueryOptions -} - -// JobVersionsResponse is used for a job get versions request -type JobVersionsResponse struct { - Versions []*Job - Diffs []*JobDiff - QueryMeta -} - -// JobPlanResponse is used to respond to a job plan request -type JobPlanResponse struct { - // Annotations stores annotations explaining decisions the scheduler made. - Annotations *PlanAnnotations - - // FailedTGAllocs is the placement failures per task group. - FailedTGAllocs map[string]*AllocMetric - - // JobModifyIndex is the modification index of the job. The value can be - // used when running `nomad run` to ensure that the Job wasn’t modified - // since the last plan. If the job is being created, the value is zero. 
- JobModifyIndex uint64 - - // CreatedEvals is the set of evaluations created by the scheduler. The - // reasons for this can be rolling-updates or blocked evals. - CreatedEvals []*Evaluation - - // Diff contains the diff of the job and annotations on whether the change - // causes an in-place update or create/destroy - Diff *JobDiff - - // NextPeriodicLaunch is the time duration till the job would be launched if - // submitted. - NextPeriodicLaunch time.Time - - // Warnings contains any warnings about the given job. These may include - // deprecation warnings. - Warnings string - - WriteMeta -} - -// SingleAllocResponse is used to return a single allocation -type SingleAllocResponse struct { - Alloc *Allocation - QueryMeta -} - -// AllocsGetResponse is used to return a set of allocations -type AllocsGetResponse struct { - Allocs []*Allocation - QueryMeta -} - -// JobAllocationsResponse is used to return the allocations for a job -type JobAllocationsResponse struct { - Allocations []*AllocListStub - QueryMeta -} - -// JobEvaluationsResponse is used to return the evaluations for a job -type JobEvaluationsResponse struct { - Evaluations []*Evaluation - QueryMeta -} - -// SingleEvalResponse is used to return a single evaluation -type SingleEvalResponse struct { - Eval *Evaluation - QueryMeta -} - -// EvalDequeueResponse is used to return from a dequeue -type EvalDequeueResponse struct { - Eval *Evaluation - Token string - - // WaitIndex is the Raft index the worker should wait until invoking the - // scheduler. - WaitIndex uint64 - - QueryMeta -} - -// GetWaitIndex is used to retrieve the Raft index in which state should be at -// or beyond before invoking the scheduler. -func (e *EvalDequeueResponse) GetWaitIndex() uint64 { - // Prefer the wait index sent. This will be populated on all responses from - // 0.7.0 and above - if e.WaitIndex != 0 { - return e.WaitIndex - } else if e.Eval != nil { - return e.Eval.ModifyIndex - } - - // This should never happen - return 1 -} - -// PlanResponse is used to return from a PlanRequest -type PlanResponse struct { - Result *PlanResult - WriteMeta -} - -// AllocListResponse is used for a list request -type AllocListResponse struct { - Allocations []*AllocListStub - QueryMeta -} - -// DeploymentListResponse is used for a list request -type DeploymentListResponse struct { - Deployments []*Deployment - QueryMeta -} - -// EvalListResponse is used for a list request -type EvalListResponse struct { - Evaluations []*Evaluation - QueryMeta -} - -// EvalAllocationsResponse is used to return the allocations for an evaluation -type EvalAllocationsResponse struct { - Allocations []*AllocListStub - QueryMeta -} - -// PeriodicForceResponse is used to respond to a periodic job force launch -type PeriodicForceResponse struct { - EvalID string - EvalCreateIndex uint64 - WriteMeta -} - -// DeploymentUpdateResponse is used to respond to a deployment change. The -// response will include the modify index of the deployment as well as details -// of any triggered evaluation. -type DeploymentUpdateResponse struct { - EvalID string - EvalCreateIndex uint64 - DeploymentModifyIndex uint64 - - // RevertedJobVersion is the version the job was reverted to. If unset, the - // job wasn't reverted - RevertedJobVersion *uint64 - - WriteMeta -} - -const ( - NodeStatusInit = "initializing" - NodeStatusReady = "ready" - NodeStatusDown = "down" -) - -// ShouldDrainNode checks if a given node status should trigger an -// evaluation. Some states don't require any further action. 
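The node status constants above feed the two helpers defined next. A minimal illustrative sketch of how they combine, assuming the surrounding structs package and its existing imports; sketchNodeStatusChange is an invented name, not part of the patch:

// sketchNodeStatusChange (hypothetical) applies a status update the way the
// helpers below suggest: reject unknown statuses first (ShouldDrainNode panics
// on anything it does not recognize), then report whether the node can still
// take placements and whether a drain evaluation is needed.
func sketchNodeStatusChange(n *Node, newStatus string) (schedulable, evalNeeded bool, err error) {
	if !ValidNodeStatus(newStatus) {
		return false, false, fmt.Errorf("invalid node status %q", newStatus)
	}
	n.Status = newStatus
	// Ready also accounts for Drain; ShouldDrainNode only looks at the status.
	return n.Ready(), ShouldDrainNode(newStatus), nil
}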
-func ShouldDrainNode(status string) bool { - switch status { - case NodeStatusInit, NodeStatusReady: - return false - case NodeStatusDown: - return true - default: - panic(fmt.Sprintf("unhandled node status %s", status)) - } -} - -// ValidNodeStatus is used to check if a node status is valid -func ValidNodeStatus(status string) bool { - switch status { - case NodeStatusInit, NodeStatusReady, NodeStatusDown: - return true - default: - return false - } -} - -// Node is a representation of a schedulable client node -type Node struct { - // ID is a unique identifier for the node. It can be constructed - // by doing a concatenation of the Name and Datacenter as a simple - // approach. Alternatively a UUID may be used. - ID string - - // SecretID is an ID that is only known by the Node and the set of Servers. - // It is not accessible via the API and is used to authenticate nodes - // conducting priviledged activities. - SecretID string - - // Datacenter for this node - Datacenter string - - // Node name - Name string - - // HTTPAddr is the address on which the Nomad client is listening for http - // requests - HTTPAddr string - - // TLSEnabled indicates if the Agent has TLS enabled for the HTTP API - TLSEnabled bool - - // Attributes is an arbitrary set of key/value - // data that can be used for constraints. Examples - // include "kernel.name=linux", "arch=386", "driver.docker=1", - // "docker.runtime=1.8.3" - Attributes map[string]string - - // Resources is the available resources on the client. - // For example 'cpu=2' 'memory=2048' - Resources *Resources - - // Reserved is the set of resources that are reserved, - // and should be subtracted from the total resources for - // the purposes of scheduling. This may be provide certain - // high-watermark tolerances or because of external schedulers - // consuming resources. - Reserved *Resources - - // Links are used to 'link' this client to external - // systems. For example 'consul=foo.dc1' 'aws=i-83212' - // 'ami=ami-123' - Links map[string]string - - // Meta is used to associate arbitrary metadata with this - // client. This is opaque to Nomad. - Meta map[string]string - - // NodeClass is an opaque identifier used to group nodes - // together for the purpose of determining scheduling pressure. - NodeClass string - - // ComputedClass is a unique id that identifies nodes with a common set of - // attributes and capabilities. - ComputedClass string - - // Drain is controlled by the servers, and not the client. - // If true, no jobs will be scheduled to this node, and existing - // allocations will be drained. - Drain bool - - // Status of this node - Status string - - // StatusDescription is meant to provide more human useful information - StatusDescription string - - // StatusUpdatedAt is the time stamp at which the state of the node was - // updated - StatusUpdatedAt int64 - - // Raft Indexes - CreateIndex uint64 - ModifyIndex uint64 -} - -// Ready returns if the node is ready for running allocations -func (n *Node) Ready() bool { - return n.Status == NodeStatusReady && !n.Drain -} - -func (n *Node) Copy() *Node { - if n == nil { - return nil - } - nn := new(Node) - *nn = *n - nn.Attributes = helper.CopyMapStringString(nn.Attributes) - nn.Resources = nn.Resources.Copy() - nn.Reserved = nn.Reserved.Copy() - nn.Links = helper.CopyMapStringString(nn.Links) - nn.Meta = helper.CopyMapStringString(nn.Meta) - return nn -} - -// TerminalStatus returns if the current status is terminal and -// will no longer transition. 
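Copy and Ready above are easiest to see together. A small hypothetical sketch, assuming the surrounding structs package (sketchDrainNode is not a real helper in this file):

// sketchDrainNode (hypothetical) marks a node as draining on a deep copy so
// that an object shared with other readers is not mutated in place.
func sketchDrainNode(n *Node) (*Node, bool) {
	c := n.Copy()
	if c == nil {
		return nil, false
	}
	c.Drain = true
	// Ready reports false once Drain is set, even while Status is still "ready".
	return c, c.Ready()
}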
-func (n *Node) TerminalStatus() bool { - switch n.Status { - case NodeStatusDown: - return true - default: - return false - } -} - -// Stub returns a summarized version of the node -func (n *Node) Stub() *NodeListStub { - return &NodeListStub{ - ID: n.ID, - Datacenter: n.Datacenter, - Name: n.Name, - NodeClass: n.NodeClass, - Version: n.Attributes["nomad.version"], - Drain: n.Drain, - Status: n.Status, - StatusDescription: n.StatusDescription, - CreateIndex: n.CreateIndex, - ModifyIndex: n.ModifyIndex, - } -} - -// NodeListStub is used to return a subset of job information -// for the job list -type NodeListStub struct { - ID string - Datacenter string - Name string - NodeClass string - Version string - Drain bool - Status string - StatusDescription string - CreateIndex uint64 - ModifyIndex uint64 -} - -// Networks defined for a task on the Resources struct. -type Networks []*NetworkResource - -// Port assignment and IP for the given label or empty values. -func (ns Networks) Port(label string) (string, int) { - for _, n := range ns { - for _, p := range n.ReservedPorts { - if p.Label == label { - return n.IP, p.Value - } - } - for _, p := range n.DynamicPorts { - if p.Label == label { - return n.IP, p.Value - } - } - } - return "", 0 -} - -// Resources is used to define the resources available -// on a client -type Resources struct { - CPU int - MemoryMB int - DiskMB int - IOPS int - Networks Networks -} - -const ( - BytesInMegabyte = 1024 * 1024 -) - -// DefaultResources returns the default resources for a task. -func DefaultResources() *Resources { - return &Resources{ - CPU: 100, - MemoryMB: 10, - IOPS: 0, - } -} - -// DiskInBytes returns the amount of disk resources in bytes. -func (r *Resources) DiskInBytes() int64 { - return int64(r.DiskMB * BytesInMegabyte) -} - -// Merge merges this resource with another resource. -func (r *Resources) Merge(other *Resources) { - if other.CPU != 0 { - r.CPU = other.CPU - } - if other.MemoryMB != 0 { - r.MemoryMB = other.MemoryMB - } - if other.DiskMB != 0 { - r.DiskMB = other.DiskMB - } - if other.IOPS != 0 { - r.IOPS = other.IOPS - } - if len(other.Networks) != 0 { - r.Networks = other.Networks - } -} - -func (r *Resources) Canonicalize() { - // Ensure that an empty and nil slices are treated the same to avoid scheduling - // problems since we use reflect DeepEquals. - if len(r.Networks) == 0 { - r.Networks = nil - } - - for _, n := range r.Networks { - n.Canonicalize() - } -} - -// MeetsMinResources returns an error if the resources specified are less than -// the minimum allowed. 
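Networks.Port above returns the owning interface's IP together with the port value for a label, checking reserved ports before dynamic ones. A minimal sketch of turning that pair into an advertise address, assuming the surrounding structs package; sketchAdvertiseAddr is an invented name:

// sketchAdvertiseAddr (hypothetical) resolves a port label against a set of
// resources into a host:port string, or reports that the label is unknown.
func sketchAdvertiseAddr(r *Resources, label string) (string, bool) {
	ip, port := r.Networks.Port(label)
	if ip == "" || port <= 0 {
		return "", false
	}
	return fmt.Sprintf("%s:%d", ip, port), true
}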
-func (r *Resources) MeetsMinResources() error { - var mErr multierror.Error - if r.CPU < 20 { - mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum CPU value is 20; got %d", r.CPU)) - } - if r.MemoryMB < 10 { - mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum MemoryMB value is 10; got %d", r.MemoryMB)) - } - if r.IOPS < 0 { - mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum IOPS value is 0; got %d", r.IOPS)) - } - for i, n := range r.Networks { - if err := n.MeetsMinResources(); err != nil { - mErr.Errors = append(mErr.Errors, fmt.Errorf("network resource at index %d failed: %v", i, err)) - } - } - - return mErr.ErrorOrNil() -} - -// Copy returns a deep copy of the resources -func (r *Resources) Copy() *Resources { - if r == nil { - return nil - } - newR := new(Resources) - *newR = *r - if r.Networks != nil { - n := len(r.Networks) - newR.Networks = make([]*NetworkResource, n) - for i := 0; i < n; i++ { - newR.Networks[i] = r.Networks[i].Copy() - } - } - return newR -} - -// NetIndex finds the matching net index using device name -func (r *Resources) NetIndex(n *NetworkResource) int { - for idx, net := range r.Networks { - if net.Device == n.Device { - return idx - } - } - return -1 -} - -// Superset checks if one set of resources is a superset -// of another. This ignores network resources, and the NetworkIndex -// should be used for that. -func (r *Resources) Superset(other *Resources) (bool, string) { - if r.CPU < other.CPU { - return false, "cpu exhausted" - } - if r.MemoryMB < other.MemoryMB { - return false, "memory exhausted" - } - if r.DiskMB < other.DiskMB { - return false, "disk exhausted" - } - if r.IOPS < other.IOPS { - return false, "iops exhausted" - } - return true, "" -} - -// Add adds the resources of the delta to this, potentially -// returning an error if not possible. -func (r *Resources) Add(delta *Resources) error { - if delta == nil { - return nil - } - r.CPU += delta.CPU - r.MemoryMB += delta.MemoryMB - r.DiskMB += delta.DiskMB - r.IOPS += delta.IOPS - - for _, n := range delta.Networks { - // Find the matching interface by IP or CIDR - idx := r.NetIndex(n) - if idx == -1 { - r.Networks = append(r.Networks, n.Copy()) - } else { - r.Networks[idx].Add(n) - } - } - return nil -} - -func (r *Resources) GoString() string { - return fmt.Sprintf("*%#v", *r) -} - -type Port struct { - Label string - Value int -} - -// NetworkResource is used to represent available network -// resources -type NetworkResource struct { - Device string // Name of the device - CIDR string // CIDR block of addresses - IP string // Host IP address - MBits int // Throughput - ReservedPorts []Port // Host Reserved ports - DynamicPorts []Port // Host Dynamically assigned ports -} - -func (n *NetworkResource) Canonicalize() { - // Ensure that an empty and nil slices are treated the same to avoid scheduling - // problems since we use reflect DeepEquals. - if len(n.ReservedPorts) == 0 { - n.ReservedPorts = nil - } - if len(n.DynamicPorts) == 0 { - n.DynamicPorts = nil - } -} - -// MeetsMinResources returns an error if the resources specified are less than -// the minimum allowed. 
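Superset and Add above are the building blocks of a basic capacity check: accumulate what is already placed, add the new ask, and test the node's total against the result (Superset ignores networks, which are handled separately). A minimal sketch under those assumptions; sketchFits is an invented name:

// sketchFits (hypothetical) reports whether `ask` fits on a node given the
// node's total resources and the resources already in use.
func sketchFits(total, used, ask *Resources) (bool, string, error) {
	proposed := used.Copy()
	if proposed == nil {
		proposed = &Resources{}
	}
	if err := proposed.Add(ask); err != nil {
		return false, "", err
	}
	// Superset ignores network resources; a NetworkIndex would cover those.
	fits, reason := total.Superset(proposed)
	return fits, reason, nil
}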
-func (n *NetworkResource) MeetsMinResources() error { - var mErr multierror.Error - if n.MBits < 1 { - mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum MBits value is 1; got %d", n.MBits)) - } - return mErr.ErrorOrNil() -} - -// Copy returns a deep copy of the network resource -func (n *NetworkResource) Copy() *NetworkResource { - if n == nil { - return nil - } - newR := new(NetworkResource) - *newR = *n - if n.ReservedPorts != nil { - newR.ReservedPorts = make([]Port, len(n.ReservedPorts)) - copy(newR.ReservedPorts, n.ReservedPorts) - } - if n.DynamicPorts != nil { - newR.DynamicPorts = make([]Port, len(n.DynamicPorts)) - copy(newR.DynamicPorts, n.DynamicPorts) - } - return newR -} - -// Add adds the resources of the delta to this, potentially -// returning an error if not possible. -func (n *NetworkResource) Add(delta *NetworkResource) { - if len(delta.ReservedPorts) > 0 { - n.ReservedPorts = append(n.ReservedPorts, delta.ReservedPorts...) - } - n.MBits += delta.MBits - n.DynamicPorts = append(n.DynamicPorts, delta.DynamicPorts...) -} - -func (n *NetworkResource) GoString() string { - return fmt.Sprintf("*%#v", *n) -} - -// PortLabels returns a map of port labels to their assigned host ports. -func (n *NetworkResource) PortLabels() map[string]int { - num := len(n.ReservedPorts) + len(n.DynamicPorts) - labelValues := make(map[string]int, num) - for _, port := range n.ReservedPorts { - labelValues[port.Label] = port.Value - } - for _, port := range n.DynamicPorts { - labelValues[port.Label] = port.Value - } - return labelValues -} - -const ( - // JobTypeNomad is reserved for internal system tasks and is - // always handled by the CoreScheduler. - JobTypeCore = "_core" - JobTypeService = "service" - JobTypeBatch = "batch" - JobTypeSystem = "system" -) - -const ( - JobStatusPending = "pending" // Pending means the job is waiting on scheduling - JobStatusRunning = "running" // Running means the job has non-terminal allocations - JobStatusDead = "dead" // Dead means all evaluation's and allocations are terminal -) - -const ( - // JobMinPriority is the minimum allowed priority - JobMinPriority = 1 - - // JobDefaultPriority is the default priority if not - // not specified. - JobDefaultPriority = 50 - - // JobMaxPriority is the maximum allowed priority - JobMaxPriority = 100 - - // Ensure CoreJobPriority is higher than any user - // specified job so that it gets priority. This is important - // for the system to remain healthy. - CoreJobPriority = JobMaxPriority * 2 - - // JobTrackedVersions is the number of historic job versions that are - // kept. - JobTrackedVersions = 6 -) - -// Job is the scope of a scheduling request to Nomad. It is the largest -// scoped object, and is a named collection of task groups. Each task group -// is further composed of tasks. A task group (TG) is the unit of scheduling -// however. -type Job struct { - // Stop marks whether the user has stopped the job. A stopped job will - // have all created allocations stopped and acts as a way to stop a job - // without purging it from the system. This allows existing allocs to be - // queried and the job to be inspected as it is being killed. - Stop bool - - // Region is the Nomad region that handles scheduling this job - Region string - - // Namespace is the namespace the job is submitted into. - Namespace string - - // ID is a unique identifier for the job per region. 
It can be - // specified hierarchically like LineOfBiz/OrgName/Team/Project - ID string - - // ParentID is the unique identifier of the job that spawned this job. - ParentID string - - // Name is the logical name of the job used to refer to it. This is unique - // per region, but not unique globally. - Name string - - // Type is used to control various behaviors about the job. Most jobs - // are service jobs, meaning they are expected to be long lived. - // Some jobs are batch oriented meaning they run and then terminate. - // This can be extended in the future to support custom schedulers. - Type string - - // Priority is used to control scheduling importance and if this job - // can preempt other jobs. - Priority int - - // AllAtOnce is used to control if incremental scheduling of task groups - // is allowed or if we must do a gang scheduling of the entire job. This - // can slow down larger jobs if resources are not available. - AllAtOnce bool - - // Datacenters contains all the datacenters this job is allowed to span - Datacenters []string - - // Constraints can be specified at a job level and apply to - // all the task groups and tasks. - Constraints []*Constraint - - // TaskGroups are the collections of task groups that this job needs - // to run. Each task group is an atomic unit of scheduling and placement. - TaskGroups []*TaskGroup - - // COMPAT: Remove in 0.7.0. Stagger is deprecated in 0.6.0. - Update UpdateStrategy - - // Periodic is used to define the interval the job is run at. - Periodic *PeriodicConfig - - // ParameterizedJob is used to specify the job as a parameterized job - // for dispatching. - ParameterizedJob *ParameterizedJobConfig - - // Payload is the payload supplied when the job was dispatched. - Payload []byte - - // Meta is used to associate arbitrary metadata with this - // job. This is opaque to Nomad. - Meta map[string]string - - // VaultToken is the Vault token that proves the submitter of the job has - // access to the specified Vault policies. This field is only used to - // transfer the token and is not stored after Job submission. - VaultToken string - - // Job status - Status string - - // StatusDescription is meant to provide more human useful information - StatusDescription string - - // Stable marks a job as stable. Stability is only defined on "service" and - // "system" jobs. The stability of a job will be set automatically as part - // of a deployment and can be manually set via APIs. - Stable bool - - // Version is a monitonically increasing version number that is incremened - // on each job register. - Version uint64 - - // SubmitTime is the time at which the job was submitted as a UnixNano in - // UTC - SubmitTime int64 - - // Raft Indexes - CreateIndex uint64 - ModifyIndex uint64 - JobModifyIndex uint64 -} - -// Canonicalize is used to canonicalize fields in the Job. This should be called -// when registering a Job. A set of warnings are returned if the job was changed -// in anyway that the user should be made aware of. -func (j *Job) Canonicalize() (warnings error) { - if j == nil { - return nil - } - - var mErr multierror.Error - // Ensure that an empty and nil map are treated the same to avoid scheduling - // problems since we use reflect DeepEquals. - if len(j.Meta) == 0 { - j.Meta = nil - } - - // Ensure the job is in a namespace. 
- if j.Namespace == "" { - j.Namespace = DefaultNamespace - } - - for _, tg := range j.TaskGroups { - tg.Canonicalize(j) - } - - if j.ParameterizedJob != nil { - j.ParameterizedJob.Canonicalize() - } - - if j.Periodic != nil { - j.Periodic.Canonicalize() - } - - // COMPAT: Remove in 0.7.0 - // Rewrite any job that has an update block with pre 0.6.0 syntax. - jobHasOldUpdate := j.Update.Stagger > 0 && j.Update.MaxParallel > 0 - if jobHasOldUpdate && j.Type != JobTypeBatch { - // Build an appropriate update block and copy it down to each task group - base := DefaultUpdateStrategy.Copy() - base.MaxParallel = j.Update.MaxParallel - base.MinHealthyTime = j.Update.Stagger - - // Add to each task group, modifying as needed - upgraded := false - l := len(j.TaskGroups) - for _, tg := range j.TaskGroups { - // The task group doesn't need upgrading if it has an update block with the new syntax - u := tg.Update - if u != nil && u.Stagger > 0 && u.MaxParallel > 0 && - u.HealthCheck != "" && u.MinHealthyTime > 0 && u.HealthyDeadline > 0 { - continue - } - - upgraded = true - - // The MaxParallel for the job should be 10% of the total count - // unless there is just one task group then we can infer the old - // max parallel should be the new - tgu := base.Copy() - if l != 1 { - // RoundTo 10% - var percent float64 = float64(tg.Count) * 0.1 - tgu.MaxParallel = int(percent + 0.5) - } - - // Safety guards - if tgu.MaxParallel == 0 { - tgu.MaxParallel = 1 - } else if tgu.MaxParallel > tg.Count { - tgu.MaxParallel = tg.Count - } - - tg.Update = tgu - } - - if upgraded { - w := "A best effort conversion to new update stanza introduced in v0.6.0 applied. " + - "Please update upgrade stanza before v0.7.0." - multierror.Append(&mErr, fmt.Errorf(w)) - } - } - - // Ensure that the batch job doesn't have new style or old style update - // stanza. Unfortunately are scanning here because we have to deprecate over - // a release so we can't check in the task group since that may be new style - // but wouldn't capture the old style and we don't want to have duplicate - // warnings. - if j.Type == JobTypeBatch { - displayWarning := jobHasOldUpdate - j.Update.Stagger = 0 - j.Update.MaxParallel = 0 - j.Update.HealthCheck = "" - j.Update.MinHealthyTime = 0 - j.Update.HealthyDeadline = 0 - j.Update.AutoRevert = false - j.Update.Canary = 0 - - // Remove any update spec from the task groups - for _, tg := range j.TaskGroups { - if tg.Update != nil { - displayWarning = true - tg.Update = nil - } - } - - if displayWarning { - w := "Update stanza is disallowed for batch jobs since v0.6.0. " + - "The update block has automatically been removed" - multierror.Append(&mErr, fmt.Errorf(w)) - } - } - - return mErr.ErrorOrNil() -} - -// Copy returns a deep copy of the Job. It is expected that callers use recover. -// This job can panic if the deep copy failed as it uses reflection. 
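Because Canonicalize mutates the job in place and its return value is a set of warnings rather than a fatal error, a caller that only wants to preview the canonical form can pair it with the deep copy defined just below. A rough sketch, assuming the surrounding structs package; sketchCanonicalPreview is an invented name:

// sketchCanonicalPreview (hypothetical) canonicalizes a deep copy of the job,
// leaving the caller's copy untouched and returning any conversion warnings
// (for example the update-stanza rewrite above).
func sketchCanonicalPreview(j *Job) (*Job, error) {
	c := j.Copy()
	if c == nil {
		return nil, nil
	}
	warnings := c.Canonicalize()
	return c, warnings
}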
-func (j *Job) Copy() *Job { - if j == nil { - return nil - } - nj := new(Job) - *nj = *j - nj.Datacenters = helper.CopySliceString(nj.Datacenters) - nj.Constraints = CopySliceConstraints(nj.Constraints) - - if j.TaskGroups != nil { - tgs := make([]*TaskGroup, len(nj.TaskGroups)) - for i, tg := range nj.TaskGroups { - tgs[i] = tg.Copy() - } - nj.TaskGroups = tgs - } - - nj.Periodic = nj.Periodic.Copy() - nj.Meta = helper.CopyMapStringString(nj.Meta) - nj.ParameterizedJob = nj.ParameterizedJob.Copy() - return nj -} - -// Validate is used to sanity check a job input -func (j *Job) Validate() error { - var mErr multierror.Error - - if j.Region == "" { - mErr.Errors = append(mErr.Errors, errors.New("Missing job region")) - } - if j.ID == "" { - mErr.Errors = append(mErr.Errors, errors.New("Missing job ID")) - } else if strings.Contains(j.ID, " ") { - mErr.Errors = append(mErr.Errors, errors.New("Job ID contains a space")) - } - if j.Name == "" { - mErr.Errors = append(mErr.Errors, errors.New("Missing job name")) - } - if j.Namespace == "" { - mErr.Errors = append(mErr.Errors, errors.New("Job must be in a namespace")) - } - switch j.Type { - case JobTypeCore, JobTypeService, JobTypeBatch, JobTypeSystem: - case "": - mErr.Errors = append(mErr.Errors, errors.New("Missing job type")) - default: - mErr.Errors = append(mErr.Errors, fmt.Errorf("Invalid job type: %q", j.Type)) - } - if j.Priority < JobMinPriority || j.Priority > JobMaxPriority { - mErr.Errors = append(mErr.Errors, fmt.Errorf("Job priority must be between [%d, %d]", JobMinPriority, JobMaxPriority)) - } - if len(j.Datacenters) == 0 { - mErr.Errors = append(mErr.Errors, errors.New("Missing job datacenters")) - } - if len(j.TaskGroups) == 0 { - mErr.Errors = append(mErr.Errors, errors.New("Missing job task groups")) - } - for idx, constr := range j.Constraints { - if err := constr.Validate(); err != nil { - outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err) - mErr.Errors = append(mErr.Errors, outer) - } - } - - // Check for duplicate task groups - taskGroups := make(map[string]int) - for idx, tg := range j.TaskGroups { - if tg.Name == "" { - mErr.Errors = append(mErr.Errors, fmt.Errorf("Job task group %d missing name", idx+1)) - } else if existing, ok := taskGroups[tg.Name]; ok { - mErr.Errors = append(mErr.Errors, fmt.Errorf("Job task group %d redefines '%s' from group %d", idx+1, tg.Name, existing+1)) - } else { - taskGroups[tg.Name] = idx - } - - if j.Type == "system" && tg.Count > 1 { - mErr.Errors = append(mErr.Errors, - fmt.Errorf("Job task group %s has count %d. Count cannot exceed 1 with system scheduler", - tg.Name, tg.Count)) - } - } - - // Validate the task group - for _, tg := range j.TaskGroups { - if err := tg.Validate(j); err != nil { - outer := fmt.Errorf("Task group %s validation failed: %v", tg.Name, err) - mErr.Errors = append(mErr.Errors, outer) - } - } - - // Validate periodic is only used with batch jobs. 
- if j.IsPeriodic() && j.Periodic.Enabled { - if j.Type != JobTypeBatch { - mErr.Errors = append(mErr.Errors, - fmt.Errorf("Periodic can only be used with %q scheduler", JobTypeBatch)) - } - - if err := j.Periodic.Validate(); err != nil { - mErr.Errors = append(mErr.Errors, err) - } - } - - if j.IsParameterized() { - if j.Type != JobTypeBatch { - mErr.Errors = append(mErr.Errors, - fmt.Errorf("Parameterized job can only be used with %q scheduler", JobTypeBatch)) - } - - if err := j.ParameterizedJob.Validate(); err != nil { - mErr.Errors = append(mErr.Errors, err) - } - } - - return mErr.ErrorOrNil() -} - -// Warnings returns a list of warnings that may be from dubious settings or -// deprecation warnings. -func (j *Job) Warnings() error { - var mErr multierror.Error - - // Check the groups - for _, tg := range j.TaskGroups { - if err := tg.Warnings(j); err != nil { - outer := fmt.Errorf("Group %q has warnings: %v", tg.Name, err) - mErr.Errors = append(mErr.Errors, outer) - } - } - - return mErr.ErrorOrNil() -} - -// LookupTaskGroup finds a task group by name -func (j *Job) LookupTaskGroup(name string) *TaskGroup { - for _, tg := range j.TaskGroups { - if tg.Name == name { - return tg - } - } - return nil -} - -// CombinedTaskMeta takes a TaskGroup and Task name and returns the combined -// meta data for the task. When joining Job, Group and Task Meta, the precedence -// is by deepest scope (Task > Group > Job). -func (j *Job) CombinedTaskMeta(groupName, taskName string) map[string]string { - group := j.LookupTaskGroup(groupName) - if group == nil { - return nil - } - - task := group.LookupTask(taskName) - if task == nil { - return nil - } - - meta := helper.CopyMapStringString(task.Meta) - if meta == nil { - meta = make(map[string]string, len(group.Meta)+len(j.Meta)) - } - - // Add the group specific meta - for k, v := range group.Meta { - if _, ok := meta[k]; !ok { - meta[k] = v - } - } - - // Add the job specific meta - for k, v := range j.Meta { - if _, ok := meta[k]; !ok { - meta[k] = v - } - } - - return meta -} - -// Stopped returns if a job is stopped. -func (j *Job) Stopped() bool { - return j == nil || j.Stop -} - -// HasUpdateStrategy returns if any task group in the job has an update strategy -func (j *Job) HasUpdateStrategy() bool { - for _, tg := range j.TaskGroups { - if tg.Update != nil { - return true - } - } - - return false -} - -// Stub is used to return a summary of the job -func (j *Job) Stub(summary *JobSummary) *JobListStub { - return &JobListStub{ - ID: j.ID, - ParentID: j.ParentID, - Name: j.Name, - Type: j.Type, - Priority: j.Priority, - Periodic: j.IsPeriodic(), - ParameterizedJob: j.IsParameterized(), - Stop: j.Stop, - Status: j.Status, - StatusDescription: j.StatusDescription, - CreateIndex: j.CreateIndex, - ModifyIndex: j.ModifyIndex, - JobModifyIndex: j.JobModifyIndex, - SubmitTime: j.SubmitTime, - JobSummary: summary, - } -} - -// IsPeriodic returns whether a job is periodic. -func (j *Job) IsPeriodic() bool { - return j.Periodic != nil -} - -// IsParameterized returns whether a job is parameterized job. 
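The Task > Group > Job precedence documented for CombinedTaskMeta above is worth a concrete example. A small sketch, assuming the surrounding structs package and only the Task fields already used elsewhere in this file; sketchMetaPrecedence is an invented name:

// sketchMetaPrecedence (hypothetical) shows how overlapping meta keys resolve:
// the task value wins, the group overrides the job, and job-level keys that
// are not overridden fall through.
func sketchMetaPrecedence() map[string]string {
	j := &Job{
		Meta: map[string]string{"owner": "platform", "tier": "job"},
		TaskGroups: []*TaskGroup{{
			Name: "web",
			Meta: map[string]string{"tier": "group"},
			Tasks: []*Task{{
				Name: "frontend",
				Meta: map[string]string{"tier": "task"},
			}},
		}},
	}
	// Returns {"tier": "task", "owner": "platform"}.
	return j.CombinedTaskMeta("web", "frontend")
}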
-func (j *Job) IsParameterized() bool { - return j.ParameterizedJob != nil -} - -// VaultPolicies returns the set of Vault policies per task group, per task -func (j *Job) VaultPolicies() map[string]map[string]*Vault { - policies := make(map[string]map[string]*Vault, len(j.TaskGroups)) - - for _, tg := range j.TaskGroups { - tgPolicies := make(map[string]*Vault, len(tg.Tasks)) - - for _, task := range tg.Tasks { - if task.Vault == nil { - continue - } - - tgPolicies[task.Name] = task.Vault - } - - if len(tgPolicies) != 0 { - policies[tg.Name] = tgPolicies - } - } - - return policies -} - -// RequiredSignals returns a mapping of task groups to tasks to their required -// set of signals -func (j *Job) RequiredSignals() map[string]map[string][]string { - signals := make(map[string]map[string][]string) - - for _, tg := range j.TaskGroups { - for _, task := range tg.Tasks { - // Use this local one as a set - taskSignals := make(map[string]struct{}) - - // Check if the Vault change mode uses signals - if task.Vault != nil && task.Vault.ChangeMode == VaultChangeModeSignal { - taskSignals[task.Vault.ChangeSignal] = struct{}{} - } - - // Check if any template change mode uses signals - for _, t := range task.Templates { - if t.ChangeMode != TemplateChangeModeSignal { - continue - } - - taskSignals[t.ChangeSignal] = struct{}{} - } - - // Flatten and sort the signals - l := len(taskSignals) - if l == 0 { - continue - } - - flat := make([]string, 0, l) - for sig := range taskSignals { - flat = append(flat, sig) - } - - sort.Strings(flat) - tgSignals, ok := signals[tg.Name] - if !ok { - tgSignals = make(map[string][]string) - signals[tg.Name] = tgSignals - } - tgSignals[task.Name] = flat - } - - } - - return signals -} - -// SpecChanged determines if the functional specification has changed between -// two job versions. -func (j *Job) SpecChanged(new *Job) bool { - if j == nil { - return new != nil - } - - // Create a copy of the new job - c := new.Copy() - - // Update the new job so we can do a reflect - c.Status = j.Status - c.StatusDescription = j.StatusDescription - c.Stable = j.Stable - c.Version = j.Version - c.CreateIndex = j.CreateIndex - c.ModifyIndex = j.ModifyIndex - c.JobModifyIndex = j.JobModifyIndex - c.SubmitTime = j.SubmitTime - - // Deep equals the jobs - return !reflect.DeepEqual(j, c) -} - -func (j *Job) SetSubmitTime() { - j.SubmitTime = time.Now().UTC().UnixNano() -} - -// JobListStub is used to return a subset of job information -// for the job list -type JobListStub struct { - ID string - ParentID string - Name string - Type string - Priority int - Periodic bool - ParameterizedJob bool - Stop bool - Status string - StatusDescription string - JobSummary *JobSummary - CreateIndex uint64 - ModifyIndex uint64 - JobModifyIndex uint64 - SubmitTime int64 -} - -// JobSummary summarizes the state of the allocations of a job -type JobSummary struct { - // JobID is the ID of the job the summary is for - JobID string - - // Namespace is the namespace of the job and its summary - Namespace string - - // Summmary contains the summary per task group for the Job - Summary map[string]TaskGroupSummary - - // Children contains a summary for the children of this job. 
- Children *JobChildrenSummary - - // Raft Indexes - CreateIndex uint64 - ModifyIndex uint64 -} - -// Copy returns a new copy of JobSummary -func (js *JobSummary) Copy() *JobSummary { - newJobSummary := new(JobSummary) - *newJobSummary = *js - newTGSummary := make(map[string]TaskGroupSummary, len(js.Summary)) - for k, v := range js.Summary { - newTGSummary[k] = v - } - newJobSummary.Summary = newTGSummary - newJobSummary.Children = newJobSummary.Children.Copy() - return newJobSummary -} - -// JobChildrenSummary contains the summary of children job statuses -type JobChildrenSummary struct { - Pending int64 - Running int64 - Dead int64 -} - -// Copy returns a new copy of a JobChildrenSummary -func (jc *JobChildrenSummary) Copy() *JobChildrenSummary { - if jc == nil { - return nil - } - - njc := new(JobChildrenSummary) - *njc = *jc - return njc -} - -// TaskGroup summarizes the state of all the allocations of a particular -// TaskGroup -type TaskGroupSummary struct { - Queued int - Complete int - Failed int - Running int - Starting int - Lost int -} - -const ( - // Checks uses any registered health check state in combination with task - // states to determine if a allocation is healthy. - UpdateStrategyHealthCheck_Checks = "checks" - - // TaskStates uses the task states of an allocation to determine if the - // allocation is healthy. - UpdateStrategyHealthCheck_TaskStates = "task_states" - - // Manual allows the operator to manually signal to Nomad when an - // allocations is healthy. This allows more advanced health checking that is - // outside of the scope of Nomad. - UpdateStrategyHealthCheck_Manual = "manual" -) - -var ( - // DefaultUpdateStrategy provides a baseline that can be used to upgrade - // jobs with the old policy or for populating field defaults. - DefaultUpdateStrategy = &UpdateStrategy{ - Stagger: 30 * time.Second, - MaxParallel: 1, - HealthCheck: UpdateStrategyHealthCheck_Checks, - MinHealthyTime: 10 * time.Second, - HealthyDeadline: 5 * time.Minute, - AutoRevert: false, - Canary: 0, - } -) - -// UpdateStrategy is used to modify how updates are done -type UpdateStrategy struct { - // Stagger is used to determine the rate at which allocations are migrated - // due to down or draining nodes. - Stagger time.Duration - - // MaxParallel is how many updates can be done in parallel - MaxParallel int - - // HealthCheck specifies the mechanism in which allocations are marked - // healthy or unhealthy as part of a deployment. - HealthCheck string - - // MinHealthyTime is the minimum time an allocation must be in the healthy - // state before it is marked as healthy, unblocking more alllocations to be - // rolled. - MinHealthyTime time.Duration - - // HealthyDeadline is the time in which an allocation must be marked as - // healthy before it is automatically transistioned to unhealthy. This time - // period doesn't count against the MinHealthyTime. - HealthyDeadline time.Duration - - // AutoRevert declares that if a deployment fails because of unhealthy - // allocations, there should be an attempt to auto-revert the job to a - // stable version. - AutoRevert bool - - // Canary is the number of canaries to deploy when a change to the task - // group is detected. 
- Canary int -} - -func (u *UpdateStrategy) Copy() *UpdateStrategy { - if u == nil { - return nil - } - - copy := new(UpdateStrategy) - *copy = *u - return copy -} - -func (u *UpdateStrategy) Validate() error { - if u == nil { - return nil - } - - var mErr multierror.Error - switch u.HealthCheck { - case UpdateStrategyHealthCheck_Checks, UpdateStrategyHealthCheck_TaskStates, UpdateStrategyHealthCheck_Manual: - default: - multierror.Append(&mErr, fmt.Errorf("Invalid health check given: %q", u.HealthCheck)) - } - - if u.MaxParallel < 1 { - multierror.Append(&mErr, fmt.Errorf("Max parallel can not be less than one: %d < 1", u.MaxParallel)) - } - if u.Canary < 0 { - multierror.Append(&mErr, fmt.Errorf("Canary count can not be less than zero: %d < 0", u.Canary)) - } - if u.MinHealthyTime < 0 { - multierror.Append(&mErr, fmt.Errorf("Minimum healthy time may not be less than zero: %v", u.MinHealthyTime)) - } - if u.HealthyDeadline <= 0 { - multierror.Append(&mErr, fmt.Errorf("Healthy deadline must be greater than zero: %v", u.HealthyDeadline)) - } - if u.MinHealthyTime >= u.HealthyDeadline { - multierror.Append(&mErr, fmt.Errorf("Minimum healthy time must be less than healthy deadline: %v > %v", u.MinHealthyTime, u.HealthyDeadline)) - } - if u.Stagger <= 0 { - multierror.Append(&mErr, fmt.Errorf("Stagger must be greater than zero: %v", u.Stagger)) - } - - return mErr.ErrorOrNil() -} - -// TODO(alexdadgar): Remove once no longer used by the scheduler. -// Rolling returns if a rolling strategy should be used -func (u *UpdateStrategy) Rolling() bool { - return u.Stagger > 0 && u.MaxParallel > 0 -} - -const ( - // PeriodicSpecCron is used for a cron spec. - PeriodicSpecCron = "cron" - - // PeriodicSpecTest is only used by unit tests. It is a sorted, comma - // separated list of unix timestamps at which to launch. - PeriodicSpecTest = "_internal_test" -) - -// Periodic defines the interval a job should be run at. -type PeriodicConfig struct { - // Enabled determines if the job should be run periodically. - Enabled bool - - // Spec specifies the interval the job should be run as. It is parsed based - // on the SpecType. - Spec string - - // SpecType defines the format of the spec. - SpecType string - - // ProhibitOverlap enforces that spawned jobs do not run in parallel. - ProhibitOverlap bool - - // TimeZone is the user specified string that determines the time zone to - // launch against. The time zones must be specified from IANA Time Zone - // database, such as "America/New_York". 
- // Reference: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones - // Reference: https://www.iana.org/time-zones - TimeZone string - - // location is the time zone to evaluate the launch time against - location *time.Location -} - -func (p *PeriodicConfig) Copy() *PeriodicConfig { - if p == nil { - return nil - } - np := new(PeriodicConfig) - *np = *p - return np -} - -func (p *PeriodicConfig) Validate() error { - if !p.Enabled { - return nil - } - - var mErr multierror.Error - if p.Spec == "" { - multierror.Append(&mErr, fmt.Errorf("Must specify a spec")) - } - - // Check if we got a valid time zone - if p.TimeZone != "" { - if _, err := time.LoadLocation(p.TimeZone); err != nil { - multierror.Append(&mErr, fmt.Errorf("Invalid time zone %q: %v", p.TimeZone, err)) - } - } - - switch p.SpecType { - case PeriodicSpecCron: - // Validate the cron spec - if _, err := cronexpr.Parse(p.Spec); err != nil { - multierror.Append(&mErr, fmt.Errorf("Invalid cron spec %q: %v", p.Spec, err)) - } - case PeriodicSpecTest: - // No-op - default: - multierror.Append(&mErr, fmt.Errorf("Unknown periodic specification type %q", p.SpecType)) - } - - return mErr.ErrorOrNil() -} - -func (p *PeriodicConfig) Canonicalize() { - // Load the location - l, err := time.LoadLocation(p.TimeZone) - if err != nil { - p.location = time.UTC - } - - p.location = l -} - -// Next returns the closest time instant matching the spec that is after the -// passed time. If no matching instance exists, the zero value of time.Time is -// returned. The `time.Location` of the returned value matches that of the -// passed time. -func (p *PeriodicConfig) Next(fromTime time.Time) time.Time { - switch p.SpecType { - case PeriodicSpecCron: - if e, err := cronexpr.Parse(p.Spec); err == nil { - return e.Next(fromTime) - } - case PeriodicSpecTest: - split := strings.Split(p.Spec, ",") - if len(split) == 1 && split[0] == "" { - return time.Time{} - } - - // Parse the times - times := make([]time.Time, len(split)) - for i, s := range split { - unix, err := strconv.Atoi(s) - if err != nil { - return time.Time{} - } - - times[i] = time.Unix(int64(unix), 0) - } - - // Find the next match - for _, next := range times { - if fromTime.Before(next) { - return next - } - } - } - - return time.Time{} -} - -// GetLocation returns the location to use for determining the time zone to run -// the periodic job against. -func (p *PeriodicConfig) GetLocation() *time.Location { - // Jobs pre 0.5.5 will not have this - if p.location != nil { - return p.location - } - - return time.UTC -} - -const ( - // PeriodicLaunchSuffix is the string appended to the periodic jobs ID - // when launching derived instances of it. - PeriodicLaunchSuffix = "/periodic-" -) - -// PeriodicLaunch tracks the last launch time of a periodic job. -type PeriodicLaunch struct { - ID string // ID of the periodic job. - Namespace string // Namespace of the periodic job - Launch time.Time // The last launch time. - - // Raft Indexes - CreateIndex uint64 - ModifyIndex uint64 -} - -const ( - DispatchPayloadForbidden = "forbidden" - DispatchPayloadOptional = "optional" - DispatchPayloadRequired = "required" - - // DispatchLaunchSuffix is the string appended to the parameterized job's ID - // when dispatching instances of it. 
- DispatchLaunchSuffix = "/dispatch-" -) - -// ParameterizedJobConfig is used to configure the parameterized job -type ParameterizedJobConfig struct { - // Payload configure the payload requirements - Payload string - - // MetaRequired is metadata keys that must be specified by the dispatcher - MetaRequired []string - - // MetaOptional is metadata keys that may be specified by the dispatcher - MetaOptional []string -} - -func (d *ParameterizedJobConfig) Validate() error { - var mErr multierror.Error - switch d.Payload { - case DispatchPayloadOptional, DispatchPayloadRequired, DispatchPayloadForbidden: - default: - multierror.Append(&mErr, fmt.Errorf("Unknown payload requirement: %q", d.Payload)) - } - - // Check that the meta configurations are disjoint sets - disjoint, offending := helper.SliceSetDisjoint(d.MetaRequired, d.MetaOptional) - if !disjoint { - multierror.Append(&mErr, fmt.Errorf("Required and optional meta keys should be disjoint. Following keys exist in both: %v", offending)) - } - - return mErr.ErrorOrNil() -} - -func (d *ParameterizedJobConfig) Canonicalize() { - if d.Payload == "" { - d.Payload = DispatchPayloadOptional - } -} - -func (d *ParameterizedJobConfig) Copy() *ParameterizedJobConfig { - if d == nil { - return nil - } - nd := new(ParameterizedJobConfig) - *nd = *d - nd.MetaOptional = helper.CopySliceString(nd.MetaOptional) - nd.MetaRequired = helper.CopySliceString(nd.MetaRequired) - return nd -} - -// DispatchedID returns an ID appropriate for a job dispatched against a -// particular parameterized job -func DispatchedID(templateID string, t time.Time) string { - u := GenerateUUID()[:8] - return fmt.Sprintf("%s%s%d-%s", templateID, DispatchLaunchSuffix, t.Unix(), u) -} - -// DispatchPayloadConfig configures how a task gets its input from a job dispatch -type DispatchPayloadConfig struct { - // File specifies a relative path to where the input data should be written - File string -} - -func (d *DispatchPayloadConfig) Copy() *DispatchPayloadConfig { - if d == nil { - return nil - } - nd := new(DispatchPayloadConfig) - *nd = *d - return nd -} - -func (d *DispatchPayloadConfig) Validate() error { - // Verify the destination doesn't escape - escaped, err := PathEscapesAllocDir("task/local/", d.File) - if err != nil { - return fmt.Errorf("invalid destination path: %v", err) - } else if escaped { - return fmt.Errorf("destination escapes allocation directory") - } - - return nil -} - -var ( - defaultServiceJobRestartPolicy = RestartPolicy{ - Delay: 15 * time.Second, - Attempts: 2, - Interval: 1 * time.Minute, - Mode: RestartPolicyModeDelay, - } - defaultBatchJobRestartPolicy = RestartPolicy{ - Delay: 15 * time.Second, - Attempts: 15, - Interval: 7 * 24 * time.Hour, - Mode: RestartPolicyModeDelay, - } -) - -const ( - // RestartPolicyModeDelay causes an artificial delay till the next interval is - // reached when the specified attempts have been reached in the interval. - RestartPolicyModeDelay = "delay" - - // RestartPolicyModeFail causes a job to fail if the specified number of - // attempts are reached within an interval. - RestartPolicyModeFail = "fail" - - // RestartPolicyMinInterval is the minimum interval that is accepted for a - // restart policy. - RestartPolicyMinInterval = 5 * time.Second -) - -// RestartPolicy configures how Tasks are restarted when they crash or fail. -type RestartPolicy struct { - // Attempts is the number of restart that will occur in an interval. 
- Attempts int - - // Interval is a duration in which we can limit the number of restarts - // within. - Interval time.Duration - - // Delay is the time between a failure and a restart. - Delay time.Duration - - // Mode controls what happens when the task restarts more than attempt times - // in an interval. - Mode string -} - -func (r *RestartPolicy) Copy() *RestartPolicy { - if r == nil { - return nil - } - nrp := new(RestartPolicy) - *nrp = *r - return nrp -} - -func (r *RestartPolicy) Validate() error { - var mErr multierror.Error - switch r.Mode { - case RestartPolicyModeDelay, RestartPolicyModeFail: - default: - multierror.Append(&mErr, fmt.Errorf("Unsupported restart mode: %q", r.Mode)) - } - - // Check for ambiguous/confusing settings - if r.Attempts == 0 && r.Mode != RestartPolicyModeFail { - multierror.Append(&mErr, fmt.Errorf("Restart policy %q with %d attempts is ambiguous", r.Mode, r.Attempts)) - } - - if r.Interval.Nanoseconds() < RestartPolicyMinInterval.Nanoseconds() { - multierror.Append(&mErr, fmt.Errorf("Interval can not be less than %v (got %v)", RestartPolicyMinInterval, r.Interval)) - } - if time.Duration(r.Attempts)*r.Delay > r.Interval { - multierror.Append(&mErr, - fmt.Errorf("Nomad can't restart the TaskGroup %v times in an interval of %v with a delay of %v", r.Attempts, r.Interval, r.Delay)) - } - return mErr.ErrorOrNil() -} - -func NewRestartPolicy(jobType string) *RestartPolicy { - switch jobType { - case JobTypeService, JobTypeSystem: - rp := defaultServiceJobRestartPolicy - return &rp - case JobTypeBatch: - rp := defaultBatchJobRestartPolicy - return &rp - } - return nil -} - -// TaskGroup is an atomic unit of placement. Each task group belongs to -// a job and may contain any number of tasks. A task group support running -// in many replicas using the same configuration.. -type TaskGroup struct { - // Name of the task group - Name string - - // Count is the number of replicas of this task group that should - // be scheduled. - Count int - - // Update is used to control the update strategy for this task group - Update *UpdateStrategy - - // Constraints can be specified at a task group level and apply to - // all the tasks contained. - Constraints []*Constraint - - //RestartPolicy of a TaskGroup - RestartPolicy *RestartPolicy - - // Tasks are the collection of tasks that this task group needs to run - Tasks []*Task - - // EphemeralDisk is the disk resources that the task group requests - EphemeralDisk *EphemeralDisk - - // Meta is used to associate arbitrary metadata with this - // task group. This is opaque to Nomad. - Meta map[string]string -} - -func (tg *TaskGroup) Copy() *TaskGroup { - if tg == nil { - return nil - } - ntg := new(TaskGroup) - *ntg = *tg - ntg.Update = ntg.Update.Copy() - ntg.Constraints = CopySliceConstraints(ntg.Constraints) - ntg.RestartPolicy = ntg.RestartPolicy.Copy() - - if tg.Tasks != nil { - tasks := make([]*Task, len(ntg.Tasks)) - for i, t := range ntg.Tasks { - tasks[i] = t.Copy() - } - ntg.Tasks = tasks - } - - ntg.Meta = helper.CopyMapStringString(ntg.Meta) - - if tg.EphemeralDisk != nil { - ntg.EphemeralDisk = tg.EphemeralDisk.Copy() - } - return ntg -} - -// Canonicalize is used to canonicalize fields in the TaskGroup. -func (tg *TaskGroup) Canonicalize(job *Job) { - // Ensure that an empty and nil map are treated the same to avoid scheduling - // problems since we use reflect DeepEquals. - if len(tg.Meta) == 0 { - tg.Meta = nil - } - - // Set the default restart policy. 
- if tg.RestartPolicy == nil { - tg.RestartPolicy = NewRestartPolicy(job.Type) - } - - // Set a default ephemeral disk object if the user has not requested for one - if tg.EphemeralDisk == nil { - tg.EphemeralDisk = DefaultEphemeralDisk() - } - - for _, task := range tg.Tasks { - task.Canonicalize(job, tg) - } - - // Add up the disk resources to EphemeralDisk. This is done so that users - // are not required to move their disk attribute from resources to - // EphemeralDisk section of the job spec in Nomad 0.5 - // COMPAT 0.4.1 -> 0.5 - // Remove in 0.6 - var diskMB int - for _, task := range tg.Tasks { - diskMB += task.Resources.DiskMB - } - if diskMB > 0 { - tg.EphemeralDisk.SizeMB = diskMB - } -} - -// Validate is used to sanity check a task group -func (tg *TaskGroup) Validate(j *Job) error { - var mErr multierror.Error - if tg.Name == "" { - mErr.Errors = append(mErr.Errors, errors.New("Missing task group name")) - } - if tg.Count < 0 { - mErr.Errors = append(mErr.Errors, errors.New("Task group count can't be negative")) - } - if len(tg.Tasks) == 0 { - mErr.Errors = append(mErr.Errors, errors.New("Missing tasks for task group")) - } - for idx, constr := range tg.Constraints { - if err := constr.Validate(); err != nil { - outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err) - mErr.Errors = append(mErr.Errors, outer) - } - } - - if tg.RestartPolicy != nil { - if err := tg.RestartPolicy.Validate(); err != nil { - mErr.Errors = append(mErr.Errors, err) - } - } else { - mErr.Errors = append(mErr.Errors, fmt.Errorf("Task Group %v should have a restart policy", tg.Name)) - } - - if tg.EphemeralDisk != nil { - if err := tg.EphemeralDisk.Validate(); err != nil { - mErr.Errors = append(mErr.Errors, err) - } - } else { - mErr.Errors = append(mErr.Errors, fmt.Errorf("Task Group %v should have an ephemeral disk object", tg.Name)) - } - - // Validate the update strategy - if u := tg.Update; u != nil { - switch j.Type { - case JobTypeService, JobTypeSystem: - default: - // COMPAT: Enable in 0.7.0 - //mErr.Errors = append(mErr.Errors, fmt.Errorf("Job type %q does not allow update block", j.Type)) - } - if err := u.Validate(); err != nil { - mErr.Errors = append(mErr.Errors, err) - } - } - - // Check for duplicate tasks, that there is only leader task if any, - // and no duplicated static ports - tasks := make(map[string]int) - staticPorts := make(map[int]string) - leaderTasks := 0 - for idx, task := range tg.Tasks { - if task.Name == "" { - mErr.Errors = append(mErr.Errors, fmt.Errorf("Task %d missing name", idx+1)) - } else if existing, ok := tasks[task.Name]; ok { - mErr.Errors = append(mErr.Errors, fmt.Errorf("Task %d redefines '%s' from task %d", idx+1, task.Name, existing+1)) - } else { - tasks[task.Name] = idx - } - - if task.Leader { - leaderTasks++ - } - - if task.Resources == nil { - continue - } - - for _, net := range task.Resources.Networks { - for _, port := range net.ReservedPorts { - if other, ok := staticPorts[port.Value]; ok { - err := fmt.Errorf("Static port %d already reserved by %s", port.Value, other) - mErr.Errors = append(mErr.Errors, err) - } else { - staticPorts[port.Value] = fmt.Sprintf("%s:%s", task.Name, port.Label) - } - } - } - } - - if leaderTasks > 1 { - mErr.Errors = append(mErr.Errors, fmt.Errorf("Only one task may be marked as leader")) - } - - // Validate the tasks - for _, task := range tg.Tasks { - if err := task.Validate(tg.EphemeralDisk); err != nil { - outer := fmt.Errorf("Task %s validation failed: %v", task.Name, err) - mErr.Errors = 
append(mErr.Errors, outer) - } - } - return mErr.ErrorOrNil() -} - -// Warnings returns a list of warnings that may be from dubious settings or -// deprecation warnings. -func (tg *TaskGroup) Warnings(j *Job) error { - var mErr multierror.Error - - // Validate the update strategy - if u := tg.Update; u != nil { - // Check the counts are appropriate - if u.MaxParallel > tg.Count { - mErr.Errors = append(mErr.Errors, - fmt.Errorf("Update max parallel count is greater than task group count (%d > %d). "+ - "A destructive change would result in the simultaneous replacement of all allocations.", u.MaxParallel, tg.Count)) - } - } - - return mErr.ErrorOrNil() -} - -// LookupTask finds a task by name -func (tg *TaskGroup) LookupTask(name string) *Task { - for _, t := range tg.Tasks { - if t.Name == name { - return t - } - } - return nil -} - -func (tg *TaskGroup) GoString() string { - return fmt.Sprintf("*%#v", *tg) -} - -// CheckRestart describes if and when a task should be restarted based on -// failing health checks. -type CheckRestart struct { - Limit int // Restart task after this many unhealthy intervals - Grace time.Duration // Grace time to give tasks after starting to get healthy - IgnoreWarnings bool // If true treat checks in `warning` as passing -} - -func (c *CheckRestart) Copy() *CheckRestart { - if c == nil { - return nil - } - - nc := new(CheckRestart) - *nc = *c - return nc -} - -func (c *CheckRestart) Validate() error { - if c == nil { - return nil - } - - var mErr multierror.Error - if c.Limit < 0 { - mErr.Errors = append(mErr.Errors, fmt.Errorf("limit must be greater than or equal to 0 but found %d", c.Limit)) - } - - if c.Grace < 0 { - mErr.Errors = append(mErr.Errors, fmt.Errorf("grace period must be greater than or equal to 0 but found %d", c.Grace)) - } - - return mErr.ErrorOrNil() -} - -const ( - ServiceCheckHTTP = "http" - ServiceCheckTCP = "tcp" - ServiceCheckScript = "script" - - // minCheckInterval is the minimum check interval permitted. Consul - // currently has its MinInterval set to 1s. Mirror that here for - // consistency. - minCheckInterval = 1 * time.Second - - // minCheckTimeout is the minimum check timeout permitted for Consul - // script TTL checks. 
- minCheckTimeout = 1 * time.Second -) - -// The ServiceCheck data model represents the consul health check that -// Nomad registers for a Task -type ServiceCheck struct { - Name string // Name of the check, defaults to id - Type string // Type of the check - tcp, http, docker and script - Command string // Command is the command to run for script checks - Args []string // Args is a list of argumes for script checks - Path string // path of the health check url for http type check - Protocol string // Protocol to use if check is http, defaults to http - PortLabel string // The port to use for tcp/http checks - Interval time.Duration // Interval of the check - Timeout time.Duration // Timeout of the response from the check before consul fails the check - InitialStatus string // Initial status of the check - TLSSkipVerify bool // Skip TLS verification when Protocol=https - Method string // HTTP Method to use (GET by default) - Header map[string][]string // HTTP Headers for Consul to set when making HTTP checks - CheckRestart *CheckRestart // If and when a task should be restarted based on checks -} - -func (sc *ServiceCheck) Copy() *ServiceCheck { - if sc == nil { - return nil - } - nsc := new(ServiceCheck) - *nsc = *sc - nsc.Args = helper.CopySliceString(sc.Args) - nsc.Header = helper.CopyMapStringSliceString(sc.Header) - nsc.CheckRestart = sc.CheckRestart.Copy() - return nsc -} - -func (sc *ServiceCheck) Canonicalize(serviceName string) { - // Ensure empty maps/slices are treated as null to avoid scheduling - // issues when using DeepEquals. - if len(sc.Args) == 0 { - sc.Args = nil - } - - if len(sc.Header) == 0 { - sc.Header = nil - } else { - for k, v := range sc.Header { - if len(v) == 0 { - sc.Header[k] = nil - } - } - } - - if sc.Name == "" { - sc.Name = fmt.Sprintf("service: %q check", serviceName) - } -} - -// validate a Service's ServiceCheck -func (sc *ServiceCheck) validate() error { - switch strings.ToLower(sc.Type) { - case ServiceCheckTCP: - case ServiceCheckHTTP: - if sc.Path == "" { - return fmt.Errorf("http type must have a valid http path") - } - - case ServiceCheckScript: - if sc.Command == "" { - return fmt.Errorf("script type must have a valid script path") - } - default: - return fmt.Errorf(`invalid type (%+q), must be one of "http", "tcp", or "script" type`, sc.Type) - } - - if sc.Interval == 0 { - return fmt.Errorf("missing required value interval. Interval cannot be less than %v", minCheckInterval) - } else if sc.Interval < minCheckInterval { - return fmt.Errorf("interval (%v) cannot be lower than %v", sc.Interval, minCheckInterval) - } - - if sc.Timeout == 0 { - return fmt.Errorf("missing required value timeout. Timeout cannot be less than %v", minCheckInterval) - } else if sc.Timeout < minCheckTimeout { - return fmt.Errorf("timeout (%v) is lower than required minimum timeout %v", sc.Timeout, minCheckInterval) - } - - switch sc.InitialStatus { - case "": - // case api.HealthUnknown: TODO: Add when Consul releases 0.7.1 - case api.HealthPassing: - case api.HealthWarning: - case api.HealthCritical: - default: - return fmt.Errorf(`invalid initial check state (%s), must be one of %q, %q, %q or empty`, sc.InitialStatus, api.HealthPassing, api.HealthWarning, api.HealthCritical) - - } - - return sc.CheckRestart.Validate() -} - -// RequiresPort returns whether the service check requires the task has a port. 
-func (sc *ServiceCheck) RequiresPort() bool { - switch sc.Type { - case ServiceCheckHTTP, ServiceCheckTCP: - return true - default: - return false - } -} - -// TriggersRestarts returns true if this check should be watched and trigger a restart -// on failure. -func (sc *ServiceCheck) TriggersRestarts() bool { - return sc.CheckRestart != nil && sc.CheckRestart.Limit > 0 -} - -// Hash all ServiceCheck fields and the check's corresponding service ID to -// create an identifier. The identifier is not guaranteed to be unique as if -// the PortLabel is blank, the Service's PortLabel will be used after Hash is -// called. -func (sc *ServiceCheck) Hash(serviceID string) string { - h := sha1.New() - io.WriteString(h, serviceID) - io.WriteString(h, sc.Name) - io.WriteString(h, sc.Type) - io.WriteString(h, sc.Command) - io.WriteString(h, strings.Join(sc.Args, "")) - io.WriteString(h, sc.Path) - io.WriteString(h, sc.Protocol) - io.WriteString(h, sc.PortLabel) - io.WriteString(h, sc.Interval.String()) - io.WriteString(h, sc.Timeout.String()) - io.WriteString(h, sc.Method) - // Only include TLSSkipVerify if set to maintain ID stability with Nomad <0.6 - if sc.TLSSkipVerify { - io.WriteString(h, "true") - } - - // Since map iteration order isn't stable we need to write k/v pairs to - // a slice and sort it before hashing. - if len(sc.Header) > 0 { - headers := make([]string, 0, len(sc.Header)) - for k, v := range sc.Header { - headers = append(headers, k+strings.Join(v, "")) - } - sort.Strings(headers) - io.WriteString(h, strings.Join(headers, "")) - } - - return fmt.Sprintf("%x", h.Sum(nil)) -} - -const ( - AddressModeAuto = "auto" - AddressModeHost = "host" - AddressModeDriver = "driver" -) - -// Service represents a Consul service definition in Nomad -type Service struct { - // Name of the service registered with Consul. Consul defaults the - // Name to ServiceID if not specified. The Name if specified is used - // as one of the seed values when generating a Consul ServiceID. - Name string - - // PortLabel is either the numeric port number or the `host:port`. - // To specify the port number using the host's Consul Advertise - // address, specify an empty host in the PortLabel (e.g. `:port`). - PortLabel string - - // AddressMode specifies whether or not to use the host ip:port for - // this service. - AddressMode string - - Tags []string // List of tags for the service - Checks []*ServiceCheck // List of checks associated with the service -} - -func (s *Service) Copy() *Service { - if s == nil { - return nil - } - ns := new(Service) - *ns = *s - ns.Tags = helper.CopySliceString(ns.Tags) - - if s.Checks != nil { - checks := make([]*ServiceCheck, len(ns.Checks)) - for i, c := range ns.Checks { - checks[i] = c.Copy() - } - ns.Checks = checks - } - - return ns -} - -// Canonicalize interpolates values of Job, Task Group and Task in the Service -// Name. This also generates check names, service id and check ids. 
-func (s *Service) Canonicalize(job string, taskGroup string, task string) { - // Ensure empty lists are treated as null to avoid scheduler issues when - // using DeepEquals - if len(s.Tags) == 0 { - s.Tags = nil - } - if len(s.Checks) == 0 { - s.Checks = nil - } - - s.Name = args.ReplaceEnv(s.Name, map[string]string{ - "JOB": job, - "TASKGROUP": taskGroup, - "TASK": task, - "BASE": fmt.Sprintf("%s-%s-%s", job, taskGroup, task), - }, - ) - - for _, check := range s.Checks { - check.Canonicalize(s.Name) - } -} - -// Validate checks if the Check definition is valid -func (s *Service) Validate() error { - var mErr multierror.Error - - // Ensure the service name is valid per the below RFCs but make an exception - // for our interpolation syntax - // RFC-952 §1 (https://tools.ietf.org/html/rfc952), RFC-1123 §2.1 - // (https://tools.ietf.org/html/rfc1123), and RFC-2782 - // (https://tools.ietf.org/html/rfc2782). - re := regexp.MustCompile(`^(?i:[a-z0-9]|[a-z0-9\$][a-zA-Z0-9\-\$\{\}\_\.]*[a-z0-9\}])$`) - if !re.MatchString(s.Name) { - mErr.Errors = append(mErr.Errors, fmt.Errorf("service name must be valid per RFC 1123 and can contain only alphanumeric characters or dashes: %q", s.Name)) - } - - switch s.AddressMode { - case "", AddressModeAuto, AddressModeHost, AddressModeDriver: - // OK - default: - mErr.Errors = append(mErr.Errors, fmt.Errorf("service address_mode must be %q, %q, or %q; not %q", AddressModeAuto, AddressModeHost, AddressModeDriver, s.AddressMode)) - } - - for _, c := range s.Checks { - if s.PortLabel == "" && c.RequiresPort() { - mErr.Errors = append(mErr.Errors, fmt.Errorf("check %s invalid: check requires a port but the service %+q has no port", c.Name, s.Name)) - continue - } - - if err := c.validate(); err != nil { - mErr.Errors = append(mErr.Errors, fmt.Errorf("check %s invalid: %v", c.Name, err)) - } - } - - return mErr.ErrorOrNil() -} - -// ValidateName checks if the services Name is valid and should be called after -// the name has been interpolated -func (s *Service) ValidateName(name string) error { - // Ensure the service name is valid per RFC-952 §1 - // (https://tools.ietf.org/html/rfc952), RFC-1123 §2.1 - // (https://tools.ietf.org/html/rfc1123), and RFC-2782 - // (https://tools.ietf.org/html/rfc2782). - re := regexp.MustCompile(`^(?i:[a-z0-9]|[a-z0-9][a-z0-9\-]{0,61}[a-z0-9])$`) - if !re.MatchString(name) { - return fmt.Errorf("service name must be valid per RFC 1123 and can contain only alphanumeric characters or dashes and must be no longer than 63 characters: %q", name) - } - return nil -} - -// Hash calculates the hash of the check based on it's content and the service -// which owns it -func (s *Service) Hash() string { - h := sha1.New() - io.WriteString(h, s.Name) - io.WriteString(h, strings.Join(s.Tags, "")) - io.WriteString(h, s.PortLabel) - io.WriteString(h, s.AddressMode) - return fmt.Sprintf("%x", h.Sum(nil)) -} - -const ( - // DefaultKillTimeout is the default timeout between signaling a task it - // will be killed and killing it. - DefaultKillTimeout = 5 * time.Second -) - -// LogConfig provides configuration for log rotation -type LogConfig struct { - MaxFiles int - MaxFileSizeMB int -} - -// DefaultLogConfig returns the default LogConfig values. -func DefaultLogConfig() *LogConfig { - return &LogConfig{ - MaxFiles: 10, - MaxFileSizeMB: 10, - } -} - -// Validate returns an error if the log config specified are less than -// the minimum allowed. 
-func (l *LogConfig) Validate() error { - var mErr multierror.Error - if l.MaxFiles < 1 { - mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum number of files is 1; got %d", l.MaxFiles)) - } - if l.MaxFileSizeMB < 1 { - mErr.Errors = append(mErr.Errors, fmt.Errorf("minimum file size is 1MB; got %d", l.MaxFileSizeMB)) - } - return mErr.ErrorOrNil() -} - -// Task is a single process typically that is executed as part of a task group. -type Task struct { - // Name of the task - Name string - - // Driver is used to control which driver is used - Driver string - - // User is used to determine which user will run the task. It defaults to - // the same user the Nomad client is being run as. - User string - - // Config is provided to the driver to initialize - Config map[string]interface{} - - // Map of environment variables to be used by the driver - Env map[string]string - - // List of service definitions exposed by the Task - Services []*Service - - // Vault is used to define the set of Vault policies that this task should - // have access to. - Vault *Vault - - // Templates are the set of templates to be rendered for the task. - Templates []*Template - - // Constraints can be specified at a task level and apply only to - // the particular task. - Constraints []*Constraint - - // Resources is the resources needed by this task - Resources *Resources - - // DispatchPayload configures how the task retrieves its input from a dispatch - DispatchPayload *DispatchPayloadConfig - - // Meta is used to associate arbitrary metadata with this - // task. This is opaque to Nomad. - Meta map[string]string - - // KillTimeout is the time between signaling a task that it will be - // killed and killing it. - KillTimeout time.Duration - - // LogConfig provides configuration for log rotation - LogConfig *LogConfig - - // Artifacts is a list of artifacts to download and extract before running - // the task. - Artifacts []*TaskArtifact - - // Leader marks the task as the leader within the group. When the leader - // task exits, other tasks will be gracefully terminated. - Leader bool - - // ShutdownDelay is the duration of the delay between deregistering a - // task from Consul and sending it a signal to shutdown. See #2441 - ShutdownDelay time.Duration -} - -func (t *Task) Copy() *Task { - if t == nil { - return nil - } - nt := new(Task) - *nt = *t - nt.Env = helper.CopyMapStringString(nt.Env) - - if t.Services != nil { - services := make([]*Service, len(nt.Services)) - for i, s := range nt.Services { - services[i] = s.Copy() - } - nt.Services = services - } - - nt.Constraints = CopySliceConstraints(nt.Constraints) - - nt.Vault = nt.Vault.Copy() - nt.Resources = nt.Resources.Copy() - nt.Meta = helper.CopyMapStringString(nt.Meta) - nt.DispatchPayload = nt.DispatchPayload.Copy() - - if t.Artifacts != nil { - artifacts := make([]*TaskArtifact, 0, len(t.Artifacts)) - for _, a := range nt.Artifacts { - artifacts = append(artifacts, a.Copy()) - } - nt.Artifacts = artifacts - } - - if i, err := copystructure.Copy(nt.Config); err != nil { - panic(err.Error()) - } else { - nt.Config = i.(map[string]interface{}) - } - - if t.Templates != nil { - templates := make([]*Template, len(t.Templates)) - for i, tmpl := range nt.Templates { - templates[i] = tmpl.Copy() - } - nt.Templates = templates - } - - return nt -} - -// Canonicalize canonicalizes fields in the task. 
-func (t *Task) Canonicalize(job *Job, tg *TaskGroup) { - // Ensure that an empty and nil map are treated the same to avoid scheduling - // problems since we use reflect DeepEquals. - if len(t.Meta) == 0 { - t.Meta = nil - } - if len(t.Config) == 0 { - t.Config = nil - } - if len(t.Env) == 0 { - t.Env = nil - } - - for _, service := range t.Services { - service.Canonicalize(job.Name, tg.Name, t.Name) - } - - // If Resources are nil initialize them to defaults, otherwise canonicalize - if t.Resources == nil { - t.Resources = DefaultResources() - } else { - t.Resources.Canonicalize() - } - - // Set the default timeout if it is not specified. - if t.KillTimeout == 0 { - t.KillTimeout = DefaultKillTimeout - } - - if t.Vault != nil { - t.Vault.Canonicalize() - } - - for _, template := range t.Templates { - template.Canonicalize() - } -} - -func (t *Task) GoString() string { - return fmt.Sprintf("*%#v", *t) -} - -// Validate is used to sanity check a task -func (t *Task) Validate(ephemeralDisk *EphemeralDisk) error { - var mErr multierror.Error - if t.Name == "" { - mErr.Errors = append(mErr.Errors, errors.New("Missing task name")) - } - if strings.ContainsAny(t.Name, `/\`) { - // We enforce this so that when creating the directory on disk it will - // not have any slashes. - mErr.Errors = append(mErr.Errors, errors.New("Task name cannot include slashes")) - } - if t.Driver == "" { - mErr.Errors = append(mErr.Errors, errors.New("Missing task driver")) - } - if t.KillTimeout < 0 { - mErr.Errors = append(mErr.Errors, errors.New("KillTimeout must be a positive value")) - } - if t.ShutdownDelay < 0 { - mErr.Errors = append(mErr.Errors, errors.New("ShutdownDelay must be a positive value")) - } - - // Validate the resources. - if t.Resources == nil { - mErr.Errors = append(mErr.Errors, errors.New("Missing task resources")) - } else { - if err := t.Resources.MeetsMinResources(); err != nil { - mErr.Errors = append(mErr.Errors, err) - } - - // Ensure the task isn't asking for disk resources - if t.Resources.DiskMB > 0 { - mErr.Errors = append(mErr.Errors, errors.New("Task can't ask for disk resources, they have to be specified at the task group level.")) - } - } - - // Validate the log config - if t.LogConfig == nil { - mErr.Errors = append(mErr.Errors, errors.New("Missing Log Config")) - } else if err := t.LogConfig.Validate(); err != nil { - mErr.Errors = append(mErr.Errors, err) - } - - for idx, constr := range t.Constraints { - if err := constr.Validate(); err != nil { - outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err) - mErr.Errors = append(mErr.Errors, outer) - } - - switch constr.Operand { - case ConstraintDistinctHosts, ConstraintDistinctProperty: - outer := fmt.Errorf("Constraint %d has disallowed Operand at task level: %s", idx+1, constr.Operand) - mErr.Errors = append(mErr.Errors, outer) - } - } - - // Validate Services - if err := validateServices(t); err != nil { - mErr.Errors = append(mErr.Errors, err) - } - - if t.LogConfig != nil && ephemeralDisk != nil { - logUsage := (t.LogConfig.MaxFiles * t.LogConfig.MaxFileSizeMB) - if ephemeralDisk.SizeMB <= logUsage { - mErr.Errors = append(mErr.Errors, - fmt.Errorf("log storage (%d MB) must be less than requested disk capacity (%d MB)", - logUsage, ephemeralDisk.SizeMB)) - } - } - - for idx, artifact := range t.Artifacts { - if err := artifact.Validate(); err != nil { - outer := fmt.Errorf("Artifact %d validation failed: %v", idx+1, err) - mErr.Errors = append(mErr.Errors, outer) - } - } - - if t.Vault != nil { - if err := 
t.Vault.Validate(); err != nil { - mErr.Errors = append(mErr.Errors, fmt.Errorf("Vault validation failed: %v", err)) - } - } - - destinations := make(map[string]int, len(t.Templates)) - for idx, tmpl := range t.Templates { - if err := tmpl.Validate(); err != nil { - outer := fmt.Errorf("Template %d validation failed: %s", idx+1, err) - mErr.Errors = append(mErr.Errors, outer) - } - - if other, ok := destinations[tmpl.DestPath]; ok { - outer := fmt.Errorf("Template %d has same destination as %d", idx+1, other) - mErr.Errors = append(mErr.Errors, outer) - } else { - destinations[tmpl.DestPath] = idx + 1 - } - } - - // Validate the dispatch payload block if there - if t.DispatchPayload != nil { - if err := t.DispatchPayload.Validate(); err != nil { - mErr.Errors = append(mErr.Errors, fmt.Errorf("Dispatch Payload validation failed: %v", err)) - } - } - - return mErr.ErrorOrNil() -} - -// validateServices takes a task and validates the services within it are valid -// and reference ports that exist. -func validateServices(t *Task) error { - var mErr multierror.Error - - // Ensure that services don't ask for non-existent ports and their names are - // unique. - servicePorts := make(map[string][]string) - knownServices := make(map[string]struct{}) - for i, service := range t.Services { - if err := service.Validate(); err != nil { - outer := fmt.Errorf("service[%d] %+q validation failed: %s", i, service.Name, err) - mErr.Errors = append(mErr.Errors, outer) - } - - // Ensure that services with the same name are not being registered for - // the same port - if _, ok := knownServices[service.Name+service.PortLabel]; ok { - mErr.Errors = append(mErr.Errors, fmt.Errorf("service %q is duplicate", service.Name)) - } - knownServices[service.Name+service.PortLabel] = struct{}{} - - if service.PortLabel != "" { - servicePorts[service.PortLabel] = append(servicePorts[service.PortLabel], service.Name) - } - - // Ensure that check names are unique. - knownChecks := make(map[string]struct{}) - for _, check := range service.Checks { - if _, ok := knownChecks[check.Name]; ok { - mErr.Errors = append(mErr.Errors, fmt.Errorf("check %q is duplicate", check.Name)) - } - knownChecks[check.Name] = struct{}{} - } - } - - // Get the set of port labels. - portLabels := make(map[string]struct{}) - if t.Resources != nil { - for _, network := range t.Resources.Networks { - ports := network.PortLabels() - for portLabel, _ := range ports { - portLabels[portLabel] = struct{}{} - } - } - } - - // Ensure all ports referenced in services exist. - for servicePort, services := range servicePorts { - _, ok := portLabels[servicePort] - if !ok { - joined := strings.Join(services, ", ") - err := fmt.Errorf("port label %q referenced by services %v does not exist", servicePort, joined) - mErr.Errors = append(mErr.Errors, err) - } - } - - // Ensure address mode is valid - return mErr.ErrorOrNil() -} - -const ( - // TemplateChangeModeNoop marks that no action should be taken if the - // template is re-rendered - TemplateChangeModeNoop = "noop" - - // TemplateChangeModeSignal marks that the task should be signaled if the - // template is re-rendered - TemplateChangeModeSignal = "signal" - - // TemplateChangeModeRestart marks that the task should be restarted if the - // template is re-rendered - TemplateChangeModeRestart = "restart" -) - -var ( - // TemplateChangeModeInvalidError is the error for when an invalid change - // mode is given - TemplateChangeModeInvalidError = errors.New("Invalid change mode. 
Must be one of the following: noop, signal, restart") -) - -// Template represents a template configuration to be rendered for a given task -type Template struct { - // SourcePath is the path to the template to be rendered - SourcePath string - - // DestPath is the path to where the template should be rendered - DestPath string - - // EmbeddedTmpl store the raw template. This is useful for smaller templates - // where they are embedded in the job file rather than sent as an artificat - EmbeddedTmpl string - - // ChangeMode indicates what should be done if the template is re-rendered - ChangeMode string - - // ChangeSignal is the signal that should be sent if the change mode - // requires it. - ChangeSignal string - - // Splay is used to avoid coordinated restarts of processes by applying a - // random wait between 0 and the given splay value before signalling the - // application of a change - Splay time.Duration - - // Perms is the permission the file should be written out with. - Perms string - - // LeftDelim and RightDelim are optional configurations to control what - // delimiter is utilized when parsing the template. - LeftDelim string - RightDelim string - - // Envvars enables exposing the template as environment variables - // instead of as a file. The template must be of the form: - // - // VAR_NAME_1={{ key service/my-key }} - // VAR_NAME_2=raw string and {{ env "attr.kernel.name" }} - // - // Lines will be split on the initial "=" with the first part being the - // key name and the second part the value. - // Empty lines and lines starting with # will be ignored, but to avoid - // escaping issues #s within lines will not be treated as comments. - Envvars bool - - // VaultGrace is the grace duration between lease renewal and reacquiring a - // secret. If the lease of a secret is less than the grace, a new secret is - // acquired. - VaultGrace time.Duration -} - -// DefaultTemplate returns a default template. 
-func DefaultTemplate() *Template { - return &Template{ - ChangeMode: TemplateChangeModeRestart, - Splay: 5 * time.Second, - Perms: "0644", - } -} - -func (t *Template) Copy() *Template { - if t == nil { - return nil - } - copy := new(Template) - *copy = *t - return copy -} - -func (t *Template) Canonicalize() { - if t.ChangeSignal != "" { - t.ChangeSignal = strings.ToUpper(t.ChangeSignal) - } -} - -func (t *Template) Validate() error { - var mErr multierror.Error - - // Verify we have something to render - if t.SourcePath == "" && t.EmbeddedTmpl == "" { - multierror.Append(&mErr, fmt.Errorf("Must specify a source path or have an embedded template")) - } - - // Verify we can render somewhere - if t.DestPath == "" { - multierror.Append(&mErr, fmt.Errorf("Must specify a destination for the template")) - } - - // Verify the destination doesn't escape - escaped, err := PathEscapesAllocDir("task", t.DestPath) - if err != nil { - mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid destination path: %v", err)) - } else if escaped { - mErr.Errors = append(mErr.Errors, fmt.Errorf("destination escapes allocation directory")) - } - - // Verify a proper change mode - switch t.ChangeMode { - case TemplateChangeModeNoop, TemplateChangeModeRestart: - case TemplateChangeModeSignal: - if t.ChangeSignal == "" { - multierror.Append(&mErr, fmt.Errorf("Must specify signal value when change mode is signal")) - } - if t.Envvars { - multierror.Append(&mErr, fmt.Errorf("cannot use signals with env var templates")) - } - default: - multierror.Append(&mErr, TemplateChangeModeInvalidError) - } - - // Verify the splay is positive - if t.Splay < 0 { - multierror.Append(&mErr, fmt.Errorf("Must specify positive splay value")) - } - - // Verify the permissions - if t.Perms != "" { - if _, err := strconv.ParseUint(t.Perms, 8, 12); err != nil { - multierror.Append(&mErr, fmt.Errorf("Failed to parse %q as octal: %v", t.Perms, err)) - } - } - - if t.VaultGrace.Nanoseconds() < 0 { - multierror.Append(&mErr, fmt.Errorf("Vault grace must be greater than zero: %v < 0", t.VaultGrace)) - } - - return mErr.ErrorOrNil() -} - -// Set of possible states for a task. -const ( - TaskStatePending = "pending" // The task is waiting to be run. - TaskStateRunning = "running" // The task is currently running. - TaskStateDead = "dead" // Terminal state of task. -) - -// TaskState tracks the current state of a task and events that caused state -// transitions. -type TaskState struct { - // The current state of the task. - State string - - // Failed marks a task as having failed - Failed bool - - // Restarts is the number of times the task has restarted - Restarts uint64 - - // LastRestart is the time the task last restarted. It is updated each time the - // task restarts - LastRestart time.Time - - // StartedAt is the time the task is started. It is updated each time the - // task starts - StartedAt time.Time - - // FinishedAt is the time at which the task transistioned to dead and will - // not be started again. - FinishedAt time.Time - - // Series of task events that transition the state of the task. - Events []*TaskEvent -} - -func (ts *TaskState) Copy() *TaskState { - if ts == nil { - return nil - } - copy := new(TaskState) - *copy = *ts - - if ts.Events != nil { - copy.Events = make([]*TaskEvent, len(ts.Events)) - for i, e := range ts.Events { - copy.Events[i] = e.Copy() - } - } - return copy -} - -// Successful returns whether a task finished successfully. 
-func (ts *TaskState) Successful() bool {
-	l := len(ts.Events)
-	if ts.State != TaskStateDead || l == 0 {
-		return false
-	}
-
-	e := ts.Events[l-1]
-	if e.Type != TaskTerminated {
-		return false
-	}
-
-	return e.ExitCode == 0
-}
-
-const (
-	// TaskSetupFailure indicates that the task could not be started due to a
-	// setup failure.
-	TaskSetupFailure = "Setup Failure"
-
-	// TaskDriverFailure indicates that the task could not be started due to a
-	// failure in the driver.
-	TaskDriverFailure = "Driver Failure"
-
-	// TaskReceived signals that the task has been pulled by the client at the
-	// given timestamp.
-	TaskReceived = "Received"
-
-	// TaskFailedValidation indicates the task was invalid and as such was not
-	// run.
-	TaskFailedValidation = "Failed Validation"
-
-	// TaskStarted signals that the task was started and its timestamp can be
-	// used to determine the running length of the task.
-	TaskStarted = "Started"
-
-	// TaskTerminated indicates that the task was started and exited.
-	TaskTerminated = "Terminated"
-
-	// TaskKilling indicates a kill signal has been sent to the task.
-	TaskKilling = "Killing"
-
-	// TaskKilled indicates a user has killed the task.
-	TaskKilled = "Killed"
-
-	// TaskRestarting indicates that the task terminated and is being restarted.
-	TaskRestarting = "Restarting"
-
-	// TaskNotRestarting indicates that the task has failed and is not being
-	// restarted because it has exceeded its restart policy.
-	TaskNotRestarting = "Not Restarting"
-
-	// TaskRestartSignal indicates that the task has been signalled to be
-	// restarted
-	TaskRestartSignal = "Restart Signaled"
-
-	// TaskSignaling indicates that the task is being signalled.
-	TaskSignaling = "Signaling"
-
-	// TaskDownloadingArtifacts means the task is downloading the artifacts
-	// specified in the task.
-	TaskDownloadingArtifacts = "Downloading Artifacts"
-
-	// TaskArtifactDownloadFailed indicates that downloading the artifacts
-	// failed.
-	TaskArtifactDownloadFailed = "Failed Artifact Download"
-
-	// TaskBuildingTaskDir indicates that the task directory/chroot is being
-	// built.
-	TaskBuildingTaskDir = "Building Task Directory"
-
-	// TaskSetup indicates the task runner is setting up the task environment
-	TaskSetup = "Task Setup"
-
-	// TaskDiskExceeded indicates that one of the tasks in a taskgroup has
-	// exceeded the requested disk resources.
-	TaskDiskExceeded = "Disk Resources Exceeded"
-
-	// TaskSiblingFailed indicates that a sibling task in the task group has
-	// failed.
-	TaskSiblingFailed = "Sibling Task Failed"
-
-	// TaskDriverMessage is an informational event message emitted by
-	// drivers such as when they're performing a long running action like
-	// downloading an image.
-	TaskDriverMessage = "Driver"
-
-	// TaskLeaderDead indicates that the leader task within the group has finished.
-	TaskLeaderDead = "Leader Task Dead"
-
-	// TaskGenericMessage is used by various subsystems to emit a message.
-	TaskGenericMessage = "Generic"
-)
-
-// TaskEvent is an event that affects the state of a task and contains meta-data
-// appropriate to the event's type.
-type TaskEvent struct {
-	Type string
-	Time int64 // Unix Nanosecond timestamp
-
-	// FailsTask marks whether this event fails the task
-	FailsTask bool
-
-	// Restart fields.
-	RestartReason string
-
-	// Setup Failure fields.
-	SetupError string
-
-	// Driver Failure fields.
-	DriverError string // A driver error occurred while starting the task.
-
-	// Task Terminated Fields.
-	ExitCode int // The exit code of the task.
- Signal int // The signal that terminated the task. - Message string // A possible message explaining the termination of the task. - - // Killing fields - KillTimeout time.Duration - - // Task Killed Fields. - KillError string // Error killing the task. - - // KillReason is the reason the task was killed - KillReason string - - // TaskRestarting fields. - StartDelay int64 // The sleep period before restarting the task in unix nanoseconds. - - // Artifact Download fields - DownloadError string // Error downloading artifacts - - // Validation fields - ValidationError string // Validation error - - // The maximum allowed task disk size. - DiskLimit int64 - - // Name of the sibling task that caused termination of the task that - // the TaskEvent refers to. - FailedSibling string - - // VaultError is the error from token renewal - VaultError string - - // TaskSignalReason indicates the reason the task is being signalled. - TaskSignalReason string - - // TaskSignal is the signal that was sent to the task - TaskSignal string - - // DriverMessage indicates a driver action being taken. - DriverMessage string - - // GenericSource is the source of a message. - GenericSource string -} - -func (te *TaskEvent) GoString() string { - return fmt.Sprintf("%v - %v", te.Time, te.Type) -} - -// SetMessage sets the message of TaskEvent -func (te *TaskEvent) SetMessage(msg string) *TaskEvent { - te.Message = msg - return te -} - -func (te *TaskEvent) Copy() *TaskEvent { - if te == nil { - return nil - } - copy := new(TaskEvent) - *copy = *te - return copy -} - -func NewTaskEvent(event string) *TaskEvent { - return &TaskEvent{ - Type: event, - Time: time.Now().UnixNano(), - } -} - -// SetSetupError is used to store an error that occurred while setting up the -// task -func (e *TaskEvent) SetSetupError(err error) *TaskEvent { - if err != nil { - e.SetupError = err.Error() - } - return e -} - -func (e *TaskEvent) SetFailsTask() *TaskEvent { - e.FailsTask = true - return e -} - -func (e *TaskEvent) SetDriverError(err error) *TaskEvent { - if err != nil { - e.DriverError = err.Error() - } - return e -} - -func (e *TaskEvent) SetExitCode(c int) *TaskEvent { - e.ExitCode = c - return e -} - -func (e *TaskEvent) SetSignal(s int) *TaskEvent { - e.Signal = s - return e -} - -func (e *TaskEvent) SetExitMessage(err error) *TaskEvent { - if err != nil { - e.Message = err.Error() - } - return e -} - -func (e *TaskEvent) SetKillError(err error) *TaskEvent { - if err != nil { - e.KillError = err.Error() - } - return e -} - -func (e *TaskEvent) SetKillReason(r string) *TaskEvent { - e.KillReason = r - return e -} - -func (e *TaskEvent) SetRestartDelay(delay time.Duration) *TaskEvent { - e.StartDelay = int64(delay) - return e -} - -func (e *TaskEvent) SetRestartReason(reason string) *TaskEvent { - e.RestartReason = reason - return e -} - -func (e *TaskEvent) SetTaskSignalReason(r string) *TaskEvent { - e.TaskSignalReason = r - return e -} - -func (e *TaskEvent) SetTaskSignal(s os.Signal) *TaskEvent { - e.TaskSignal = s.String() - return e -} - -func (e *TaskEvent) SetDownloadError(err error) *TaskEvent { - if err != nil { - e.DownloadError = err.Error() - } - return e -} - -func (e *TaskEvent) SetValidationError(err error) *TaskEvent { - if err != nil { - e.ValidationError = err.Error() - } - return e -} - -func (e *TaskEvent) SetKillTimeout(timeout time.Duration) *TaskEvent { - e.KillTimeout = timeout - return e -} - -func (e *TaskEvent) SetDiskLimit(limit int64) *TaskEvent { - e.DiskLimit = limit - return e -} - -func (e 
*TaskEvent) SetFailedSibling(sibling string) *TaskEvent { - e.FailedSibling = sibling - return e -} - -func (e *TaskEvent) SetVaultRenewalError(err error) *TaskEvent { - if err != nil { - e.VaultError = err.Error() - } - return e -} - -func (e *TaskEvent) SetDriverMessage(m string) *TaskEvent { - e.DriverMessage = m - return e -} - -func (e *TaskEvent) SetGenericSource(s string) *TaskEvent { - e.GenericSource = s - return e -} - -// TaskArtifact is an artifact to download before running the task. -type TaskArtifact struct { - // GetterSource is the source to download an artifact using go-getter - GetterSource string - - // GetterOptions are options to use when downloading the artifact using - // go-getter. - GetterOptions map[string]string - - // GetterMode is the go-getter.ClientMode for fetching resources. - // Defaults to "any" but can be set to "file" or "dir". - GetterMode string - - // RelativeDest is the download destination given relative to the task's - // directory. - RelativeDest string -} - -func (ta *TaskArtifact) Copy() *TaskArtifact { - if ta == nil { - return nil - } - nta := new(TaskArtifact) - *nta = *ta - nta.GetterOptions = helper.CopyMapStringString(ta.GetterOptions) - return nta -} - -func (ta *TaskArtifact) GoString() string { - return fmt.Sprintf("%+v", ta) -} - -// PathEscapesAllocDir returns if the given path escapes the allocation -// directory. The prefix allows adding a prefix if the path will be joined, for -// example a "task/local" prefix may be provided if the path will be joined -// against that prefix. -func PathEscapesAllocDir(prefix, path string) (bool, error) { - // Verify the destination doesn't escape the tasks directory - alloc, err := filepath.Abs(filepath.Join("/", "alloc-dir/", "alloc-id/")) - if err != nil { - return false, err - } - abs, err := filepath.Abs(filepath.Join(alloc, prefix, path)) - if err != nil { - return false, err - } - rel, err := filepath.Rel(alloc, abs) - if err != nil { - return false, err - } - - return strings.HasPrefix(rel, ".."), nil -} - -func (ta *TaskArtifact) Validate() error { - // Verify the source - var mErr multierror.Error - if ta.GetterSource == "" { - mErr.Errors = append(mErr.Errors, fmt.Errorf("source must be specified")) - } - - switch ta.GetterMode { - case "": - // Default to any - ta.GetterMode = GetterModeAny - case GetterModeAny, GetterModeFile, GetterModeDir: - // Ok - default: - mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid artifact mode %q; must be one of: %s, %s, %s", - ta.GetterMode, GetterModeAny, GetterModeFile, GetterModeDir)) - } - - escaped, err := PathEscapesAllocDir("task", ta.RelativeDest) - if err != nil { - mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid destination path: %v", err)) - } else if escaped { - mErr.Errors = append(mErr.Errors, fmt.Errorf("destination escapes allocation directory")) - } - - // Verify the checksum - if check, ok := ta.GetterOptions["checksum"]; ok { - check = strings.TrimSpace(check) - if check == "" { - mErr.Errors = append(mErr.Errors, fmt.Errorf("checksum value cannot be empty")) - return mErr.ErrorOrNil() - } - - parts := strings.Split(check, ":") - if l := len(parts); l != 2 { - mErr.Errors = append(mErr.Errors, fmt.Errorf(`checksum must be given as "type:value"; got %q`, check)) - return mErr.ErrorOrNil() - } - - checksumVal := parts[1] - checksumBytes, err := hex.DecodeString(checksumVal) - if err != nil { - mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid checksum: %v", err)) - return mErr.ErrorOrNil() - } - - checksumType := 
parts[0] - expectedLength := 0 - switch checksumType { - case "md5": - expectedLength = md5.Size - case "sha1": - expectedLength = sha1.Size - case "sha256": - expectedLength = sha256.Size - case "sha512": - expectedLength = sha512.Size - default: - mErr.Errors = append(mErr.Errors, fmt.Errorf("unsupported checksum type: %s", checksumType)) - return mErr.ErrorOrNil() - } - - if len(checksumBytes) != expectedLength { - mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid %s checksum: %v", checksumType, checksumVal)) - return mErr.ErrorOrNil() - } - } - - return mErr.ErrorOrNil() -} - -const ( - ConstraintDistinctProperty = "distinct_property" - ConstraintDistinctHosts = "distinct_hosts" - ConstraintRegex = "regexp" - ConstraintVersion = "version" - ConstraintSetContains = "set_contains" -) - -// Constraints are used to restrict placement options. -type Constraint struct { - LTarget string // Left-hand target - RTarget string // Right-hand target - Operand string // Constraint operand (<=, <, =, !=, >, >=), contains, near - str string // Memoized string -} - -// Equal checks if two constraints are equal -func (c *Constraint) Equal(o *Constraint) bool { - return c.LTarget == o.LTarget && - c.RTarget == o.RTarget && - c.Operand == o.Operand -} - -func (c *Constraint) Copy() *Constraint { - if c == nil { - return nil - } - nc := new(Constraint) - *nc = *c - return nc -} - -func (c *Constraint) String() string { - if c.str != "" { - return c.str - } - c.str = fmt.Sprintf("%s %s %s", c.LTarget, c.Operand, c.RTarget) - return c.str -} - -func (c *Constraint) Validate() error { - var mErr multierror.Error - if c.Operand == "" { - mErr.Errors = append(mErr.Errors, errors.New("Missing constraint operand")) - } - - // requireLtarget specifies whether the constraint requires an LTarget to be - // provided. 
- requireLtarget := true - - // Perform additional validation based on operand - switch c.Operand { - case ConstraintDistinctHosts: - requireLtarget = false - case ConstraintSetContains: - if c.RTarget == "" { - mErr.Errors = append(mErr.Errors, fmt.Errorf("Set contains constraint requires an RTarget")) - } - case ConstraintRegex: - if _, err := regexp.Compile(c.RTarget); err != nil { - mErr.Errors = append(mErr.Errors, fmt.Errorf("Regular expression failed to compile: %v", err)) - } - case ConstraintVersion: - if _, err := version.NewConstraint(c.RTarget); err != nil { - mErr.Errors = append(mErr.Errors, fmt.Errorf("Version constraint is invalid: %v", err)) - } - case ConstraintDistinctProperty: - // If a count is set, make sure it is convertible to a uint64 - if c.RTarget != "" { - count, err := strconv.ParseUint(c.RTarget, 10, 64) - if err != nil { - mErr.Errors = append(mErr.Errors, fmt.Errorf("Failed to convert RTarget %q to uint64: %v", c.RTarget, err)) - } else if count < 1 { - mErr.Errors = append(mErr.Errors, fmt.Errorf("Distinct Property must have an allowed count of 1 or greater: %d < 1", count)) - } - } - case "=", "==", "is", "!=", "not", "<", "<=", ">", ">=": - if c.RTarget == "" { - mErr.Errors = append(mErr.Errors, fmt.Errorf("Operator %q requires an RTarget", c.Operand)) - } - default: - mErr.Errors = append(mErr.Errors, fmt.Errorf("Unknown constraint type %q", c.Operand)) - } - - // Ensure we have an LTarget for the constraints that need one - if requireLtarget && c.LTarget == "" { - mErr.Errors = append(mErr.Errors, fmt.Errorf("No LTarget provided but is required by constraint")) - } - - return mErr.ErrorOrNil() -} - -// EphemeralDisk is an ephemeral disk object -type EphemeralDisk struct { - // Sticky indicates whether the allocation is sticky to a node - Sticky bool - - // SizeMB is the size of the local disk - SizeMB int - - // Migrate determines if Nomad client should migrate the allocation dir for - // sticky allocations - Migrate bool -} - -// DefaultEphemeralDisk returns a EphemeralDisk with default configurations -func DefaultEphemeralDisk() *EphemeralDisk { - return &EphemeralDisk{ - SizeMB: 300, - } -} - -// Validate validates EphemeralDisk -func (d *EphemeralDisk) Validate() error { - if d.SizeMB < 10 { - return fmt.Errorf("minimum DiskMB value is 10; got %d", d.SizeMB) - } - return nil -} - -// Copy copies the EphemeralDisk struct and returns a new one -func (d *EphemeralDisk) Copy() *EphemeralDisk { - ld := new(EphemeralDisk) - *ld = *d - return ld -} - -const ( - // VaultChangeModeNoop takes no action when a new token is retrieved. - VaultChangeModeNoop = "noop" - - // VaultChangeModeSignal signals the task when a new token is retrieved. - VaultChangeModeSignal = "signal" - - // VaultChangeModeRestart restarts the task when a new token is retrieved. - VaultChangeModeRestart = "restart" -) - -// Vault stores the set of permissions a task needs access to from Vault. -type Vault struct { - // Policies is the set of policies that the task needs access to - Policies []string - - // Env marks whether the Vault Token should be exposed as an environment - // variable - Env bool - - // ChangeMode is used to configure the task's behavior when the Vault - // token changes because the original token could not be renewed in time. - ChangeMode string - - // ChangeSignal is the signal sent to the task when a new token is - // retrieved. This is only valid when using the signal change mode. 
-	ChangeSignal string
-}
-
-func DefaultVaultBlock() *Vault {
-	return &Vault{
-		Env:        true,
-		ChangeMode: VaultChangeModeRestart,
-	}
-}
-
-// Copy returns a copy of this Vault block.
-func (v *Vault) Copy() *Vault {
-	if v == nil {
-		return nil
-	}
-
-	nv := new(Vault)
-	*nv = *v
-	return nv
-}
-
-func (v *Vault) Canonicalize() {
-	if v.ChangeSignal != "" {
-		v.ChangeSignal = strings.ToUpper(v.ChangeSignal)
-	}
-}
-
-// Validate returns if the Vault block is valid.
-func (v *Vault) Validate() error {
-	if v == nil {
-		return nil
-	}
-
-	var mErr multierror.Error
-	if len(v.Policies) == 0 {
-		multierror.Append(&mErr, fmt.Errorf("Policy list cannot be empty"))
-	}
-
-	for _, p := range v.Policies {
-		if p == "root" {
-			multierror.Append(&mErr, fmt.Errorf("Can not specify \"root\" policy"))
-		}
-	}
-
-	switch v.ChangeMode {
-	case VaultChangeModeSignal:
-		if v.ChangeSignal == "" {
-			multierror.Append(&mErr, fmt.Errorf("Signal must be specified when using change mode %q", VaultChangeModeSignal))
-		}
-	case VaultChangeModeNoop, VaultChangeModeRestart:
-	default:
-		multierror.Append(&mErr, fmt.Errorf("Unknown change mode %q", v.ChangeMode))
-	}
-
-	return mErr.ErrorOrNil()
-}
-
-const (
-	// DeploymentStatuses are the various states a deployment can be in
-	DeploymentStatusRunning    = "running"
-	DeploymentStatusPaused     = "paused"
-	DeploymentStatusFailed     = "failed"
-	DeploymentStatusSuccessful = "successful"
-	DeploymentStatusCancelled  = "cancelled"
-
-	// DeploymentStatusDescriptions are the various descriptions of the states a
-	// deployment can be in.
-	DeploymentStatusDescriptionRunning               = "Deployment is running"
-	DeploymentStatusDescriptionRunningNeedsPromotion = "Deployment is running but requires promotion"
-	DeploymentStatusDescriptionPaused                = "Deployment is paused"
-	DeploymentStatusDescriptionSuccessful            = "Deployment completed successfully"
-	DeploymentStatusDescriptionStoppedJob            = "Cancelled because job is stopped"
-	DeploymentStatusDescriptionNewerJob              = "Cancelled due to newer version of job"
-	DeploymentStatusDescriptionFailedAllocations     = "Failed due to unhealthy allocations"
-	DeploymentStatusDescriptionFailedByUser          = "Deployment marked as failed"
-)
-
-// DeploymentStatusDescriptionRollback is used to get the status description of
-// a deployment when rolling back to an older job.
-func DeploymentStatusDescriptionRollback(baseDescription string, jobVersion uint64) string {
-	return fmt.Sprintf("%s - rolling back to job version %d", baseDescription, jobVersion)
-}
-
-// DeploymentStatusDescriptionNoRollbackTarget is used to get the status description of
-// a deployment when there is no target to rollback to but autorevert is desired.
-func DeploymentStatusDescriptionNoRollbackTarget(baseDescription string) string {
-	return fmt.Sprintf("%s - no stable job version to auto revert to", baseDescription)
-}
-
-// Deployment is the object that represents a job deployment which is used to
-// transition a job between versions.
-type Deployment struct {
-	// ID is a generated UUID for the deployment
-	ID string
-
-	// Namespace is the namespace the deployment is created in
-	Namespace string
-
-	// JobID is the job the deployment is created for
-	JobID string
-
-	// JobVersion is the version of the job at which the deployment is tracking
-	JobVersion uint64
-
-	// JobModifyIndex is the modify index of the job at which the deployment is tracking
-	JobModifyIndex uint64
-
-	// JobCreateIndex is the create index of the job which the deployment is
-	// tracking.
It is needed so that if the job gets stopped and reran we can - // present the correct list of deployments for the job and not old ones. - JobCreateIndex uint64 - - // TaskGroups is the set of task groups effected by the deployment and their - // current deployment status. - TaskGroups map[string]*DeploymentState - - // The status of the deployment - Status string - - // StatusDescription allows a human readable description of the deployment - // status. - StatusDescription string - - CreateIndex uint64 - ModifyIndex uint64 -} - -// NewDeployment creates a new deployment given the job. -func NewDeployment(job *Job) *Deployment { - return &Deployment{ - ID: GenerateUUID(), - Namespace: job.Namespace, - JobID: job.ID, - JobVersion: job.Version, - JobModifyIndex: job.ModifyIndex, - JobCreateIndex: job.CreateIndex, - Status: DeploymentStatusRunning, - StatusDescription: DeploymentStatusDescriptionRunning, - TaskGroups: make(map[string]*DeploymentState, len(job.TaskGroups)), - } -} - -func (d *Deployment) Copy() *Deployment { - if d == nil { - return nil - } - - c := &Deployment{} - *c = *d - - c.TaskGroups = nil - if l := len(d.TaskGroups); d.TaskGroups != nil { - c.TaskGroups = make(map[string]*DeploymentState, l) - for tg, s := range d.TaskGroups { - c.TaskGroups[tg] = s.Copy() - } - } - - return c -} - -// Active returns whether the deployment is active or terminal. -func (d *Deployment) Active() bool { - switch d.Status { - case DeploymentStatusRunning, DeploymentStatusPaused: - return true - default: - return false - } -} - -// GetID is a helper for getting the ID when the object may be nil -func (d *Deployment) GetID() string { - if d == nil { - return "" - } - return d.ID -} - -// HasPlacedCanaries returns whether the deployment has placed canaries -func (d *Deployment) HasPlacedCanaries() bool { - if d == nil || len(d.TaskGroups) == 0 { - return false - } - for _, group := range d.TaskGroups { - if len(group.PlacedCanaries) != 0 { - return true - } - } - return false -} - -// RequiresPromotion returns whether the deployment requires promotion to -// continue -func (d *Deployment) RequiresPromotion() bool { - if d == nil || len(d.TaskGroups) == 0 || d.Status != DeploymentStatusRunning { - return false - } - for _, group := range d.TaskGroups { - if group.DesiredCanaries > 0 && !group.Promoted { - return true - } - } - return false -} - -func (d *Deployment) GoString() string { - base := fmt.Sprintf("Deployment ID %q for job %q has status %q (%v):", d.ID, d.JobID, d.Status, d.StatusDescription) - for group, state := range d.TaskGroups { - base += fmt.Sprintf("\nTask Group %q has state:\n%#v", group, state) - } - return base -} - -// DeploymentState tracks the state of a deployment for a given task group. -type DeploymentState struct { - // AutoRevert marks whether the task group has indicated the job should be - // reverted on failure - AutoRevert bool - - // Promoted marks whether the canaries have been promoted - Promoted bool - - // PlacedCanaries is the set of placed canary allocations - PlacedCanaries []string - - // DesiredCanaries is the number of canaries that should be created. - DesiredCanaries int - - // DesiredTotal is the total number of allocations that should be created as - // part of the deployment. - DesiredTotal int - - // PlacedAllocs is the number of allocations that have been placed - PlacedAllocs int - - // HealthyAllocs is the number of allocations that have been marked healthy. 
- HealthyAllocs int - - // UnhealthyAllocs are allocations that have been marked as unhealthy. - UnhealthyAllocs int -} - -func (d *DeploymentState) GoString() string { - base := fmt.Sprintf("\tDesired Total: %d", d.DesiredTotal) - base += fmt.Sprintf("\n\tDesired Canaries: %d", d.DesiredCanaries) - base += fmt.Sprintf("\n\tPlaced Canaries: %#v", d.PlacedCanaries) - base += fmt.Sprintf("\n\tPromoted: %v", d.Promoted) - base += fmt.Sprintf("\n\tPlaced: %d", d.PlacedAllocs) - base += fmt.Sprintf("\n\tHealthy: %d", d.HealthyAllocs) - base += fmt.Sprintf("\n\tUnhealthy: %d", d.UnhealthyAllocs) - base += fmt.Sprintf("\n\tAutoRevert: %v", d.AutoRevert) - return base -} - -func (d *DeploymentState) Copy() *DeploymentState { - c := &DeploymentState{} - *c = *d - c.PlacedCanaries = helper.CopySliceString(d.PlacedCanaries) - return c -} - -// DeploymentStatusUpdate is used to update the status of a given deployment -type DeploymentStatusUpdate struct { - // DeploymentID is the ID of the deployment to update - DeploymentID string - - // Status is the new status of the deployment. - Status string - - // StatusDescription is the new status description of the deployment. - StatusDescription string -} - -const ( - AllocDesiredStatusRun = "run" // Allocation should run - AllocDesiredStatusStop = "stop" // Allocation should stop - AllocDesiredStatusEvict = "evict" // Allocation should stop, and was evicted -) - -const ( - AllocClientStatusPending = "pending" - AllocClientStatusRunning = "running" - AllocClientStatusComplete = "complete" - AllocClientStatusFailed = "failed" - AllocClientStatusLost = "lost" -) - -// Allocation is used to allocate the placement of a task group to a node. -type Allocation struct { - // ID of the allocation (UUID) - ID string - - // Namespace is the namespace the allocation is created in - Namespace string - - // ID of the evaluation that generated this allocation - EvalID string - - // Name is a logical name of the allocation. - Name string - - // NodeID is the node this is being placed on - NodeID string - - // Job is the parent job of the task group being allocated. - // This is copied at allocation time to avoid issues if the job - // definition is updated. - JobID string - Job *Job - - // TaskGroup is the name of the task group that should be run - TaskGroup string - - // Resources is the total set of resources allocated as part - // of this allocation of the task group. - Resources *Resources - - // SharedResources are the resources that are shared by all the tasks in an - // allocation - SharedResources *Resources - - // TaskResources is the set of resources allocated to each - // task. These should sum to the total Resources. 
- TaskResources map[string]*Resources - - // Metrics associated with this allocation - Metrics *AllocMetric - - // Desired Status of the allocation on the client - DesiredStatus string - - // DesiredStatusDescription is meant to provide more human useful information - DesiredDescription string - - // Status of the allocation on the client - ClientStatus string - - // ClientStatusDescription is meant to provide more human useful information - ClientDescription string - - // TaskStates stores the state of each task, - TaskStates map[string]*TaskState - - // PreviousAllocation is the allocation that this allocation is replacing - PreviousAllocation string - - // DeploymentID identifies an allocation as being created from a - // particular deployment - DeploymentID string - - // DeploymentStatus captures the status of the allocation as part of the - // given deployment - DeploymentStatus *AllocDeploymentStatus - - // Raft Indexes - CreateIndex uint64 - ModifyIndex uint64 - - // AllocModifyIndex is not updated when the client updates allocations. This - // lets the client pull only the allocs updated by the server. - AllocModifyIndex uint64 - - // CreateTime is the time the allocation has finished scheduling and been - // verified by the plan applier. - CreateTime int64 -} - -// Index returns the index of the allocation. If the allocation is from a task -// group with count greater than 1, there will be multiple allocations for it. -func (a *Allocation) Index() uint { - l := len(a.Name) - prefix := len(a.JobID) + len(a.TaskGroup) + 2 - if l <= 3 || l <= prefix { - return uint(0) - } - - strNum := a.Name[prefix : len(a.Name)-1] - num, _ := strconv.Atoi(strNum) - return uint(num) -} - -func (a *Allocation) Copy() *Allocation { - return a.copyImpl(true) -} - -// Copy provides a copy of the allocation but doesn't deep copy the job -func (a *Allocation) CopySkipJob() *Allocation { - return a.copyImpl(false) -} - -func (a *Allocation) copyImpl(job bool) *Allocation { - if a == nil { - return nil - } - na := new(Allocation) - *na = *a - - if job { - na.Job = na.Job.Copy() - } - - na.Resources = na.Resources.Copy() - na.SharedResources = na.SharedResources.Copy() - - if a.TaskResources != nil { - tr := make(map[string]*Resources, len(na.TaskResources)) - for task, resource := range na.TaskResources { - tr[task] = resource.Copy() - } - na.TaskResources = tr - } - - na.Metrics = na.Metrics.Copy() - na.DeploymentStatus = na.DeploymentStatus.Copy() - - if a.TaskStates != nil { - ts := make(map[string]*TaskState, len(na.TaskStates)) - for task, state := range na.TaskStates { - ts[task] = state.Copy() - } - na.TaskStates = ts - } - return na -} - -// TerminalStatus returns if the desired or actual status is terminal and -// will no longer transition. -func (a *Allocation) TerminalStatus() bool { - // First check the desired state and if that isn't terminal, check client - // state. - switch a.DesiredStatus { - case AllocDesiredStatusStop, AllocDesiredStatusEvict: - return true - default: - } - - switch a.ClientStatus { - case AllocClientStatusComplete, AllocClientStatusFailed, AllocClientStatusLost: - return true - default: - return false - } -} - -// Terminated returns if the allocation is in a terminal state on a client. 
-func (a *Allocation) Terminated() bool { - if a.ClientStatus == AllocClientStatusFailed || - a.ClientStatus == AllocClientStatusComplete || - a.ClientStatus == AllocClientStatusLost { - return true - } - return false -} - -// RanSuccessfully returns whether the client has ran the allocation and all -// tasks finished successfully -func (a *Allocation) RanSuccessfully() bool { - return a.ClientStatus == AllocClientStatusComplete -} - -// ShouldMigrate returns if the allocation needs data migration -func (a *Allocation) ShouldMigrate() bool { - if a.DesiredStatus == AllocDesiredStatusStop || a.DesiredStatus == AllocDesiredStatusEvict { - return false - } - - tg := a.Job.LookupTaskGroup(a.TaskGroup) - - // if the task group is nil or the ephemeral disk block isn't present then - // we won't migrate - if tg == nil || tg.EphemeralDisk == nil { - return false - } - - // We won't migrate any data is the user hasn't enabled migration or the - // disk is not marked as sticky - if !tg.EphemeralDisk.Migrate || !tg.EphemeralDisk.Sticky { - return false - } - - return true -} - -// Stub returns a list stub for the allocation -func (a *Allocation) Stub() *AllocListStub { - return &AllocListStub{ - ID: a.ID, - EvalID: a.EvalID, - Name: a.Name, - NodeID: a.NodeID, - JobID: a.JobID, - JobVersion: a.Job.Version, - TaskGroup: a.TaskGroup, - DesiredStatus: a.DesiredStatus, - DesiredDescription: a.DesiredDescription, - ClientStatus: a.ClientStatus, - ClientDescription: a.ClientDescription, - TaskStates: a.TaskStates, - DeploymentStatus: a.DeploymentStatus, - CreateIndex: a.CreateIndex, - ModifyIndex: a.ModifyIndex, - CreateTime: a.CreateTime, - } -} - -// AllocListStub is used to return a subset of alloc information -type AllocListStub struct { - ID string - EvalID string - Name string - NodeID string - JobID string - JobVersion uint64 - TaskGroup string - DesiredStatus string - DesiredDescription string - ClientStatus string - ClientDescription string - TaskStates map[string]*TaskState - DeploymentStatus *AllocDeploymentStatus - CreateIndex uint64 - ModifyIndex uint64 - CreateTime int64 -} - -// AllocMetric is used to track various metrics while attempting -// to make an allocation. These are used to debug a job, or to better -// understand the pressure within the system. -type AllocMetric struct { - // NodesEvaluated is the number of nodes that were evaluated - NodesEvaluated int - - // NodesFiltered is the number of nodes filtered due to a constraint - NodesFiltered int - - // NodesAvailable is the number of nodes available for evaluation per DC. - NodesAvailable map[string]int - - // ClassFiltered is the number of nodes filtered by class - ClassFiltered map[string]int - - // ConstraintFiltered is the number of failures caused by constraint - ConstraintFiltered map[string]int - - // NodesExhausted is the number of nodes skipped due to being - // exhausted of at least one resource - NodesExhausted int - - // ClassExhausted is the number of nodes exhausted by class - ClassExhausted map[string]int - - // DimensionExhausted provides the count by dimension or reason - DimensionExhausted map[string]int - - // Scores is the scores of the final few nodes remaining - // for placement. The top score is typically selected. - Scores map[string]float64 - - // AllocationTime is a measure of how long the allocation - // attempt took. This can affect performance and SLAs. - AllocationTime time.Duration - - // CoalescedFailures indicates the number of other - // allocations that were coalesced into this failed allocation. 
- // This is to prevent creating many failed allocations for a - // single task group. - CoalescedFailures int -} - -func (a *AllocMetric) Copy() *AllocMetric { - if a == nil { - return nil - } - na := new(AllocMetric) - *na = *a - na.NodesAvailable = helper.CopyMapStringInt(na.NodesAvailable) - na.ClassFiltered = helper.CopyMapStringInt(na.ClassFiltered) - na.ConstraintFiltered = helper.CopyMapStringInt(na.ConstraintFiltered) - na.ClassExhausted = helper.CopyMapStringInt(na.ClassExhausted) - na.DimensionExhausted = helper.CopyMapStringInt(na.DimensionExhausted) - na.Scores = helper.CopyMapStringFloat64(na.Scores) - return na -} - -func (a *AllocMetric) EvaluateNode() { - a.NodesEvaluated += 1 -} - -func (a *AllocMetric) FilterNode(node *Node, constraint string) { - a.NodesFiltered += 1 - if node != nil && node.NodeClass != "" { - if a.ClassFiltered == nil { - a.ClassFiltered = make(map[string]int) - } - a.ClassFiltered[node.NodeClass] += 1 - } - if constraint != "" { - if a.ConstraintFiltered == nil { - a.ConstraintFiltered = make(map[string]int) - } - a.ConstraintFiltered[constraint] += 1 - } -} - -func (a *AllocMetric) ExhaustedNode(node *Node, dimension string) { - a.NodesExhausted += 1 - if node != nil && node.NodeClass != "" { - if a.ClassExhausted == nil { - a.ClassExhausted = make(map[string]int) - } - a.ClassExhausted[node.NodeClass] += 1 - } - if dimension != "" { - if a.DimensionExhausted == nil { - a.DimensionExhausted = make(map[string]int) - } - a.DimensionExhausted[dimension] += 1 - } -} - -func (a *AllocMetric) ScoreNode(node *Node, name string, score float64) { - if a.Scores == nil { - a.Scores = make(map[string]float64) - } - key := fmt.Sprintf("%s.%s", node.ID, name) - a.Scores[key] = score -} - -// AllocDeploymentStatus captures the status of the allocation as part of the -// deployment. This can include things like if the allocation has been marked as -// heatlhy. -type AllocDeploymentStatus struct { - // Healthy marks whether the allocation has been marked healthy or unhealthy - // as part of a deployment. It can be unset if it has neither been marked - // healthy or unhealthy. - Healthy *bool - - // ModifyIndex is the raft index in which the deployment status was last - // changed. 
- ModifyIndex uint64 -} - -// IsHealthy returns if the allocation is marked as healthy as part of a -// deployment -func (a *AllocDeploymentStatus) IsHealthy() bool { - if a == nil { - return false - } - - return a.Healthy != nil && *a.Healthy -} - -// IsUnhealthy returns if the allocation is marked as unhealthy as part of a -// deployment -func (a *AllocDeploymentStatus) IsUnhealthy() bool { - if a == nil { - return false - } - - return a.Healthy != nil && !*a.Healthy -} - -func (a *AllocDeploymentStatus) Copy() *AllocDeploymentStatus { - if a == nil { - return nil - } - - c := new(AllocDeploymentStatus) - *c = *a - - if a.Healthy != nil { - c.Healthy = helper.BoolToPtr(*a.Healthy) - } - - return c -} - -const ( - EvalStatusBlocked = "blocked" - EvalStatusPending = "pending" - EvalStatusComplete = "complete" - EvalStatusFailed = "failed" - EvalStatusCancelled = "canceled" -) - -const ( - EvalTriggerJobRegister = "job-register" - EvalTriggerJobDeregister = "job-deregister" - EvalTriggerPeriodicJob = "periodic-job" - EvalTriggerNodeUpdate = "node-update" - EvalTriggerScheduled = "scheduled" - EvalTriggerRollingUpdate = "rolling-update" - EvalTriggerDeploymentWatcher = "deployment-watcher" - EvalTriggerFailedFollowUp = "failed-follow-up" - EvalTriggerMaxPlans = "max-plan-attempts" -) - -const ( - // CoreJobEvalGC is used for the garbage collection of evaluations - // and allocations. We periodically scan evaluations in a terminal state, - // in which all the corresponding allocations are also terminal. We - // delete these out of the system to bound the state. - CoreJobEvalGC = "eval-gc" - - // CoreJobNodeGC is used for the garbage collection of failed nodes. - // We periodically scan nodes in a terminal state, and if they have no - // corresponding allocations we delete these out of the system. - CoreJobNodeGC = "node-gc" - - // CoreJobJobGC is used for the garbage collection of eligible jobs. We - // periodically scan garbage collectible jobs and check if both their - // evaluations and allocations are terminal. If so, we delete these out of - // the system. - CoreJobJobGC = "job-gc" - - // CoreJobDeploymentGC is used for the garbage collection of eligible - // deployments. We periodically scan garbage collectible deployments and - // check if they are terminal. If so, we delete these out of the system. - CoreJobDeploymentGC = "deployment-gc" - - // CoreJobForceGC is used to force garbage collection of all GCable objects. - CoreJobForceGC = "force-gc" -) - -// Evaluation is used anytime we need to apply business logic as a result -// of a change to our desired state (job specification) or the emergent state -// (registered nodes). When the inputs change, we need to "evaluate" them, -// potentially taking action (allocation of work) or doing nothing if the state -// of the world does not require it. -type Evaluation struct { - // ID is a randonly generated UUID used for this evaluation. This - // is assigned upon the creation of the evaluation. - ID string - - // Namespace is the namespace the evaluation is created in - Namespace string - - // Priority is used to control scheduling importance and if this job - // can preempt other jobs. - Priority int - - // Type is used to control which schedulers are available to handle - // this evaluation. - Type string - - // TriggeredBy is used to give some insight into why this Eval - // was created. (Job change, node failure, alloc failure, etc). - TriggeredBy string - - // JobID is the job this evaluation is scoped to. 
Evaluations cannot - // be run in parallel for a given JobID, so we serialize on this. - JobID string - - // JobModifyIndex is the modify index of the job at the time - // the evaluation was created - JobModifyIndex uint64 - - // NodeID is the node that was affected triggering the evaluation. - NodeID string - - // NodeModifyIndex is the modify index of the node at the time - // the evaluation was created - NodeModifyIndex uint64 - - // DeploymentID is the ID of the deployment that triggered the evaluation. - DeploymentID string - - // Status of the evaluation - Status string - - // StatusDescription is meant to provide more human useful information - StatusDescription string - - // Wait is a minimum wait time for running the eval. This is used to - // support a rolling upgrade. - Wait time.Duration - - // NextEval is the evaluation ID for the eval created to do a followup. - // This is used to support rolling upgrades, where we need a chain of evaluations. - NextEval string - - // PreviousEval is the evaluation ID for the eval creating this one to do a followup. - // This is used to support rolling upgrades, where we need a chain of evaluations. - PreviousEval string - - // BlockedEval is the evaluation ID for a created blocked eval. A - // blocked eval will be created if all allocations could not be placed due - // to constraints or lacking resources. - BlockedEval string - - // FailedTGAllocs are task groups which have allocations that could not be - // made, but the metrics are persisted so that the user can use the feedback - // to determine the cause. - FailedTGAllocs map[string]*AllocMetric - - // ClassEligibility tracks computed node classes that have been explicitly - // marked as eligible or ineligible. - ClassEligibility map[string]bool - - // EscapedComputedClass marks whether the job has constraints that are not - // captured by computed node classes. - EscapedComputedClass bool - - // AnnotatePlan triggers the scheduler to provide additional annotations - // during the evaluation. This should not be set during normal operations. - AnnotatePlan bool - - // QueuedAllocations is the number of unplaced allocations at the time the - // evaluation was processed. The map is keyed by Task Group names. - QueuedAllocations map[string]int - - // SnapshotIndex is the Raft index of the snapshot used to process the - // evaluation. As such it will only be set once it has gone through the - // scheduler. - SnapshotIndex uint64 - - // Raft Indexes - CreateIndex uint64 - ModifyIndex uint64 -} - -// TerminalStatus returns if the current status is terminal and -// will no longer transition. 
-func (e *Evaluation) TerminalStatus() bool { - switch e.Status { - case EvalStatusComplete, EvalStatusFailed, EvalStatusCancelled: - return true - default: - return false - } -} - -func (e *Evaluation) GoString() string { - return fmt.Sprintf("", e.ID, e.JobID, e.Namespace) -} - -func (e *Evaluation) Copy() *Evaluation { - if e == nil { - return nil - } - ne := new(Evaluation) - *ne = *e - - // Copy ClassEligibility - if e.ClassEligibility != nil { - classes := make(map[string]bool, len(e.ClassEligibility)) - for class, elig := range e.ClassEligibility { - classes[class] = elig - } - ne.ClassEligibility = classes - } - - // Copy FailedTGAllocs - if e.FailedTGAllocs != nil { - failedTGs := make(map[string]*AllocMetric, len(e.FailedTGAllocs)) - for tg, metric := range e.FailedTGAllocs { - failedTGs[tg] = metric.Copy() - } - ne.FailedTGAllocs = failedTGs - } - - // Copy queued allocations - if e.QueuedAllocations != nil { - queuedAllocations := make(map[string]int, len(e.QueuedAllocations)) - for tg, num := range e.QueuedAllocations { - queuedAllocations[tg] = num - } - ne.QueuedAllocations = queuedAllocations - } - - return ne -} - -// ShouldEnqueue checks if a given evaluation should be enqueued into the -// eval_broker -func (e *Evaluation) ShouldEnqueue() bool { - switch e.Status { - case EvalStatusPending: - return true - case EvalStatusComplete, EvalStatusFailed, EvalStatusBlocked, EvalStatusCancelled: - return false - default: - panic(fmt.Sprintf("unhandled evaluation (%s) status %s", e.ID, e.Status)) - } -} - -// ShouldBlock checks if a given evaluation should be entered into the blocked -// eval tracker. -func (e *Evaluation) ShouldBlock() bool { - switch e.Status { - case EvalStatusBlocked: - return true - case EvalStatusComplete, EvalStatusFailed, EvalStatusPending, EvalStatusCancelled: - return false - default: - panic(fmt.Sprintf("unhandled evaluation (%s) status %s", e.ID, e.Status)) - } -} - -// MakePlan is used to make a plan from the given evaluation -// for a given Job -func (e *Evaluation) MakePlan(j *Job) *Plan { - p := &Plan{ - EvalID: e.ID, - Priority: e.Priority, - Job: j, - NodeUpdate: make(map[string][]*Allocation), - NodeAllocation: make(map[string][]*Allocation), - } - if j != nil { - p.AllAtOnce = j.AllAtOnce - } - return p -} - -// NextRollingEval creates an evaluation to followup this eval for rolling updates -func (e *Evaluation) NextRollingEval(wait time.Duration) *Evaluation { - return &Evaluation{ - ID: GenerateUUID(), - Namespace: e.Namespace, - Priority: e.Priority, - Type: e.Type, - TriggeredBy: EvalTriggerRollingUpdate, - JobID: e.JobID, - JobModifyIndex: e.JobModifyIndex, - Status: EvalStatusPending, - Wait: wait, - PreviousEval: e.ID, - } -} - -// CreateBlockedEval creates a blocked evaluation to followup this eval to place any -// failed allocations. It takes the classes marked explicitly eligible or -// ineligible and whether the job has escaped computed node classes. 
-func (e *Evaluation) CreateBlockedEval(classEligibility map[string]bool, escaped bool) *Evaluation { - return &Evaluation{ - ID: GenerateUUID(), - Namespace: e.Namespace, - Priority: e.Priority, - Type: e.Type, - TriggeredBy: e.TriggeredBy, - JobID: e.JobID, - JobModifyIndex: e.JobModifyIndex, - Status: EvalStatusBlocked, - PreviousEval: e.ID, - ClassEligibility: classEligibility, - EscapedComputedClass: escaped, - } -} - -// CreateFailedFollowUpEval creates a follow up evaluation when the current one -// has been marked as failed because it has hit the delivery limit and will not -// be retried by the eval_broker. -func (e *Evaluation) CreateFailedFollowUpEval(wait time.Duration) *Evaluation { - return &Evaluation{ - ID: GenerateUUID(), - Namespace: e.Namespace, - Priority: e.Priority, - Type: e.Type, - TriggeredBy: EvalTriggerFailedFollowUp, - JobID: e.JobID, - JobModifyIndex: e.JobModifyIndex, - Status: EvalStatusPending, - Wait: wait, - PreviousEval: e.ID, - } -} - -// Plan is used to submit a commit plan for task allocations. These -// are submitted to the leader which verifies that resources have -// not been overcommitted before admiting the plan. -type Plan struct { - // EvalID is the evaluation ID this plan is associated with - EvalID string - - // EvalToken is used to prevent a split-brain processing of - // an evaluation. There should only be a single scheduler running - // an Eval at a time, but this could be violated after a leadership - // transition. This unique token is used to reject plans that are - // being submitted from a different leader. - EvalToken string - - // Priority is the priority of the upstream job - Priority int - - // AllAtOnce is used to control if incremental scheduling of task groups - // is allowed or if we must do a gang scheduling of the entire job. - // If this is false, a plan may be partially applied. Otherwise, the - // entire plan must be able to make progress. - AllAtOnce bool - - // Job is the parent job of all the allocations in the Plan. - // Since a Plan only involves a single Job, we can reduce the size - // of the plan by only including it once. - Job *Job - - // NodeUpdate contains all the allocations for each node. For each node, - // this is a list of the allocations to update to either stop or evict. - NodeUpdate map[string][]*Allocation - - // NodeAllocation contains all the allocations for each node. - // The evicts must be considered prior to the allocations. - NodeAllocation map[string][]*Allocation - - // Annotations contains annotations by the scheduler to be used by operators - // to understand the decisions made by the scheduler. - Annotations *PlanAnnotations - - // Deployment is the deployment created or updated by the scheduler that - // should be applied by the planner. - Deployment *Deployment - - // DeploymentUpdates is a set of status updates to apply to the given - // deployments. This allows the scheduler to cancel any unneeded deployment - // because the job is stopped or the update block is removed. - DeploymentUpdates []*DeploymentStatusUpdate -} - -// AppendUpdate marks the allocation for eviction. The clientStatus of the -// allocation may be optionally set by passing in a non-empty value. -func (p *Plan) AppendUpdate(alloc *Allocation, desiredStatus, desiredDesc, clientStatus string) { - newAlloc := new(Allocation) - *newAlloc = *alloc - - // If the job is not set in the plan we are deregistering a job so we - // extract the job from the allocation. 
- if p.Job == nil && newAlloc.Job != nil { - p.Job = newAlloc.Job - } - - // Normalize the job - newAlloc.Job = nil - - // Strip the resources as it can be rebuilt. - newAlloc.Resources = nil - - newAlloc.DesiredStatus = desiredStatus - newAlloc.DesiredDescription = desiredDesc - - if clientStatus != "" { - newAlloc.ClientStatus = clientStatus - } - - node := alloc.NodeID - existing := p.NodeUpdate[node] - p.NodeUpdate[node] = append(existing, newAlloc) -} - -func (p *Plan) PopUpdate(alloc *Allocation) { - existing := p.NodeUpdate[alloc.NodeID] - n := len(existing) - if n > 0 && existing[n-1].ID == alloc.ID { - existing = existing[:n-1] - if len(existing) > 0 { - p.NodeUpdate[alloc.NodeID] = existing - } else { - delete(p.NodeUpdate, alloc.NodeID) - } - } -} - -func (p *Plan) AppendAlloc(alloc *Allocation) { - node := alloc.NodeID - existing := p.NodeAllocation[node] - p.NodeAllocation[node] = append(existing, alloc) -} - -// IsNoOp checks if this plan would do nothing -func (p *Plan) IsNoOp() bool { - return len(p.NodeUpdate) == 0 && - len(p.NodeAllocation) == 0 && - p.Deployment == nil && - len(p.DeploymentUpdates) == 0 -} - -// PlanResult is the result of a plan submitted to the leader. -type PlanResult struct { - // NodeUpdate contains all the updates that were committed. - NodeUpdate map[string][]*Allocation - - // NodeAllocation contains all the allocations that were committed. - NodeAllocation map[string][]*Allocation - - // Deployment is the deployment that was committed. - Deployment *Deployment - - // DeploymentUpdates is the set of deployment updates that were committed. - DeploymentUpdates []*DeploymentStatusUpdate - - // RefreshIndex is the index the worker should refresh state up to. - // This allows all evictions and allocations to be materialized. - // If any allocations were rejected due to stale data (node state, - // over committed) this can be used to force a worker refresh. - RefreshIndex uint64 - - // AllocIndex is the Raft index in which the evictions and - // allocations took place. This is used for the write index. - AllocIndex uint64 -} - -// IsNoOp checks if this plan result would do nothing -func (p *PlanResult) IsNoOp() bool { - return len(p.NodeUpdate) == 0 && len(p.NodeAllocation) == 0 && - len(p.DeploymentUpdates) == 0 && p.Deployment == nil -} - -// FullCommit is used to check if all the allocations in a plan -// were committed as part of the result. Returns if there was -// a match, and the number of expected and actual allocations. -func (p *PlanResult) FullCommit(plan *Plan) (bool, int, int) { - expected := 0 - actual := 0 - for name, allocList := range plan.NodeAllocation { - didAlloc, _ := p.NodeAllocation[name] - expected += len(allocList) - actual += len(didAlloc) - } - return actual == expected, expected, actual -} - -// PlanAnnotations holds annotations made by the scheduler to give further debug -// information to operators. -type PlanAnnotations struct { - // DesiredTGUpdates is the set of desired updates per task group. - DesiredTGUpdates map[string]*DesiredUpdates -} - -// DesiredUpdates is the set of changes the scheduler would like to make given -// sufficient resources and cluster capacity. 
-type DesiredUpdates struct { - Ignore uint64 - Place uint64 - Migrate uint64 - Stop uint64 - InPlaceUpdate uint64 - DestructiveUpdate uint64 - Canary uint64 -} - -func (d *DesiredUpdates) GoString() string { - return fmt.Sprintf("(place %d) (inplace %d) (destructive %d) (stop %d) (migrate %d) (ignore %d) (canary %d)", - d.Place, d.InPlaceUpdate, d.DestructiveUpdate, d.Stop, d.Migrate, d.Ignore, d.Canary) -} - -// msgpackHandle is a shared handle for encoding/decoding of structs -var MsgpackHandle = func() *codec.MsgpackHandle { - h := &codec.MsgpackHandle{RawToString: true} - - // Sets the default type for decoding a map into a nil interface{}. - // This is necessary in particular because we store the driver configs as a - // nil interface{}. - h.MapType = reflect.TypeOf(map[string]interface{}(nil)) - return h -}() - -var ( - // JsonHandle and JsonHandlePretty are the codec handles to JSON encode - // structs. The pretty handle will add indents for easier human consumption. - JsonHandle = &codec.JsonHandle{ - HTMLCharsAsIs: true, - } - JsonHandlePretty = &codec.JsonHandle{ - HTMLCharsAsIs: true, - Indent: 4, - } -) - -var HashiMsgpackHandle = func() *hcodec.MsgpackHandle { - h := &hcodec.MsgpackHandle{RawToString: true} - - // Sets the default type for decoding a map into a nil interface{}. - // This is necessary in particular because we store the driver configs as a - // nil interface{}. - h.MapType = reflect.TypeOf(map[string]interface{}(nil)) - return h -}() - -// Decode is used to decode a MsgPack encoded object -func Decode(buf []byte, out interface{}) error { - return codec.NewDecoder(bytes.NewReader(buf), MsgpackHandle).Decode(out) -} - -// Encode is used to encode a MsgPack object with type prefix -func Encode(t MessageType, msg interface{}) ([]byte, error) { - var buf bytes.Buffer - buf.WriteByte(uint8(t)) - err := codec.NewEncoder(&buf, MsgpackHandle).Encode(msg) - return buf.Bytes(), err -} - -// KeyringResponse is a unified key response and can be used for install, -// remove, use, as well as listing key queries. -type KeyringResponse struct { - Messages map[string]string - Keys map[string]int - NumNodes int -} - -// KeyringRequest is request objects for serf key operations. -type KeyringRequest struct { - Key string -} - -// RecoverableError wraps an error and marks whether it is recoverable and could -// be retried or it is fatal. -type RecoverableError struct { - Err string - Recoverable bool -} - -// NewRecoverableError is used to wrap an error and mark it as recoverable or -// not. -func NewRecoverableError(e error, recoverable bool) error { - if e == nil { - return nil - } - - return &RecoverableError{ - Err: e.Error(), - Recoverable: recoverable, - } -} - -// WrapRecoverable wraps an existing error in a new RecoverableError with a new -// message. If the error was recoverable before the returned error is as well; -// otherwise it is unrecoverable. -func WrapRecoverable(msg string, err error) error { - return &RecoverableError{Err: msg, Recoverable: IsRecoverable(err)} -} - -func (r *RecoverableError) Error() string { - return r.Err -} - -func (r *RecoverableError) IsRecoverable() bool { - return r.Recoverable -} - -// Recoverable is an interface for errors to implement to indicate whether or -// not they are fatal or recoverable. -type Recoverable interface { - error - IsRecoverable() bool -} - -// IsRecoverable returns true if error is a RecoverableError with -// Recoverable=true. Otherwise false is returned. 
-func IsRecoverable(e error) bool { - if re, ok := e.(Recoverable); ok { - return re.IsRecoverable() - } - return false -} - -// ACLPolicy is used to represent an ACL policy -type ACLPolicy struct { - Name string // Unique name - Description string // Human readable - Rules string // HCL or JSON format - Hash []byte - CreateIndex uint64 - ModifyIndex uint64 -} - -// SetHash is used to compute and set the hash of the ACL policy -func (c *ACLPolicy) SetHash() []byte { - // Initialize a 256bit Blake2 hash (32 bytes) - hash, err := blake2b.New256(nil) - if err != nil { - panic(err) - } - - // Write all the user set fields - hash.Write([]byte(c.Name)) - hash.Write([]byte(c.Description)) - hash.Write([]byte(c.Rules)) - - // Finalize the hash - hashVal := hash.Sum(nil) - - // Set and return the hash - c.Hash = hashVal - return hashVal -} - -func (a *ACLPolicy) Stub() *ACLPolicyListStub { - return &ACLPolicyListStub{ - Name: a.Name, - Description: a.Description, - Hash: a.Hash, - CreateIndex: a.CreateIndex, - ModifyIndex: a.ModifyIndex, - } -} - -func (a *ACLPolicy) Validate() error { - var mErr multierror.Error - if !validPolicyName.MatchString(a.Name) { - err := fmt.Errorf("invalid name '%s'", a.Name) - mErr.Errors = append(mErr.Errors, err) - } - if _, err := acl.Parse(a.Rules); err != nil { - err = fmt.Errorf("failed to parse rules: %v", err) - mErr.Errors = append(mErr.Errors, err) - } - if len(a.Description) > maxPolicyDescriptionLength { - err := fmt.Errorf("description longer than %d", maxPolicyDescriptionLength) - mErr.Errors = append(mErr.Errors, err) - } - return mErr.ErrorOrNil() -} - -// ACLPolicyListStub is used to for listing ACL policies -type ACLPolicyListStub struct { - Name string - Description string - Hash []byte - CreateIndex uint64 - ModifyIndex uint64 -} - -// ACLPolicyListRequest is used to request a list of policies -type ACLPolicyListRequest struct { - QueryOptions -} - -// ACLPolicySpecificRequest is used to query a specific policy -type ACLPolicySpecificRequest struct { - Name string - QueryOptions -} - -// ACLPolicySetRequest is used to query a set of policies -type ACLPolicySetRequest struct { - Names []string - QueryOptions -} - -// ACLPolicyListResponse is used for a list request -type ACLPolicyListResponse struct { - Policies []*ACLPolicyListStub - QueryMeta -} - -// SingleACLPolicyResponse is used to return a single policy -type SingleACLPolicyResponse struct { - Policy *ACLPolicy - QueryMeta -} - -// ACLPolicySetResponse is used to return a set of policies -type ACLPolicySetResponse struct { - Policies map[string]*ACLPolicy - QueryMeta -} - -// ACLPolicyDeleteRequest is used to delete a set of policies -type ACLPolicyDeleteRequest struct { - Names []string - WriteRequest -} - -// ACLPolicyUpsertRequest is used to upsert a set of policies -type ACLPolicyUpsertRequest struct { - Policies []*ACLPolicy - WriteRequest -} - -// ACLToken represents a client token which is used to Authenticate -type ACLToken struct { - AccessorID string // Public Accessor ID (UUID) - SecretID string // Secret ID, private (UUID) - Name string // Human friendly name - Type string // Client or Management - Policies []string // Policies this token ties to - Global bool // Global or Region local - Hash []byte - CreateTime time.Time // Time of creation - CreateIndex uint64 - ModifyIndex uint64 -} - -var ( - // AnonymousACLToken is used no SecretID is provided, and the - // request is made anonymously. 
- AnonymousACLToken = &ACLToken{ - AccessorID: "anonymous", - Name: "Anonymous Token", - Type: ACLClientToken, - Policies: []string{"anonymous"}, - Global: false, - } -) - -type ACLTokenListStub struct { - AccessorID string - Name string - Type string - Policies []string - Global bool - Hash []byte - CreateTime time.Time - CreateIndex uint64 - ModifyIndex uint64 -} - -// SetHash is used to compute and set the hash of the ACL token -func (a *ACLToken) SetHash() []byte { - // Initialize a 256bit Blake2 hash (32 bytes) - hash, err := blake2b.New256(nil) - if err != nil { - panic(err) - } - - // Write all the user set fields - hash.Write([]byte(a.Name)) - hash.Write([]byte(a.Type)) - for _, policyName := range a.Policies { - hash.Write([]byte(policyName)) - } - if a.Global { - hash.Write([]byte("global")) - } else { - hash.Write([]byte("local")) - } - - // Finalize the hash - hashVal := hash.Sum(nil) - - // Set and return the hash - a.Hash = hashVal - return hashVal -} - -func (a *ACLToken) Stub() *ACLTokenListStub { - return &ACLTokenListStub{ - AccessorID: a.AccessorID, - Name: a.Name, - Type: a.Type, - Policies: a.Policies, - Global: a.Global, - Hash: a.Hash, - CreateTime: a.CreateTime, - CreateIndex: a.CreateIndex, - ModifyIndex: a.ModifyIndex, - } -} - -// Validate is used to sanity check a token -func (a *ACLToken) Validate() error { - var mErr multierror.Error - if len(a.Name) > maxTokenNameLength { - mErr.Errors = append(mErr.Errors, fmt.Errorf("token name too long")) - } - switch a.Type { - case ACLClientToken: - if len(a.Policies) == 0 { - mErr.Errors = append(mErr.Errors, fmt.Errorf("client token missing policies")) - } - case ACLManagementToken: - if len(a.Policies) != 0 { - mErr.Errors = append(mErr.Errors, fmt.Errorf("management token cannot be associated with policies")) - } - default: - mErr.Errors = append(mErr.Errors, fmt.Errorf("token type must be client or management")) - } - return mErr.ErrorOrNil() -} - -// PolicySubset checks if a given set of policies is a subset of the token -func (a *ACLToken) PolicySubset(policies []string) bool { - // Hot-path the management tokens, superset of all policies. 
- if a.Type == ACLManagementToken { - return true - } - associatedPolicies := make(map[string]struct{}, len(a.Policies)) - for _, policy := range a.Policies { - associatedPolicies[policy] = struct{}{} - } - for _, policy := range policies { - if _, ok := associatedPolicies[policy]; !ok { - return false - } - } - return true -} - -// ACLTokenListRequest is used to request a list of tokens -type ACLTokenListRequest struct { - GlobalOnly bool - QueryOptions -} - -// ACLTokenSpecificRequest is used to query a specific token -type ACLTokenSpecificRequest struct { - AccessorID string - QueryOptions -} - -// ACLTokenSetRequest is used to query a set of tokens -type ACLTokenSetRequest struct { - AccessorIDS []string - QueryOptions -} - -// ACLTokenListResponse is used for a list request -type ACLTokenListResponse struct { - Tokens []*ACLTokenListStub - QueryMeta -} - -// SingleACLTokenResponse is used to return a single token -type SingleACLTokenResponse struct { - Token *ACLToken - QueryMeta -} - -// ACLTokenSetResponse is used to return a set of token -type ACLTokenSetResponse struct { - Tokens map[string]*ACLToken // Keyed by Accessor ID - QueryMeta -} - -// ResolveACLTokenRequest is used to resolve a specific token -type ResolveACLTokenRequest struct { - SecretID string - QueryOptions -} - -// ResolveACLTokenResponse is used to resolve a single token -type ResolveACLTokenResponse struct { - Token *ACLToken - QueryMeta -} - -// ACLTokenDeleteRequest is used to delete a set of tokens -type ACLTokenDeleteRequest struct { - AccessorIDs []string - WriteRequest -} - -// ACLTokenBootstrapRequest is used to bootstrap ACLs -type ACLTokenBootstrapRequest struct { - Token *ACLToken // Not client specifiable - ResetIndex uint64 // Reset index is used to clear the bootstrap token - WriteRequest -} - -// ACLTokenUpsertRequest is used to upsert a set of tokens -type ACLTokenUpsertRequest struct { - Tokens []*ACLToken - WriteRequest -} - -// ACLTokenUpsertResponse is used to return from an ACLTokenUpsertRequest -type ACLTokenUpsertResponse struct { - Tokens []*ACLToken - WriteMeta -} diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/structs_codegen.go b/vendor/github.com/hashicorp/nomad/nomad/structs/structs_codegen.go deleted file mode 100644 index bdc324bb5c..0000000000 --- a/vendor/github.com/hashicorp/nomad/nomad/structs/structs_codegen.go +++ /dev/null @@ -1,3 +0,0 @@ -package structs - -//go:generate codecgen -d 100 -o structs.generated.go structs.go diff --git a/vendor/vendor.json b/vendor/vendor.json index 2551f9e614..d8a1887a14 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -1147,28 +1147,28 @@ "revisionTime": "2017-09-14T15:46:24Z" }, { - "checksumSHA1": "4tY6k1MqB50R66TJJH/rsG69Yd4=", + "checksumSHA1": "euodRTxiXS6udU7N9xRCQL6YDCg=", "path": "github.com/hashicorp/nomad/api", - "revision": "a1e08240a1cff7d1bd10c89d722110a053f8dc6e", - "revisionTime": "2017-09-20T19:48:06Z" + "revision": "ebc79fba332501f71f272ade9382d67a47c5b4e1", + "revisionTime": "2017-09-29T21:44:31Z" }, { "checksumSHA1": "Is7OvHxCEEkKpdQnW8olCxL0444=", "path": "github.com/hashicorp/nomad/api/contexts", - "revision": "a1e08240a1cff7d1bd10c89d722110a053f8dc6e", - "revisionTime": "2017-09-20T19:48:06Z" + "revision": "ebc79fba332501f71f272ade9382d67a47c5b4e1", + "revisionTime": "2017-09-29T21:44:31Z" }, { - "checksumSHA1": "GpikwcF9oi5Rrs/58xDSfiMy/I8=", + "checksumSHA1": "DE+4s/X+r987Ia93s9633mGekzg=", "path": "github.com/hashicorp/nomad/helper", - "revision": "a1e08240a1cff7d1bd10c89d722110a053f8dc6e", - 
"revisionTime": "2017-09-20T19:48:06Z" + "revision": "ebc79fba332501f71f272ade9382d67a47c5b4e1", + "revisionTime": "2017-09-29T21:44:31Z" }, { - "checksumSHA1": "hrzGvgMsH9p6MKOu3zYS8fooL3g=", - "path": "github.com/hashicorp/nomad/nomad/structs", - "revision": "a1e08240a1cff7d1bd10c89d722110a053f8dc6e", - "revisionTime": "2017-09-20T19:48:06Z" + "checksumSHA1": "mSCo/iZUEOSpeX5NsGZZzFMJqto=", + "path": "github.com/hashicorp/nomad/helper/uuid", + "revision": "ebc79fba332501f71f272ade9382d67a47c5b4e1", + "revisionTime": "2017-09-29T21:44:31Z" }, { "checksumSHA1": "/oss17GO4hXGM7QnUdI3VzcAHzA=", From a393b20cb6d96c73e52eb5af776c892b8107a45d Mon Sep 17 00:00:00 2001 From: Chris Hoffman Date: Mon, 2 Oct 2017 13:47:03 -0400 Subject: [PATCH 016/329] fixing dependencies --- .../hashicorp/nomad/helper/uuid/uuid.go | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 vendor/github.com/hashicorp/nomad/helper/uuid/uuid.go diff --git a/vendor/github.com/hashicorp/nomad/helper/uuid/uuid.go b/vendor/github.com/hashicorp/nomad/helper/uuid/uuid.go new file mode 100644 index 0000000000..145c817803 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/helper/uuid/uuid.go @@ -0,0 +1,21 @@ +package uuid + +import ( + crand "crypto/rand" + "fmt" +) + +// Generate is used to generate a random UUID +func Generate() string { + buf := make([]byte, 16) + if _, err := crand.Read(buf); err != nil { + panic(fmt.Errorf("failed to read random bytes: %v", err)) + } + + return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x", + buf[0:4], + buf[4:6], + buf[6:8], + buf[8:10], + buf[10:16]) +} From 72b0a2fcdb59abf3865f59ebc13624410cbe80a5 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Fri, 6 Oct 2017 16:03:06 +0100 Subject: [PATCH 017/329] Adding Nomad docs to the nav. Minor cosmetics fixes --- website/source/docs/secrets/nomad/index.html.md | 1 + website/source/layouts/api.erb | 3 +++ website/source/layouts/docs.erb | 4 ++++ 3 files changed, 8 insertions(+) diff --git a/website/source/docs/secrets/nomad/index.html.md b/website/source/docs/secrets/nomad/index.html.md index d5b87d107b..3f11148ca5 100644 --- a/website/source/docs/secrets/nomad/index.html.md +++ b/website/source/docs/secrets/nomad/index.html.md @@ -32,6 +32,7 @@ Successfully mounted 'nomad' at 'nomad'! For a quick start, you can use the SecretID token provided by the [Nomad ACL bootstrap process](https://www.nomadproject.io/guides/acl.html#generate-the-initial-token), although this is discouraged for production deployments. 
+ ``` $ nomad acl bootstrap Accessor ID = 95a0ee55-eaa6-2c0a-a900-ed94c156754e diff --git a/website/source/layouts/api.erb b/website/source/layouts/api.erb index ffea6b0e41..26c394e960 100644 --- a/website/source/layouts/api.erb +++ b/website/source/layouts/api.erb @@ -19,6 +19,9 @@ > Consul + > + Nomad + > Cubbyhole diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb index 9481cf3192..2707dd8717 100644 --- a/website/source/layouts/docs.erb +++ b/website/source/layouts/docs.erb @@ -179,6 +179,10 @@ Consul + > + Nomad + + > Cubbyhole From f8e6f9ed70f5e22bf253ebdc8da24a4e76d9a834 Mon Sep 17 00:00:00 2001 From: Christophe Tafani-Dereeper Date: Tue, 24 Oct 2017 16:33:57 +0200 Subject: [PATCH 018/329] Correct typos in the sys/raw documentation (#3484) --- website/source/api/system/raw.html.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/source/api/system/raw.html.md b/website/source/api/system/raw.html.md index 7963dbb50e..68766bd1ca 100644 --- a/website/source/api/system/raw.html.md +++ b/website/source/api/system/raw.html.md @@ -3,14 +3,14 @@ layout: "api" page_title: "/sys/raw - HTTP API" sidebar_current: "docs-http-system-raw" description: |- - The `/sys/raw` endpoint is access the raw underlying store in Vault. + The `/sys/raw` endpoint is used to access the raw underlying store in Vault. --- # `/sys/raw` -The `/sys/raw` endpoint is access the raw underlying store in Vault. +The `/sys/raw` endpoint is used to access the raw underlying store in Vault. -This endpont is off by default. See the +This endpoint is off by default. See the [Vault configuration documentation](/docs/configuration/index.html) to enable. From ea60ab63b8dd622f7ddbeffc962983935e0f23c1 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Tue, 24 Oct 2017 16:58:53 -0400 Subject: [PATCH 019/329] If no clusterAddr is given but we have a single synthesized cluster addresses, automatically use it. 
(#3486) --- vault/cluster.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/vault/cluster.go b/vault/cluster.go index beca4b9646..f00e830aa5 100644 --- a/vault/cluster.go +++ b/vault/cluster.go @@ -462,6 +462,9 @@ func (c *Core) ClusterTLSConfig() (*tls.Config, error) { func (c *Core) SetClusterListenerAddrs(addrs []*net.TCPAddr) { c.clusterListenerAddrs = addrs + if c.clusterAddr == "" && len(addrs) == 1 { + c.clusterAddr = addrs[0].String() + } } func (c *Core) SetClusterHandler(handler http.Handler) { From 4ac059d25f7d55e7ca70fc210b3cdb965908c262 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Wed, 25 Oct 2017 11:59:02 -0400 Subject: [PATCH 020/329] Update storedBarrierKeysPath name --- vault/seal.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vault/seal.go b/vault/seal.go index e1a3c3a62d..92d727e4a8 100644 --- a/vault/seal.go +++ b/vault/seal.go @@ -33,8 +33,8 @@ const ( // recoveryKeyPath is the path to the recovery key recoveryKeyPath = "core/recovery-key" - // hsmStoredKeysPath is the path used for storing HSM-encrypted unseal keys - hsmStoredKeysPath = "core/hsm/barrier-unseal-keys" + // storedBarrierKeysPath is the path used for storing HSM-encrypted unseal keys + storedBarrierKeysPath = "core/hsm/barrier-unseal-keys" // hsmStoredIVPath is the path to the initialization vector for stored keys hsmStoredIVPath = "core/hsm/iv" From 55109f6c569125d569dee061dcc019d82c4a176f Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Wed, 25 Oct 2017 14:43:05 -0400 Subject: [PATCH 021/329] Properly format autogenerated clusteraddr --- vault/cluster.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vault/cluster.go b/vault/cluster.go index f00e830aa5..d65c8fa19e 100644 --- a/vault/cluster.go +++ b/vault/cluster.go @@ -463,7 +463,7 @@ func (c *Core) ClusterTLSConfig() (*tls.Config, error) { func (c *Core) SetClusterListenerAddrs(addrs []*net.TCPAddr) { c.clusterListenerAddrs = addrs if c.clusterAddr == "" && len(addrs) == 1 { - c.clusterAddr = addrs[0].String() + c.clusterAddr = fmt.Sprintf("https://%s", addrs[0].String()) } } From bdd70ff0326b1a93241d3e079b7f174cefe8e727 Mon Sep 17 00:00:00 2001 From: Brian Kassouf Date: Thu, 26 Oct 2017 00:08:10 -0700 Subject: [PATCH 022/329] Fix a logic bug in the respondRaw function (#3491) --- http/logical.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/http/logical.go b/http/logical.go index 642314e59a..5aa7d077d8 100644 --- a/http/logical.go +++ b/http/logical.go @@ -212,7 +212,7 @@ func respondRaw(w http.ResponseWriter, r *http.Request, resp *logical.Response) // Get the content type header; don't require it if the body is empty contentTypeRaw, ok := resp.Data[logical.HTTPContentType] - if !ok && !nonEmpty { + if !ok && nonEmpty { retErr(w, "no content type given") return } From 1eaa214d1ef66b5dadc853e04279c586406883d9 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Thu, 19 Oct 2017 12:13:43 -0400 Subject: [PATCH 023/329] Allow underscores at the start of directories in file backend. 
Fixes #3476 --- physical/file/file.go | 12 +++++++++--- physical/testing.go | 39 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+), 3 deletions(-) diff --git a/physical/file/file.go b/physical/file/file.go index b02efc78c2..f7f51928af 100644 --- a/physical/file/file.go +++ b/physical/file/file.go @@ -247,10 +247,16 @@ func (b *FileBackend) ListInternal(prefix string) ([]string, error) { } for i, name := range names { - if name[0] == '_' { - names[i] = name[1:] - } else { + fi, err := os.Stat(filepath.Join(path, name)) + if err != nil { + return nil, err + } + if fi.IsDir() { names[i] = name + "/" + } else { + if name[0] == '_' { + names[i] = name[1:] + } } } diff --git a/physical/testing.go b/physical/testing.go index 4c75378608..6826a95acc 100644 --- a/physical/testing.go +++ b/physical/testing.go @@ -194,6 +194,45 @@ func ExerciseBackend(t *testing.T, b Backend) { if len(keys) != 0 { t.Fatalf("bad: %v", keys) } + + // Underscores should not trip things up; ref GH-3476 + e = &Entry{Key: "_zip/_zap", Value: []byte("foobar")} + err = b.Put(e) + if err != nil { + t.Fatalf("err: %v", err) + } + e, err = b.Get("_zip/_zap") + if err != nil { + t.Fatalf("err: %v", err) + } + if e == nil { + t.Fatal("got nil entry") + } + vals, err := b.List("") + if err != nil { + t.Fatal(err) + } + if len(vals) != 1 || vals[0] != "_zip/" { + t.Fatalf("bad: %v", vals) + } + vals, err = b.List("_zip/") + if err != nil { + t.Fatal(err) + } + if len(vals) != 1 || vals[0] != "_zap" { + t.Fatalf("bad: %v", vals) + } + err = b.Delete("_zip/_zap") + if err != nil { + t.Fatal(err) + } + vals, err = b.List("") + if err != nil { + t.Fatal(err) + } + if len(vals) != 0 { + t.Fatalf("bad: %v", vals) + } } func ExerciseBackend_ListPrefix(t *testing.T, b Backend) { From 67485b4705ac2ab2c0c19a7ea26b25fcb49b04fe Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Thu, 19 Oct 2017 12:27:56 -0400 Subject: [PATCH 024/329] Add some more tests --- physical/testing.go | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/physical/testing.go b/physical/testing.go index 6826a95acc..e6f55cb6fb 100644 --- a/physical/testing.go +++ b/physical/testing.go @@ -196,7 +196,12 @@ func ExerciseBackend(t *testing.T, b Backend) { } // Underscores should not trip things up; ref GH-3476 - e = &Entry{Key: "_zip/_zap", Value: []byte("foobar")} + e = &Entry{Key: "_zip", Value: []byte("foobar")} + err = b.Put(e) + if err != nil { + t.Fatalf("err: %v", err) + } + e = &Entry{Key: "_zip/_zap", Value: []byte("boofar")} err = b.Put(e) if err != nil { t.Fatalf("err: %v", err) @@ -212,7 +217,7 @@ func ExerciseBackend(t *testing.T, b Backend) { if err != nil { t.Fatal(err) } - if len(vals) != 1 || vals[0] != "_zip/" { + if len(vals) != 2 || vals[0] != "_zip/" || vals[1] != "_zip" { t.Fatalf("bad: %v", vals) } vals, err = b.List("_zip/") @@ -230,6 +235,17 @@ func ExerciseBackend(t *testing.T, b Backend) { if err != nil { t.Fatal(err) } + if len(vals) != 1 || vals[0] != "_zip" { + t.Fatalf("bad: %v", vals) + } + err = b.Delete("_zip") + if err != nil { + t.Fatal(err) + } + vals, err = b.List("") + if err != nil { + t.Fatal(err) + } if len(vals) != 0 { t.Fatalf("bad: %v", vals) } From 037dfeb83c69eccabc680bf14b65e42ba8edf3c9 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Thu, 19 Oct 2017 16:23:58 -0400 Subject: [PATCH 025/329] Fix testing --- physical/testing.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/physical/testing.go b/physical/testing.go index e6f55cb6fb..3ea42c2bbf 
100644 --- a/physical/testing.go +++ b/physical/testing.go @@ -217,9 +217,14 @@ func ExerciseBackend(t *testing.T, b Backend) { if err != nil { t.Fatal(err) } - if len(vals) != 2 || vals[0] != "_zip/" || vals[1] != "_zip" { + if len(vals) != 2 || vals[0] == vals[1] { t.Fatalf("bad: %v", vals) } + for _, val := range vals { + if val != "_zip/" || val != "_zip" { + t.Fatalf("bad val: %v", val) + } + } vals, err = b.List("_zip/") if err != nil { t.Fatal(err) From 9c7b0d05ff8d2423125542ce45b34142ef877902 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Thu, 19 Oct 2017 16:53:13 -0400 Subject: [PATCH 026/329] Fix more tests --- physical/testing.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/physical/testing.go b/physical/testing.go index 3ea42c2bbf..3e83069428 100644 --- a/physical/testing.go +++ b/physical/testing.go @@ -221,7 +221,7 @@ func ExerciseBackend(t *testing.T, b Backend) { t.Fatalf("bad: %v", vals) } for _, val := range vals { - if val != "_zip/" || val != "_zip" { + if val != "_zip/" && val != "_zip" { t.Fatalf("bad val: %v", val) } } From 04f7af1f5594a98f46857b898673d6d22c25717f Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Fri, 20 Oct 2017 18:24:15 -0400 Subject: [PATCH 027/329] Add prefixing to couch to fix the error that was exposed --- physical/couchdb/couchdb.go | 36 +++++++++++++++++-- .../configuration/storage/couchdb.html.md | 6 ++++ .../guides/upgrading/upgrade-to-0.9.0.html.md | 24 +++++++++++++ website/source/layouts/guides.erb | 3 ++ 4 files changed, 67 insertions(+), 2 deletions(-) create mode 100644 website/source/guides/upgrading/upgrade-to-0.9.0.html.md diff --git a/physical/couchdb/couchdb.go b/physical/couchdb/couchdb.go index e7f945f118..a24668cfb3 100644 --- a/physical/couchdb/couchdb.go +++ b/physical/couchdb/couchdb.go @@ -22,6 +22,7 @@ import ( // CouchDBBackend allows the management of couchdb users type CouchDBBackend struct { logger log.Logger + prefixed bool client *couchDBClient permitPool *physical.PermitPool } @@ -158,6 +159,19 @@ func buildCouchDBBackend(conf map[string]string, logger log.Logger) (*CouchDBBac username = conf["username"] } + prefixed := true + prefixedStr := os.Getenv("COUCHDB_PREFIXED") + if prefixedStr == "" { + prefixedStr = conf["prefixed"] + } + if prefixedStr != "" { + var err error + prefixed, err = strconv.ParseBool(prefixedStr) + if err != nil { + return nil, err + } + } + password := os.Getenv("COUCHDB_PASSWORD") if password == "" { password = conf["password"] @@ -183,6 +197,7 @@ func buildCouchDBBackend(conf map[string]string, logger log.Logger) (*CouchDBBac password: password, Client: cleanhttp.DefaultPooledClient(), }, + prefixed: prefixed, logger: logger, permitPool: physical.NewPermitPool(maxParInt), }, nil @@ -227,6 +242,10 @@ func (m *CouchDBBackend) Delete(key string) error { func (m *CouchDBBackend) List(prefix string) ([]string, error) { defer metrics.MeasureSince([]string{"couchdb", "list"}, time.Now()) + if m.prefixed { + prefix = "$" + prefix + } + m.permitPool.Acquire() defer m.permitPool.Release() @@ -275,6 +294,10 @@ func NewTransactionalCouchDBBackend(conf map[string]string, logger log.Logger) ( func (m *CouchDBBackend) GetInternal(key string) (*physical.Entry, error) { defer metrics.MeasureSince([]string{"couchdb", "get"}, time.Now()) + if m.prefixed { + key = "$" + key + } + return m.client.get(key) } @@ -282,12 +305,17 @@ func (m *CouchDBBackend) GetInternal(key string) (*physical.Entry, error) { func (m *CouchDBBackend) PutInternal(entry *physical.Entry) error { defer 
metrics.MeasureSince([]string{"couchdb", "put"}, time.Now()) - revision, _ := m.client.rev(url.PathEscape(entry.Key)) + key := entry.Key + if m.prefixed { + key = "$" + entry.Key + } + + revision, _ := m.client.rev(url.PathEscape(key)) return m.client.put(couchDBEntry{ Entry: entry, Rev: revision, - ID: url.PathEscape(entry.Key), + ID: url.PathEscape(key), }) } @@ -295,6 +323,10 @@ func (m *CouchDBBackend) PutInternal(entry *physical.Entry) error { func (m *CouchDBBackend) DeleteInternal(key string) error { defer metrics.MeasureSince([]string{"couchdb", "delete"}, time.Now()) + if m.prefixed { + key = "$" + key + } + revision, _ := m.client.rev(url.PathEscape(key)) deleted := true return m.client.put(couchDBEntry{ diff --git a/website/source/docs/configuration/storage/couchdb.html.md b/website/source/docs/configuration/storage/couchdb.html.md index 5df23e5dab..b9bb556c15 100644 --- a/website/source/docs/configuration/storage/couchdb.html.md +++ b/website/source/docs/configuration/storage/couchdb.html.md @@ -30,6 +30,12 @@ storage "couchdb" { ## `couchdb` Parameters +- `prefixed` `(string: "true")` – Specifies whether each value written to + CouchDB should be prefixed with `$`. If turned off, Vault may run into error + conditions if values are written that begin with an underscore, since it is a + reserved prefix in CouchDB. This can also be provided via the environment + variable `COUCHDB_PREFIXED`. + - `endpoint` `(string: "")` – Specifies your CouchDB endpoint. This can also be provided via the environment variable `COUCHDB_ENDPOINT`. diff --git a/website/source/guides/upgrading/upgrade-to-0.9.0.html.md b/website/source/guides/upgrading/upgrade-to-0.9.0.html.md new file mode 100644 index 0000000000..33a7106582 --- /dev/null +++ b/website/source/guides/upgrading/upgrade-to-0.9.0.html.md @@ -0,0 +1,24 @@ +--- +layout: "guides" +page_title: "Upgrading to Vault 0.9.0 - Guides" +sidebar_current: "guides-upgrading-to-0.9.0" +description: |- + This page contains the list of deprecations and important or breaking changes + for Vault 0.9.0. Please read it carefully. +--- + +# Overview + +This page contains the list of deprecations and important or breaking changes +for Vault 0.9.0 compared to the most recent release. Please read it carefully. + +## CouchDB Storage Changes + +Vault may write values to storage that start with an underscore (`_`) +character. This is a reserved character in CouchDB, which can cause breakage. +As a result, this backend now stores each value prefixed with a `$` character. + +If you are upgrading from existing CouchDB usage, you can turn off this +behavior by setting the `"prefixed"` configuration value to `"false"`. +Alternately, if you need to handle underscores at the start of keys, you can +rewrite your existing keys to start with a `$` character. 
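The `$`-prefixing rule described in the guide text above is small enough to sketch on its own. The following standalone Go snippet is only an illustration of that rule, not part of the patch: the `prefixKey` helper and the sample keys are invented for the example, while the backend itself inlines the equivalent logic in its get/put/delete/list paths.

```go
package main

import "fmt"

// prefixKey applies the "$" prefix discussed above so that logical keys
// beginning with an underscore (a reserved prefix in CouchDB) are never
// used verbatim as document IDs. The helper name is illustrative only.
func prefixKey(key string) string {
	return "$" + key
}

func main() {
	// Show how a few representative keys would map to stored document IDs.
	for _, key := range []string{"_zip", "_zip/_zap", "core/seal-config"} {
		fmt.Printf("logical key %-16q -> stored document ID %q\n", key, prefixKey(key))
	}
}
```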
diff --git a/website/source/layouts/guides.erb b/website/source/layouts/guides.erb index e95bb84f67..c1f712d1fe 100644 --- a/website/source/layouts/guides.erb +++ b/website/source/layouts/guides.erb @@ -53,6 +53,9 @@ > Upgrade to 0.8.0 + > + Upgrade to 0.9.0 + From 5d1e06ae93bc4f4c5fae167cede1521c58b9c4f5 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Thu, 26 Oct 2017 14:54:30 -0400 Subject: [PATCH 028/329] Change prefix to a string that can be specified, rather than a bool --- physical/couchdb/couchdb.go | 35 ++++++++----------- .../configuration/storage/couchdb.html.md | 7 ++-- 2 files changed, 17 insertions(+), 25 deletions(-) diff --git a/physical/couchdb/couchdb.go b/physical/couchdb/couchdb.go index a24668cfb3..6fb7ab5645 100644 --- a/physical/couchdb/couchdb.go +++ b/physical/couchdb/couchdb.go @@ -22,7 +22,7 @@ import ( // CouchDBBackend allows the management of couchdb users type CouchDBBackend struct { logger log.Logger - prefixed bool + prefix string client *couchDBClient permitPool *physical.PermitPool } @@ -159,17 +159,10 @@ func buildCouchDBBackend(conf map[string]string, logger log.Logger) (*CouchDBBac username = conf["username"] } - prefixed := true - prefixedStr := os.Getenv("COUCHDB_PREFIXED") - if prefixedStr == "" { - prefixedStr = conf["prefixed"] - } - if prefixedStr != "" { - var err error - prefixed, err = strconv.ParseBool(prefixedStr) - if err != nil { - return nil, err - } + prefix := "$" + prefixedStr, ok := conf["prefixed"] + if ok { + prefix = prefixedStr } password := os.Getenv("COUCHDB_PASSWORD") @@ -197,7 +190,7 @@ func buildCouchDBBackend(conf map[string]string, logger log.Logger) (*CouchDBBac password: password, Client: cleanhttp.DefaultPooledClient(), }, - prefixed: prefixed, + prefix: prefix, logger: logger, permitPool: physical.NewPermitPool(maxParInt), }, nil @@ -242,8 +235,8 @@ func (m *CouchDBBackend) Delete(key string) error { func (m *CouchDBBackend) List(prefix string) ([]string, error) { defer metrics.MeasureSince([]string{"couchdb", "list"}, time.Now()) - if m.prefixed { - prefix = "$" + prefix + if m.prefix != "" { + prefix = m.prefix + prefix } m.permitPool.Acquire() @@ -294,8 +287,8 @@ func NewTransactionalCouchDBBackend(conf map[string]string, logger log.Logger) ( func (m *CouchDBBackend) GetInternal(key string) (*physical.Entry, error) { defer metrics.MeasureSince([]string{"couchdb", "get"}, time.Now()) - if m.prefixed { - key = "$" + key + if m.prefix != "" { + key = m.prefix + key } return m.client.get(key) @@ -306,8 +299,8 @@ func (m *CouchDBBackend) PutInternal(entry *physical.Entry) error { defer metrics.MeasureSince([]string{"couchdb", "put"}, time.Now()) key := entry.Key - if m.prefixed { - key = "$" + entry.Key + if m.prefix != "" { + key = m.prefix + entry.Key } revision, _ := m.client.rev(url.PathEscape(key)) @@ -323,8 +316,8 @@ func (m *CouchDBBackend) PutInternal(entry *physical.Entry) error { func (m *CouchDBBackend) DeleteInternal(key string) error { defer metrics.MeasureSince([]string{"couchdb", "delete"}, time.Now()) - if m.prefixed { - key = "$" + key + if m.prefix != "" { + key = m.prefix + key } revision, _ := m.client.rev(url.PathEscape(key)) diff --git a/website/source/docs/configuration/storage/couchdb.html.md b/website/source/docs/configuration/storage/couchdb.html.md index b9bb556c15..50cc90cc59 100644 --- a/website/source/docs/configuration/storage/couchdb.html.md +++ b/website/source/docs/configuration/storage/couchdb.html.md @@ -30,11 +30,10 @@ storage "couchdb" { ## `couchdb` Parameters -- `prefixed` 
`(string: "true")` – Specifies whether each value written to - CouchDB should be prefixed with `$`. If turned off, Vault may run into error +- `prefix` `(string: "$")` – Specifies a prefix to use for each value written + to CouchDB. If turned off (by setting to `""`, Vault may run into error conditions if values are written that begin with an underscore, since it is a - reserved prefix in CouchDB. This can also be provided via the environment - variable `COUCHDB_PREFIXED`. + reserved prefix in CouchDB. - `endpoint` `(string: "")` – Specifies your CouchDB endpoint. This can also be provided via the environment variable `COUCHDB_ENDPOINT`. From 9973d282932a43cf030a98472bf077a3ed97dad2 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Thu, 26 Oct 2017 15:26:54 -0400 Subject: [PATCH 029/329] Revert couchdb changes --- physical/couchdb/couchdb.go | 29 ++----------------- .../configuration/storage/couchdb.html.md | 5 ---- .../guides/upgrading/upgrade-to-0.9.0.html.md | 24 --------------- website/source/layouts/guides.erb | 3 -- 4 files changed, 2 insertions(+), 59 deletions(-) delete mode 100644 website/source/guides/upgrading/upgrade-to-0.9.0.html.md diff --git a/physical/couchdb/couchdb.go b/physical/couchdb/couchdb.go index 6fb7ab5645..e7f945f118 100644 --- a/physical/couchdb/couchdb.go +++ b/physical/couchdb/couchdb.go @@ -22,7 +22,6 @@ import ( // CouchDBBackend allows the management of couchdb users type CouchDBBackend struct { logger log.Logger - prefix string client *couchDBClient permitPool *physical.PermitPool } @@ -159,12 +158,6 @@ func buildCouchDBBackend(conf map[string]string, logger log.Logger) (*CouchDBBac username = conf["username"] } - prefix := "$" - prefixedStr, ok := conf["prefixed"] - if ok { - prefix = prefixedStr - } - password := os.Getenv("COUCHDB_PASSWORD") if password == "" { password = conf["password"] @@ -190,7 +183,6 @@ func buildCouchDBBackend(conf map[string]string, logger log.Logger) (*CouchDBBac password: password, Client: cleanhttp.DefaultPooledClient(), }, - prefix: prefix, logger: logger, permitPool: physical.NewPermitPool(maxParInt), }, nil @@ -235,10 +227,6 @@ func (m *CouchDBBackend) Delete(key string) error { func (m *CouchDBBackend) List(prefix string) ([]string, error) { defer metrics.MeasureSince([]string{"couchdb", "list"}, time.Now()) - if m.prefix != "" { - prefix = m.prefix + prefix - } - m.permitPool.Acquire() defer m.permitPool.Release() @@ -287,10 +275,6 @@ func NewTransactionalCouchDBBackend(conf map[string]string, logger log.Logger) ( func (m *CouchDBBackend) GetInternal(key string) (*physical.Entry, error) { defer metrics.MeasureSince([]string{"couchdb", "get"}, time.Now()) - if m.prefix != "" { - key = m.prefix + key - } - return m.client.get(key) } @@ -298,17 +282,12 @@ func (m *CouchDBBackend) GetInternal(key string) (*physical.Entry, error) { func (m *CouchDBBackend) PutInternal(entry *physical.Entry) error { defer metrics.MeasureSince([]string{"couchdb", "put"}, time.Now()) - key := entry.Key - if m.prefix != "" { - key = m.prefix + entry.Key - } - - revision, _ := m.client.rev(url.PathEscape(key)) + revision, _ := m.client.rev(url.PathEscape(entry.Key)) return m.client.put(couchDBEntry{ Entry: entry, Rev: revision, - ID: url.PathEscape(key), + ID: url.PathEscape(entry.Key), }) } @@ -316,10 +295,6 @@ func (m *CouchDBBackend) PutInternal(entry *physical.Entry) error { func (m *CouchDBBackend) DeleteInternal(key string) error { defer metrics.MeasureSince([]string{"couchdb", "delete"}, time.Now()) - if m.prefix != "" { - key = m.prefix + 
key - } - revision, _ := m.client.rev(url.PathEscape(key)) deleted := true return m.client.put(couchDBEntry{ diff --git a/website/source/docs/configuration/storage/couchdb.html.md b/website/source/docs/configuration/storage/couchdb.html.md index 50cc90cc59..5df23e5dab 100644 --- a/website/source/docs/configuration/storage/couchdb.html.md +++ b/website/source/docs/configuration/storage/couchdb.html.md @@ -30,11 +30,6 @@ storage "couchdb" { ## `couchdb` Parameters -- `prefix` `(string: "$")` – Specifies a prefix to use for each value written - to CouchDB. If turned off (by setting to `""`, Vault may run into error - conditions if values are written that begin with an underscore, since it is a - reserved prefix in CouchDB. - - `endpoint` `(string: "")` – Specifies your CouchDB endpoint. This can also be provided via the environment variable `COUCHDB_ENDPOINT`. diff --git a/website/source/guides/upgrading/upgrade-to-0.9.0.html.md b/website/source/guides/upgrading/upgrade-to-0.9.0.html.md deleted file mode 100644 index 33a7106582..0000000000 --- a/website/source/guides/upgrading/upgrade-to-0.9.0.html.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -layout: "guides" -page_title: "Upgrading to Vault 0.9.0 - Guides" -sidebar_current: "guides-upgrading-to-0.9.0" -description: |- - This page contains the list of deprecations and important or breaking changes - for Vault 0.9.0. Please read it carefully. ---- - -# Overview - -This page contains the list of deprecations and important or breaking changes -for Vault 0.9.0 compared to the most recent release. Please read it carefully. - -## CouchDB Storage Changes - -Vault may write values to storage that start with an underscore (`_`) -character. This is a reserved character in CouchDB, which can cause breakage. -As a result, this backend now stores each value prefixed with a `$` character. - -If you are upgrading from existing CouchDB usage, you can turn off this -behavior by setting the `"prefixed"` configuration value to `"false"`. -Alternately, if you need to handle underscores at the start of keys, you can -rewrite your existing keys to start with a `$` character. 
diff --git a/website/source/layouts/guides.erb b/website/source/layouts/guides.erb index c1f712d1fe..e95bb84f67 100644 --- a/website/source/layouts/guides.erb +++ b/website/source/layouts/guides.erb @@ -53,9 +53,6 @@ > Upgrade to 0.8.0 - > - Upgrade to 0.9.0 - From 4ed4fb800be97d21e219983c969e7b6564d77999 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Thu, 26 Oct 2017 15:29:10 -0400 Subject: [PATCH 030/329] Move underscore tests to file from physical testing --- physical/file/file_test.go | 61 ++++++++++++++++++++++++++++++++++++++ physical/testing.go | 60 ------------------------------------- 2 files changed, 61 insertions(+), 60 deletions(-) diff --git a/physical/file/file_test.go b/physical/file/file_test.go index 6438e213ca..da9b6de0e9 100644 --- a/physical/file/file_test.go +++ b/physical/file/file_test.go @@ -173,5 +173,66 @@ func TestFileBackend(t *testing.T) { } physical.ExerciseBackend(t, b) + + // Underscores should not trip things up; ref GH-3476 + e := &physical.Entry{Key: "_zip", Value: []byte("foobar")} + err = b.Put(e) + if err != nil { + t.Fatalf("err: %v", err) + } + e = &physical.Entry{Key: "_zip/_zap", Value: []byte("boofar")} + err = b.Put(e) + if err != nil { + t.Fatalf("err: %v", err) + } + e, err = b.Get("_zip/_zap") + if err != nil { + t.Fatalf("err: %v", err) + } + if e == nil { + t.Fatal("got nil entry") + } + vals, err := b.List("") + if err != nil { + t.Fatal(err) + } + if len(vals) != 2 || vals[0] == vals[1] { + t.Fatalf("bad: %v", vals) + } + for _, val := range vals { + if val != "_zip/" && val != "_zip" { + t.Fatalf("bad val: %v", val) + } + } + vals, err = b.List("_zip/") + if err != nil { + t.Fatal(err) + } + if len(vals) != 1 || vals[0] != "_zap" { + t.Fatalf("bad: %v", vals) + } + err = b.Delete("_zip/_zap") + if err != nil { + t.Fatal(err) + } + vals, err = b.List("") + if err != nil { + t.Fatal(err) + } + if len(vals) != 1 || vals[0] != "_zip" { + t.Fatalf("bad: %v", vals) + } + err = b.Delete("_zip") + if err != nil { + t.Fatal(err) + } + vals, err = b.List("") + if err != nil { + t.Fatal(err) + } + if len(vals) != 0 { + t.Fatalf("bad: %v", vals) + } + physical.ExerciseBackend_ListPrefix(t, b) } diff --git a/physical/testing.go b/physical/testing.go index 3e83069428..4c75378608 100644 --- a/physical/testing.go +++ b/physical/testing.go @@ -194,66 +194,6 @@ func ExerciseBackend(t *testing.T, b Backend) { if len(keys) != 0 { t.Fatalf("bad: %v", keys) } - - // Underscores should not trip things up; ref GH-3476 - e = &Entry{Key: "_zip", Value: []byte("foobar")} - err = b.Put(e) - if err != nil { - t.Fatalf("err: %v", err) - } - e = &Entry{Key: "_zip/_zap", Value: []byte("boofar")} - err = b.Put(e) - if err != nil { - t.Fatalf("err: %v", err) - } - e, err = b.Get("_zip/_zap") - if err != nil { - t.Fatalf("err: %v", err) - } - if e == nil { - t.Fatal("got nil entry") - } - vals, err := b.List("") - if err != nil { - t.Fatal(err) - } - if len(vals) != 2 || vals[0] == vals[1] { - t.Fatalf("bad: %v", vals) - } - for _, val := range vals { - if val != "_zip/" && val != "_zip" { - t.Fatalf("bad val: %v", val) - } - } - vals, err = b.List("_zip/") - if err != nil { - t.Fatal(err) - } - if len(vals) != 1 || vals[0] != "_zap" { - t.Fatalf("bad: %v", vals) - } - err = b.Delete("_zip/_zap") - if err != nil { - t.Fatal(err) - } - vals, err = b.List("") - if err != nil { - t.Fatal(err) - } - if len(vals) != 1 || vals[0] != "_zip" { - t.Fatalf("bad: %v", vals) - } - err = b.Delete("_zip") - if err != nil { - t.Fatal(err) - } - vals, err = b.List("") - if err != nil 
{ - t.Fatal(err) - } - if len(vals) != 0 { - t.Fatalf("bad: %v", vals) - } } func ExerciseBackend_ListPrefix(t *testing.T, b Backend) { From 672feed0e89717e052f2d604fcf5fb5b043478a9 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Thu, 26 Oct 2017 15:30:55 -0400 Subject: [PATCH 031/329] changelog++ --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 36c01ebc29..f62fb16113 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,8 @@ BUG FIXES: * auth/radius: Fix logging in in some situations [GH-3461] * physical/etcd3: Fix some listing issues due to how etcd3 does prefix matching [GH-3406] + * physical/file: Fix listing when underscores are the first component of a + path [GH-3476] * plugins: Allow response errors to be returned from backend plugins [GH-3412] ## 0.8.3 (September 19th, 2017) From e26573cb7821ec3e791b68d9bdaf777e584dd63e Mon Sep 17 00:00:00 2001 From: AJ Bourg Date: Fri, 27 Oct 2017 08:42:33 -0600 Subject: [PATCH 032/329] Add a doc for the token helper (#3411) * Add token helper docs. * Update it so the new token helpers page appears in the navigation. --- .../source/docs/commands/token-helper.html.md | 63 +++++++++++++++++++ website/source/layouts/docs.erb | 3 + 2 files changed, 66 insertions(+) create mode 100644 website/source/docs/commands/token-helper.html.md diff --git a/website/source/docs/commands/token-helper.html.md b/website/source/docs/commands/token-helper.html.md new file mode 100644 index 0000000000..37c6b22a2e --- /dev/null +++ b/website/source/docs/commands/token-helper.html.md @@ -0,0 +1,63 @@ +--- +layout: "docs" +page_title: "Token Helpers" +sidebar_current: "docs-commands-token-helper" +description: |- + The Vault CLI supports external token helpers that make retrieving, setting and erasing tokens simpler to use. +--- + +# Token Helpers + +The Vault CLI provides a built in tool for authenticating to any of the enabled auth backends. By default the Vault CLI will take the generated token after a successful authentication and store it on disk in the `~/.vault_token` file. This functionality can change in Vault via the use of a token helper. A token helper is an external program that Vault calls to save, retrieve or erase a saved token. The token helper could be a very simple script or a more complex program depending on your needs. The interface to the external token helper is extremely simple. + +## Configuration + +To configure a token helper, edit (or create) the file `~/.vault` and add a line similar to: + +``` +token_helper = "/path/to/token/helper.sh" +``` + +You will need to use the fully qualified path to the token helper script. The script should be executable. + +## Developing a Token Helper + +The interface to a token helper is extremely simple: the script is passed with one argument that could be `get`, `store` or `erase`. If the argument is `get`, the script should do whatever work it needs to do to retrieve the stored token and then print the token to `STDOUT`. If the argument is `store`, Vault is asking you to store the token. Finally, if the argument is `erase`, your program should erase the stored token. + +If your program succeeds, it should exit with status code 0. If it encounters an issue that prevents it from working, it should exit with some other status code. You should write a user-friendly error message to `STDERR`. You should never write anything other than the token to `STDOUT`, as Vault assumes whatever it gets on `STDOUT` is the token. 
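
As a complement to the Ruby example below, here is a minimal sketch of the same `get`/`store`/`erase` protocol written in Go. The storage location `~/.vault_token_helper_db` is purely illustrative (it is not a path Vault knows about), and this is a sketch of the contract rather than the helper Vault ships with.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	if len(os.Args) != 2 {
		fmt.Fprintln(os.Stderr, "usage: token-helper get|store|erase")
		os.Exit(1)
	}

	// Illustrative storage location; any path the helper can read and write will do.
	path := filepath.Join(os.Getenv("HOME"), ".vault_token_helper_db")

	switch os.Args[1] {
	case "get":
		b, err := ioutil.ReadFile(path)
		if err != nil {
			if os.IsNotExist(err) {
				os.Exit(0) // no token stored yet is not an error
			}
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		// Only the token itself may be written to STDOUT.
		fmt.Print(strings.TrimSpace(string(b)))
	case "store":
		// Vault passes the token to store on STDIN.
		b, err := ioutil.ReadAll(os.Stdin)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		if err := ioutil.WriteFile(path, b, 0600); err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
	case "erase":
		if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
	default:
		fmt.Fprintln(os.Stderr, "unknown command:", os.Args[1])
		os.Exit(1)
	}
}
```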
+ +### Example Token Helper + +This is an example token helper written in Ruby that stores and retrieves tokens in a json file called `~/.vault_tokens`. The key is the environment variable $VAULT_ADDR, this allows the Vault user to easily store and retrieve tokens from a number of different Vault servers. + +``` +#!/usr/bin/env ruby + +require 'json' + +unless ENV['VAULT_ADDR'] + STDERR.puts "No VAULT_ADDR environment variable set. Set it and run me again!" + exit 100 +end + +begin + tokens = JSON.parse(File.read("#{ENV['HOME']}/.vault_tokens")) +rescue Errno::ENOENT => e + # file doesn't exist so create a blank hash for it + tokens = {} +end + +case ARGV.first +when 'get' + print tokens[ENV['VAULT_ADDR']] if tokens[ENV['VAULT_ADDR']] + exit 0 +when 'store' + tokens[ENV['VAULT_ADDR']] = STDIN.read +when 'erase' + tokens.delete!(ENV['VAULT_ADDR']) +end + +File.open("#{ENV['HOME']}/.vault_tokens", 'w') { |file| file.write(tokens.to_json) } +``` + + diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb index 9481cf3192..9950198191 100644 --- a/website/source/layouts/docs.erb +++ b/website/source/layouts/docs.erb @@ -163,6 +163,9 @@ > Environment Variables + > + Token Helpers + From 6157a89f1b2036f9bd91c4fb6fba25f5e96af9d6 Mon Sep 17 00:00:00 2001 From: smeach <30698595+smeach@users.noreply.github.com> Date: Fri, 27 Oct 2017 15:44:56 +0100 Subject: [PATCH 033/329] Updated cli arg to reflect text description (#3487) --- website/source/docs/install/index.html.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/source/docs/install/index.html.md b/website/source/docs/install/index.html.md index 587553368a..36eb930725 100644 --- a/website/source/docs/install/index.html.md +++ b/website/source/docs/install/index.html.md @@ -64,10 +64,10 @@ as a copy of [`git`](https://www.git-scm.com/) in your `PATH`. ## Verifying the Installation -To verify Vault is properly installed, run `vault -v` on your system. You should +To verify Vault is properly installed, run `vault -h` on your system. You should see help output. If you are executing it from the command line, make sure it is on your PATH or you may get an error about Vault not being found. ```shell -$ vault -v +$ vault -h ``` From 30aab2aa2feff6dd81ca379f2489132ff5bc284a Mon Sep 17 00:00:00 2001 From: Vishal Nayak Date: Fri, 27 Oct 2017 11:23:15 -0400 Subject: [PATCH 034/329] aws-ec2: Avoid audit logging of custom nonces (#3381) --- builtin/credential/aws/backend_test.go | 14 +++++++++++++- builtin/credential/aws/path_login.go | 8 ++++---- website/source/docs/auth/aws.html.md | 5 +++++ 3 files changed, 22 insertions(+), 5 deletions(-) diff --git a/builtin/credential/aws/backend_test.go b/builtin/credential/aws/backend_test.go index 881ca85dc9..e15d6daeb6 100644 --- a/builtin/credential/aws/backend_test.go +++ b/builtin/credential/aws/backend_test.go @@ -1125,6 +1125,11 @@ func TestBackendAcc_LoginWithInstanceIdentityDocAndWhitelistIdentity(t *testing. t.Fatalf("instance ID not present in the response object") } + _, ok := resp.Auth.Metadata["nonce"] + if ok { + t.Fatalf("client nonce should not have been returned") + } + loginInput["nonce"] = "changed-vault-client-nonce" // try to login again with changed nonce resp, err = b.HandleRequest(loginRequest) @@ -1159,7 +1164,9 @@ func TestBackendAcc_LoginWithInstanceIdentityDocAndWhitelistIdentity(t *testing. t.Fatalf("failed to delete whitelist identity") } - // Allow a fresh login. 
+ // Allow a fresh login without supplying the nonce + delete(loginInput, "nonce") + resp, err = b.HandleRequest(loginRequest) if err != nil { t.Fatal(err) @@ -1167,6 +1174,11 @@ func TestBackendAcc_LoginWithInstanceIdentityDocAndWhitelistIdentity(t *testing. if resp == nil || resp.Auth == nil || resp.IsError() { t.Fatalf("login attempt failed") } + + _, ok = resp.Auth.Metadata["nonce"] + if !ok { + t.Fatalf("expected nonce to be returned") + } } func TestBackend_pathStsConfig(t *testing.T) { diff --git a/builtin/credential/aws/path_login.go b/builtin/credential/aws/path_login.go index d382701cfc..30c69047ed 100644 --- a/builtin/credential/aws/path_login.go +++ b/builtin/credential/aws/path_login.go @@ -643,7 +643,7 @@ func (b *backend) pathLoginUpdateEc2( return logical.ErrorResponse(err.Error()), nil } - // Don't let subsequent login attempts to bypass in initial + // Don't let subsequent login attempts to bypass the initial // intent of disabling reauthentication, despite the properties // of role getting updated. For example: Role has the value set // to 'false', a role-tag login sets the value to 'true', then @@ -693,7 +693,6 @@ func (b *backend) pathLoginUpdateEc2( if roleTagResp != nil { // Role tag is enabled on the role. - // // Overwrite the policies with the ones returned from processing the role tag // If there are no policies on the role tag, policies on the role are inherited. @@ -777,8 +776,9 @@ func (b *backend) pathLoginUpdateEc2( }, } - // Return the nonce only if reauthentication is allowed - if !disallowReauthentication { + // Return the nonce only if reauthentication is allowed and if the nonce + // was not supplied by the user. + if !disallowReauthentication && !clientNonceSupplied { // Echo the client nonce back. If nonce param was not supplied // to the endpoint at all (setting it to empty string does not // qualify here), callers should extract out the nonce from diff --git a/website/source/docs/auth/aws.html.md b/website/source/docs/auth/aws.html.md index cce67fddb6..2a0df244b3 100644 --- a/website/source/docs/auth/aws.html.md +++ b/website/source/docs/auth/aws.html.md @@ -384,6 +384,11 @@ instance, it is not a bad idea to firewall access to the signed PKCS#7 metadata to ensure that it is accessible only to the matching user(s) that require access. +The client nonce which is generated by the backend and which gets returned +along with the authentication response, will be audit logged in plaintext. If +this is undesired, clients can supply a custom nonce to the login endpoint +which will not be returned and hence will not be audit logged. + ## Advanced Options and Caveats ### Dynamic Management of Policies Via Role Tags From f7314938bf8548dd8b8ded984f6aff86d7008a51 Mon Sep 17 00:00:00 2001 From: vishalnayak Date: Fri, 27 Oct 2017 11:29:30 -0400 Subject: [PATCH 035/329] changelog++ --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f62fb16113..efd986703f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,12 @@ ## 0.8.4 (Unreleased) +DEPRECATIONS/CHANGES: + * aws-ec2: The client nonce generated by the backend that gets returned along + with the authentication response will be audited in plaintext. If this is + undesired, the clients can choose to supply a custom nonce to the login + endpoint. The custom nonce set by the client will from now on, not be + returned back with the authentication response, and hence not audit logged. 
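
For clients that want to opt in to this behavior, the snippet below sketches an EC2 login that supplies its own nonce through the Go API client. The mount path, role name, and nonce value are placeholders, and the PKCS#7 payload is assumed to have been fetched from the instance metadata service; this is illustrative only and not taken from the patch itself.

```go
// Sketch: logging in to the aws-ec2 auth method with a client-chosen nonce so
// that the nonce is never echoed back and therefore never audit logged.
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	pkcs7 := "..." // signed instance identity document, read from instance metadata

	secret, err := client.Logical().Write("auth/aws-ec2/login", map[string]interface{}{
		"role":  "dev-role",                // placeholder role name
		"pkcs7": pkcs7,
		"nonce": "my-client-chosen-nonce", // supplied by the client, so not returned or logged
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(secret.Auth.ClientToken)
}
```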
+ IMPROVEMENTS: * api: Add ability to set custom headers on each call [GH-3394] From 6cfdd7b40c91fa5367ddd1f0018357abbdda9131 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Fri, 27 Oct 2017 12:02:18 -0400 Subject: [PATCH 036/329] Rejig some error messages in pki --- builtin/logical/pki/ca_util.go | 4 ++-- builtin/logical/pki/path_issue_sign.go | 11 ++++++----- builtin/logical/pki/path_root.go | 4 ++-- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/builtin/logical/pki/ca_util.go b/builtin/logical/pki/ca_util.go index 7a6deda236..f0024a84cc 100644 --- a/builtin/logical/pki/ca_util.go +++ b/builtin/logical/pki/ca_util.go @@ -17,14 +17,14 @@ func (b *backend) getGenerationParams( case "internal": default: errorResp = logical.ErrorResponse( - `The "exported" path parameter must be "internal" or "exported"`) + `the "exported" path parameter must be "internal" or "exported"`) return } format = getFormat(data) if format == "" { errorResp = logical.ErrorResponse( - `The "format" path parameter must be "pem", "der", or "pem_bundle"`) + `the "format" path parameter must be "pem", "der", "der_pkcs", or "pem_bundle"`) return } diff --git a/builtin/logical/pki/path_issue_sign.go b/builtin/logical/pki/path_issue_sign.go index d7b0c367a2..a297c6ab9a 100644 --- a/builtin/logical/pki/path_issue_sign.go +++ b/builtin/logical/pki/path_issue_sign.go @@ -5,6 +5,7 @@ import ( "fmt" "time" + "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/helper/certutil" "github.com/hashicorp/vault/helper/errutil" "github.com/hashicorp/vault/logical" @@ -163,7 +164,7 @@ func (b *backend) pathIssueSignCert( format := getFormat(data) if format == "" { return logical.ErrorResponse( - `The "format" path parameter must be "pem", "der", or "pem_bundle"`), nil + `the "format" path parameter must be "pem", "der", or "pem_bundle"`), nil } var caErr error @@ -171,10 +172,10 @@ func (b *backend) pathIssueSignCert( switch caErr.(type) { case errutil.UserError: return nil, errutil.UserError{Err: fmt.Sprintf( - "Could not fetch the CA certificate (was one set?): %s", caErr)} + "could not fetch the CA certificate (was one set?): %s", caErr)} case errutil.InternalError: return nil, errutil.InternalError{Err: fmt.Sprintf( - "Error fetching CA certificate: %s", caErr)} + "error fetching CA certificate: %s", caErr)} } var parsedBundle *certutil.ParsedCertBundle @@ -195,12 +196,12 @@ func (b *backend) pathIssueSignCert( signingCB, err := signingBundle.ToCertBundle() if err != nil { - return nil, fmt.Errorf("Error converting raw signing bundle to cert bundle: %s", err) + return nil, errwrap.Wrapf("error converting raw signing bundle to cert bundle: {{err}}", err) } cb, err := parsedBundle.ToCertBundle() if err != nil { - return nil, fmt.Errorf("Error converting raw cert bundle to cert bundle: %s", err) + return nil, errwrap.Wrapf("error converting raw cert bundle to cert bundle: {{err}}", err) } respData := map[string]interface{}{ diff --git a/builtin/logical/pki/path_root.go b/builtin/logical/pki/path_root.go index 438c92e3ca..3f18eecae2 100644 --- a/builtin/logical/pki/path_root.go +++ b/builtin/logical/pki/path_root.go @@ -149,7 +149,7 @@ func (b *backend) pathCAGenerateRoot( cb, err := parsedBundle.ToCertBundle() if err != nil { - return nil, fmt.Errorf("error converting raw cert bundle to cert bundle: %s", err) + return nil, errwrap.Wrapf("error converting raw cert bundle to cert bundle: {{err}}", err) } resp := &logical.Response{ @@ -205,7 +205,7 @@ func (b *backend) pathCAGenerateRoot( Value: 
parsedBundle.CertificateBytes, }) if err != nil { - return nil, fmt.Errorf("Unable to store certificate locally: %v", err) + return nil, errwrap.Wrapf("unable to store certificate locally: {{err}}", err) } // For ease of later use, also store just the certificate at a known From ed1cbb0a789a2de3f87775444f5ee472278fcad6 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Fri, 27 Oct 2017 13:11:02 -0400 Subject: [PATCH 037/329] Only copy hooks if building from a git repo Fixes #3498 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 5701bd4e58..ccb556ec2e 100644 --- a/Makefile +++ b/Makefile @@ -69,7 +69,7 @@ vet: prep: fmtcheck @sh -c "'$(CURDIR)/scripts/goversioncheck.sh' '$(GO_VERSION_MIN)'" go generate $(go list ./... | grep -v /vendor/) - cp .hooks/* .git/hooks/ + @if [ -d .git/hooks ]; then cp .hooks/* .git/hooks/; fi # bootstrap the build by downloading additional tools bootstrap: From 2afbbb3400b076b9319833296dcf28949142dfe6 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Fri, 27 Oct 2017 14:08:30 -0400 Subject: [PATCH 038/329] Only call ConfigureTransport if "h2" is not already in NextProtos. Fixes #3435 --- api/client.go | 15 +++++++++++++-- api/client_test.go | 13 +++++++++++++ 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/api/client.go b/api/client.go index 6e0aa18eea..d801c43975 100644 --- a/api/client.go +++ b/api/client.go @@ -282,8 +282,19 @@ func NewClient(c *Config) (*Client, error) { } if tp, ok := c.HttpClient.Transport.(*http.Transport); ok { - if err := http2.ConfigureTransport(tp); err != nil { - return nil, err + if tcc := tp.TLSClientConfig; tcc != nil { + var found bool + for _, proto := range tcc.NextProtos { + if proto == "h2" { + found = true + break + } + } + if !found { + if err := http2.ConfigureTransport(tp); err != nil { + return nil, err + } + } } } diff --git a/api/client_test.go b/api/client_test.go index 3bc866b157..a117634112 100644 --- a/api/client_test.go +++ b/api/client_test.go @@ -213,3 +213,16 @@ func TestClientNonTransportRoundTripper(t *testing.T) { t.Fatal(err) } } + +func TestClone(t *testing.T) { + client1, err1 := NewClient(nil) + if err1 != nil { + t.Fatalf("NewClient failed: %v", err1) + } + client2, err2 := client1.Clone() + if err2 != nil { + t.Fatalf("Clone failed: %v", err2) + } + + _ = client2 +} From 6df604108800ad625627c0da8fe8659ec0cb8209 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Fri, 27 Oct 2017 15:06:04 -0400 Subject: [PATCH 039/329] Bump deps --- .../go/internal/version/version.go | 2 +- vendor/cloud.google.com/go/storage/acl.go | 33 +- vendor/cloud.google.com/go/storage/bucket.go | 12 +- vendor/cloud.google.com/go/storage/doc.go | 7 + .../go/storage/{go110.go.pending => go110.go} | 0 vendor/cloud.google.com/go/storage/iam.go | 35 +- .../go/storage/notifications.go | 179 + vendor/cloud.google.com/go/storage/storage.go | 14 +- .../Azure/azure-sdk-for-go/storage/README.md | 73 + .../azure-sdk-for-go/storage/appendblob.go | 27 +- .../azure-sdk-for-go/storage/authorization.go | 24 +- .../Azure/azure-sdk-for-go/storage/blob.go | 33 +- .../azure-sdk-for-go/storage/blobsasuri.go | 150 +- .../storage/blobserviceclient.go | 31 + .../azure-sdk-for-go/storage/blockblob.go | 20 +- .../Azure/azure-sdk-for-go/storage/client.go | 243 +- .../azure-sdk-for-go/storage/commonsasuri.go | 38 + .../azure-sdk-for-go/storage/container.go | 100 +- .../azure-sdk-for-go/storage/copyblob.go | 14 + .../azure-sdk-for-go/storage/directory.go | 14 + 
.../Azure/azure-sdk-for-go/storage/entity.go | 14 + .../Azure/azure-sdk-for-go/storage/file.go | 14 + .../storage/fileserviceclient.go | 14 + .../azure-sdk-for-go/storage/leaseblob.go | 14 + .../Azure/azure-sdk-for-go/storage/message.go | 14 + .../Azure/azure-sdk-for-go/storage/odata.go | 14 + .../azure-sdk-for-go/storage/pageblob.go | 17 +- .../Azure/azure-sdk-for-go/storage/queue.go | 14 + .../azure-sdk-for-go/storage/queuesasuri.go | 146 + .../storage/queueserviceclient.go | 14 + .../Azure/azure-sdk-for-go/storage/share.go | 14 + .../azure-sdk-for-go/storage/storagepolicy.go | 14 + .../storage/storageservice.go | 14 + .../Azure/azure-sdk-for-go/storage/table.go | 25 +- .../azure-sdk-for-go/storage/table_batch.go | 14 + .../storage/tableserviceclient.go | 14 + .../Azure/azure-sdk-for-go/storage/util.go | 38 +- .../azure-sdk-for-go/storage/util_1.7.go | 14 + .../azure-sdk-for-go/storage/util_1.8.go | 14 + .../Azure/azure-sdk-for-go/storage/version.go | 14 + .../Azure/go-ansiterm/csi_entry_state.go | 4 +- .../Azure/go-ansiterm/csi_param_state.go | 4 +- .../go-ansiterm/escape_intermediate_state.go | 4 +- .../Azure/go-ansiterm/escape_state.go | 4 +- .../Azure/go-ansiterm/osc_string_state.go | 2 +- vendor/github.com/Azure/go-ansiterm/parser.go | 101 +- .../go-ansiterm/parser_action_helpers.go | 4 - .../Azure/go-ansiterm/parser_actions.go | 17 +- .../Azure/go-ansiterm/winterm/ansi.go | 2 +- .../Azure/go-ansiterm/winterm/api.go | 25 +- .../go-ansiterm/winterm/cursor_helpers.go | 2 +- .../go-ansiterm/winterm/scroll_helper.go | 4 +- .../go-ansiterm/winterm/win_event_handler.go | 117 +- .../Azure/go-autorest/autorest/adal/config.go | 14 + .../go-autorest/autorest/adal/devicetoken.go | 14 + .../Azure/go-autorest/autorest/adal/msi.go | 14 + .../go-autorest/autorest/adal/msi_windows.go | 14 + .../go-autorest/autorest/adal/persist.go | 14 + .../Azure/go-autorest/autorest/adal/sender.go | 14 + .../Azure/go-autorest/autorest/adal/token.go | 14 + .../go-autorest/autorest/authorization.go | 14 + .../Azure/go-autorest/autorest/autorest.go | 14 + .../Azure/go-autorest/autorest/azure/async.go | 14 + .../Azure/go-autorest/autorest/azure/azure.go | 22 +- .../autorest/azure/environments.go | 14 + .../Azure/go-autorest/autorest/azure/rp.go | 202 + .../Azure/go-autorest/autorest/client.go | 25 +- .../Azure/go-autorest/autorest/date/date.go | 14 + .../Azure/go-autorest/autorest/date/time.go | 14 + .../go-autorest/autorest/date/timerfc1123.go | 14 + .../go-autorest/autorest/date/unixtime.go | 14 + .../go-autorest/autorest/date/utility.go | 14 + .../Azure/go-autorest/autorest/error.go | 14 + .../Azure/go-autorest/autorest/preparer.go | 14 + .../Azure/go-autorest/autorest/responder.go | 14 + .../go-autorest/autorest/retriablerequest.go | 14 + .../autorest/retriablerequest_1.7.go | 36 +- .../autorest/retriablerequest_1.8.go | 42 +- .../Azure/go-autorest/autorest/sender.go | 14 + .../Azure/go-autorest/autorest/utility.go | 14 + .../Azure/go-autorest/autorest/version.go | 14 + vendor/github.com/Jeffail/gabs/README.md | 23 + vendor/github.com/Jeffail/gabs/gabs.go | 69 +- .../github.com/PuerkitoBio/purell/README.md | 2 +- .../github.com/PuerkitoBio/purell/purell.go | 2 +- vendor/github.com/SAP/go-hdb/NOTICE | 0 vendor/github.com/SAP/go-hdb/driver/driver.go | 2 +- vendor/github.com/armon/go-metrics/inmem.go | 4 +- .../asaskevich/govalidator/error.go | 11 +- .../asaskevich/govalidator/patterns.go | 1 - .../asaskevich/govalidator/validator.go | 55 +- .../aws/aws-sdk-go/aws/endpoints/defaults.go | 27 +- 
.../aws/aws-sdk-go/aws/session/env_config.go | 5 +- .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../protocol/query/queryutil/queryutil.go | 4 + .../aws-sdk-go/service/dynamodb/doc_custom.go | 109 +- .../dynamodb/dynamodbattribute/decode.go | 44 +- .../dynamodb/dynamodbattribute/encode.go | 5 +- .../aws/aws-sdk-go/service/ec2/api.go | 1821 +- .../aws/aws-sdk-go/service/ec2/waiters.go | 5 + .../aws/aws-sdk-go/service/iam/api.go | 494 +- .../aws/aws-sdk-go/service/iam/errors.go | 9 +- .../aws/aws-sdk-go/service/s3/api.go | 18 + vendor/github.com/boombuler/barcode/README.md | 3 + vendor/github.com/cenk/backoff/backoff.go | 2 +- vendor/github.com/cenk/backoff/exponential.go | 4 +- vendor/github.com/cenk/backoff/ticker.go | 11 +- .../circonus-gometrics/CHANGELOG.md | 5 + .../circonus-gometrics/OPTIONS.md | 8 +- .../circonus-gometrics/README.md | 4 + .../circonus-gometrics/api/api.go | 36 +- .../circonus-gometrics/checkmgr/broker.go | 2 + .../circonus-gometrics/checkmgr/checkmgr.go | 99 +- .../circonus-gometrics/circonus-gometrics.go | 157 +- .../circonus-labs/circonus-gometrics/gauge.go | 86 +- .../circonus-gometrics/histogram.go | 11 + .../circonus-gometrics/submit.go | 37 +- .../circonus-labs/circonus-gometrics/tools.go | 1 - .../circonus-labs/circonus-gometrics/util.go | 12 +- .../cockroachdb/cockroach-go/crdb/error.go | 51 + .../cockroachdb/cockroach-go/crdb/tx.go | 29 +- .../github.com/coreos/etcd/client/client.go | 11 +- .../coreos/etcd/client/keys.generated.go | 4653 +- .../github.com/coreos/etcd/clientv3/auth.go | 16 +- .../coreos/etcd/clientv3/balancer.go | 154 +- .../github.com/coreos/etcd/clientv3/client.go | 18 +- .../coreos/etcd/clientv3/cluster.go | 27 +- .../coreos/etcd/clientv3/health_balancer.go | 249 + vendor/github.com/coreos/etcd/clientv3/kv.go | 23 +- .../github.com/coreos/etcd/clientv3/lease.go | 89 +- .../coreos/etcd/clientv3/maintenance.go | 30 +- .../coreos/etcd/clientv3/ready_wait.go | 30 + .../github.com/coreos/etcd/clientv3/retry.go | 376 +- vendor/github.com/coreos/etcd/clientv3/txn.go | 26 +- .../github.com/coreos/etcd/clientv3/watch.go | 2 + .../etcdserver/api/v3rpc/rpctypes/error.go | 2 +- .../coreos/etcd/pkg/transport/listener.go | 20 +- .../github.com/denisenkom/go-mssqldb/buf.go | 108 +- .../denisenkom/go-mssqldb/bulkcopy.go | 18 +- .../github.com/denisenkom/go-mssqldb/mssql.go | 132 +- .../denisenkom/go-mssqldb/mssql_go18.go | 15 + .../denisenkom/go-mssqldb/mssql_go19.go | 53 + .../denisenkom/go-mssqldb/mssql_go19pre.go | 12 + .../github.com/denisenkom/go-mssqldb/ntlm.go | 2 +- .../denisenkom/go-mssqldb/parser.go | 6 +- .../denisenkom/go-mssqldb/sspi_windows.go | 2 +- .../github.com/denisenkom/go-mssqldb/tds.go | 26 +- .../github.com/denisenkom/go-mssqldb/token.go | 62 +- .../github.com/denisenkom/go-mssqldb/types.go | 16 + .../dgrijalva/jwt-go/VERSION_HISTORY.md | 6 + .../docker/docker/api/types/client.go | 16 +- .../docker/docker/api/types/configs.go | 1 - vendor/github.com/docker/docker/opts/hosts.go | 10 +- .../docker/docker/pkg/archive/archive.go | 59 +- .../docker/docker/pkg/archive/archive_unix.go | 12 +- .../docker/pkg/archive/changes_linux.go | 2 +- .../docker/docker/pkg/archive/copy.go | 2 +- .../docker/pkg/fileutils/fileutils_solaris.go | 7 - .../docker/docker/pkg/idtools/idtools_unix.go | 38 +- .../docker/docker/pkg/jsonlog/jsonlog.go | 42 - .../docker/pkg/jsonlog/jsonlog_marshalling.go | 178 - .../docker/docker/pkg/jsonlog/jsonlogbytes.go | 122 - .../docker/pkg/jsonlog/time_marshalling.go | 27 - .../docker/pkg/jsonmessage/jsonmessage.go | 
9 +- .../docker/docker/pkg/mount/mount.go | 10 +- .../docker/pkg/mount/mounter_solaris.go | 34 - .../docker/pkg/mount/mountinfo_solaris.go | 37 - .../docker/pkg/mount/sharedsubtree_solaris.go | 58 - .../docker/docker/pkg/promise/promise.go | 11 - .../docker/docker/pkg/system/exitcode.go | 14 - .../docker/docker/pkg/system/init_windows.go | 13 +- .../docker/docker/pkg/system/lcow.go | 58 + .../docker/pkg/system/meminfo_solaris.go | 129 - .../docker/docker/pkg/system/mknod.go | 2 +- .../docker/docker/pkg/system/path.go | 4 +- .../docker/pkg/system/process_windows.go | 18 + .../github.com/docker/docker/pkg/system/rm.go | 2 +- .../docker/docker/pkg/term/tc_solaris_cgo.go | 65 - .../docker/docker/pkg/term/term_windows.go | 31 +- .../docker/pkg/term/winsize_solaris_cgo.go | 42 - .../github.com/emicklei/go-restful/CHANGES.md | 3 + .../github.com/emicklei/go-restful/README.md | 2 +- .../github.com/emicklei/go-restful/jsr311.go | 22 +- .../emicklei/go-restful/options_filter.go | 10 +- .../emicklei/go-restful/parameter.go | 29 + .../emicklei/go-restful/response.go | 28 +- .../github.com/emicklei/go-restful/route.go | 9 + .../emicklei/go-restful/route_builder.go | 27 +- .../emicklei/go-restful/web_service.go | 2 +- vendor/github.com/fatih/structs/tags.go | 2 +- .../github.com/fsouza/go-dockerclient/AUTHORS | 4 + .../fsouza/go-dockerclient/Gopkg.toml | 28 + .../fsouza/go-dockerclient/Makefile | 11 +- .../fsouza/go-dockerclient/README.markdown | 10 +- .../fsouza/go-dockerclient/appveyor.yml | 9 +- .../fsouza/go-dockerclient/client.go | 13 +- .../fsouza/go-dockerclient/client_unix.go | 3 +- .../fsouza/go-dockerclient/client_windows.go | 7 +- .../fsouza/go-dockerclient/container.go | 7 +- .../fsouza/go-dockerclient/distribution.go | 26 + .../github.com/fsouza/go-dockerclient/env.go | 6 +- vendor/github.com/go-ini/ini/LICENSE | 2 +- vendor/github.com/go-ini/ini/Makefile | 5 +- vendor/github.com/go-ini/ini/README.md | 29 +- vendor/github.com/go-ini/ini/README_ZH.md | 29 +- vendor/github.com/go-ini/ini/file.go | 392 + vendor/github.com/go-ini/ini/ini.go | 376 +- vendor/github.com/go-ini/ini/key.go | 40 +- vendor/github.com/go-ini/ini/parser.go | 25 +- vendor/github.com/go-ini/ini/section.go | 9 + vendor/github.com/go-ini/ini/struct.go | 16 +- vendor/github.com/go-ldap/ldap/Makefile | 2 +- vendor/github.com/go-ldap/ldap/bind.go | 6 +- vendor/github.com/go-ldap/ldap/compare.go | 4 - vendor/github.com/go-ldap/ldap/conn.go | 54 +- vendor/github.com/go-ldap/ldap/control.go | 4 - vendor/github.com/go-ldap/ldap/debug.go | 2 +- vendor/github.com/go-ldap/ldap/dn.go | 8 +- vendor/github.com/go-ldap/ldap/filter.go | 4 - vendor/github.com/go-ldap/ldap/ldap.go | 6 +- vendor/github.com/go-ldap/ldap/modify.go | 4 - .../github.com/go-ldap/ldap/passwdmodify.go | 8 +- vendor/github.com/go-ldap/ldap/search.go | 4 - vendor/github.com/go-openapi/spec/expander.go | 24 +- vendor/github.com/go-openapi/spec/items.go | 11 +- .../github.com/go-openapi/spec/refmodifier.go | 82 + vendor/github.com/go-sql-driver/mysql/AUTHORS | 1 + .../github.com/go-sql-driver/mysql/README.md | 58 +- .../go-sql-driver/mysql/connection.go | 2 +- .../github.com/go-sql-driver/mysql/const.go | 9 +- .../github.com/go-sql-driver/mysql/driver.go | 1 - vendor/github.com/go-sql-driver/mysql/dsn.go | 96 +- .../github.com/go-sql-driver/mysql/errors.go | 73 - .../github.com/go-sql-driver/mysql/fields.go | 140 + .../github.com/go-sql-driver/mysql/packets.go | 43 +- vendor/github.com/go-sql-driver/mysql/rows.go | 51 +- vendor/github.com/gocql/gocql/AUTHORS | 4 + 
vendor/github.com/gocql/gocql/README.md | 4 +- vendor/github.com/gocql/gocql/cluster.go | 4 + vendor/github.com/gocql/gocql/conn.go | 69 +- vendor/github.com/gocql/gocql/control.go | 113 +- vendor/github.com/gocql/gocql/events.go | 83 +- vendor/github.com/gocql/gocql/frame.go | 79 +- vendor/github.com/gocql/gocql/helpers.go | 33 +- vendor/github.com/gocql/gocql/host_source.go | 221 +- vendor/github.com/gocql/gocql/marshal.go | 79 +- vendor/github.com/gocql/gocql/policies.go | 83 +- .../github.com/gocql/gocql/query_executor.go | 14 +- vendor/github.com/gocql/gocql/ring.go | 2 + vendor/github.com/gocql/gocql/session.go | 58 +- .../protoc-gen-go/descriptor/descriptor.pb.go | 425 +- .../protoc-gen-go/descriptor/descriptor.proto | 28 +- .../golang/protobuf/ptypes/any/any.pb.go | 10 + .../golang/protobuf/ptypes/any/any.proto | 10 + .../go-github/github/apps_installation.go | 37 +- .../go-github/github/github-accessors.go | 32 + .../google/go-github/github/github.go | 6 +- .../google/go-github/github/issues.go | 14 +- .../google/go-github/github/messages.go | 41 +- .../google/go-github/github/orgs_members.go | 2 +- .../google/go-github/github/orgs_teams.go | 3 + .../google/go-github/github/projects.go | 6 + .../google/go-github/github/repos.go | 20 +- .../github.com/hashicorp/consul/api/agent.go | 3 +- vendor/github.com/hashicorp/consul/api/api.go | 21 +- vendor/github.com/hashicorp/consul/api/kv.go | 30 +- .../github.com/hashicorp/consul/api/lock.go | 4 +- .../hashicorp/consul/api/operator_area.go | 25 + .../consul/api/operator_autopilot.go | 2 +- .../hashicorp/consul/api/operator_raft.go | 5 +- .../hashicorp/consul/api/semaphore.go | 4 +- vendor/github.com/hashicorp/consul/lib/eof.go | 27 + vendor/github.com/hashicorp/go-hclog/int.go | 9 +- vendor/github.com/hashicorp/go-hclog/log.go | 4 + vendor/github.com/hashicorp/hcl/decoder.go | 31 +- .../hashicorp/hcl/hcl/scanner/scanner.go | 2 +- .../hashicorp/hcl/json/scanner/scanner.go | 2 +- .../hashicorp/serf/coordinate/client.go | 5 + .../vault-plugin-auth-gcp/Gopkg.lock | 207 - .../vault-plugin-auth-gcp/Gopkg.toml | 46 - .../hashicorp/vault-plugin-auth-gcp/Makefile | 58 - .../hashicorp/vault-plugin-auth-gcp/README.md | 149 - .../hashicorp/vault-plugin-auth-gcp/main.go | 28 - .../plugin/path_login.go | 2 +- .../plugin/util/iamutil.go | 2 +- .../vault-plugin-auth-kubernetes/Gopkg.lock | 2 +- .../keybase/go-crypto/openpgp/armor/armor.go | 13 + vendor/github.com/lib/pq/README.md | 1 + vendor/github.com/lib/pq/conn.go | 66 +- vendor/github.com/lib/pq/doc.go | 32 +- vendor/github.com/lib/pq/encode.go | 12 +- vendor/github.com/lib/pq/notify.go | 56 +- vendor/github.com/lib/pq/oid/types.go | 172 +- vendor/github.com/lib/pq/rows.go | 93 + .../mailru/easyjson/jlexer/lexer.go | 27 + .../mailru/easyjson/jwriter/writer.go | 7 + .../mattn/go-isatty/isatty_linux.go | 2 +- .../mattn/go-isatty/isatty_linux_ppc64x.go | 19 + .../go-testing-interface/testing_go19.go | 60 +- .../mitchellh/mapstructure/mapstructure.go | 22 +- vendor/github.com/ncw/swift/largeobjects.go | 9 +- vendor/github.com/ncw/swift/swift.go | 2 + vendor/github.com/ncw/swift/timeout_reader.go | 4 +- ...scall_linux_arm.go => syscall_linux_32.go} | 3 +- .../libcontainer/system/syscall_linux_386.go | 25 - .../libcontainer/system/syscall_linux_64.go | 3 +- .../github.com/samuel/go-zookeeper/zk/conn.go | 10 + vendor/github.com/spf13/pflag/count.go | 12 +- vendor/github.com/spf13/pflag/flag.go | 53 +- vendor/github.com/spf13/pflag/int16.go | 88 + vendor/github.com/tv42/httpunix/LICENSE | 19 + 
vendor/github.com/tv42/httpunix/httpunix.go | 95 + vendor/github.com/ugorji/go/codec/0doc.go | 113 +- vendor/github.com/ugorji/go/codec/README.md | 61 +- vendor/github.com/ugorji/go/codec/binc.go | 36 +- vendor/github.com/ugorji/go/codec/cbor.go | 107 +- vendor/github.com/ugorji/go/codec/decode.go | 2456 +- vendor/github.com/ugorji/go/codec/encode.go | 1384 +- .../ugorji/go/codec/fast-path.generated.go | 38869 +++++++--------- .../ugorji/go/codec/fast-path.go.tmpl | 381 +- .../ugorji/go/codec/fast-path.not.go | 13 +- .../ugorji/go/codec/gen-dec-array.go.tmpl | 105 +- .../ugorji/go/codec/gen-dec-map.go.tmpl | 42 +- .../ugorji/go/codec/gen-helper.generated.go | 48 +- .../ugorji/go/codec/gen-helper.go.tmpl | 202 +- .../ugorji/go/codec/gen.generated.go | 147 +- vendor/github.com/ugorji/go/codec/gen.go | 360 +- ...de_go.go => goversion_arrayof_gte_go15.go} | 6 +- ...e_go14.go => goversion_arrayof_lt_go15.go} | 4 +- .../go/codec/goversion_makemap_gte_go19.go | 15 + .../go/codec/goversion_makemap_lt_go19.go | 12 + .../go/codec/goversion_unsupported_lt_go14.go | 17 + ...{gen_15.go => goversion_vendor_eq_go15.go} | 4 +- ...{gen_16.go => goversion_vendor_eq_go16.go} | 6 +- ...gen_17.go => goversion_vendor_gte_go17.go} | 4 +- .../go/codec/goversion_vendor_lt_go15.go | 8 + vendor/github.com/ugorji/go/codec/helper.go | 1107 +- .../ugorji/go/codec/helper_internal.go | 53 - .../ugorji/go/codec/helper_not_unsafe.go | 142 +- .../ugorji/go/codec/helper_unsafe.go | 390 +- vendor/github.com/ugorji/go/codec/json.go | 1305 +- .../ugorji/go/codec/mammoth-test.go.tmpl | 100 + vendor/github.com/ugorji/go/codec/msgpack.go | 82 +- vendor/github.com/ugorji/go/codec/noop.go | 213 - vendor/github.com/ugorji/go/codec/prebuild.go | 3 - vendor/github.com/ugorji/go/codec/prebuild.sh | 205 - vendor/github.com/ugorji/go/codec/rpc.go | 22 +- vendor/github.com/ugorji/go/codec/simple.go | 28 +- vendor/github.com/ugorji/go/codec/tests.sh | 107 - vendor/github.com/ugorji/go/codec/time.go | 57 +- vendor/golang.org/x/crypto/ssh/buffer.go | 5 +- vendor/golang.org/x/net/context/context.go | 2 + vendor/golang.org/x/net/http2/server.go | 42 +- vendor/golang.org/x/net/http2/transport.go | 20 +- vendor/golang.org/x/net/http2/write.go | 7 +- vendor/golang.org/x/net/idna/idna.go | 109 +- vendor/golang.org/x/net/idna/tables.go | 4396 +- vendor/golang.org/x/net/idna/trieval.go | 17 +- vendor/golang.org/x/oauth2/internal/doc.go | 6 + vendor/golang.org/x/oauth2/internal/oauth2.go | 1 - vendor/golang.org/x/oauth2/internal/token.go | 1 - .../golang.org/x/oauth2/internal/transport.go | 1 - vendor/golang.org/x/sys/unix/env_unix.go | 2 +- vendor/golang.org/x/sys/unix/env_unset.go | 2 +- vendor/golang.org/x/sys/unix/gccgo.go | 4 +- vendor/golang.org/x/sys/unix/gccgo_c.c | 2 +- .../x/sys/unix/gccgo_linux_amd64.go | 2 +- vendor/golang.org/x/sys/unix/mkall.sh | 3 - vendor/golang.org/x/sys/unix/mkerrors.sh | 6 + vendor/golang.org/x/sys/unix/pagesize_unix.go | 2 +- vendor/golang.org/x/sys/unix/race.go | 2 +- vendor/golang.org/x/sys/unix/race0.go | 2 +- .../golang.org/x/sys/unix/sockcmsg_linux.go | 2 +- vendor/golang.org/x/sys/unix/syscall.go | 24 +- vendor/golang.org/x/sys/unix/syscall_bsd.go | 2 +- .../golang.org/x/sys/unix/syscall_darwin.go | 12 +- .../x/sys/unix/syscall_darwin_386.go | 17 +- .../x/sys/unix/syscall_darwin_amd64.go | 17 +- .../x/sys/unix/syscall_darwin_arm.go | 17 +- .../x/sys/unix/syscall_darwin_arm64.go | 17 +- .../x/sys/unix/syscall_dragonfly.go | 11 +- .../x/sys/unix/syscall_dragonfly_amd64.go | 15 +- 
.../golang.org/x/sys/unix/syscall_freebsd.go | 11 +- .../x/sys/unix/syscall_freebsd_386.go | 15 +- .../x/sys/unix/syscall_freebsd_amd64.go | 15 +- .../x/sys/unix/syscall_freebsd_arm.go | 15 +- vendor/golang.org/x/sys/unix/syscall_linux.go | 19 +- .../x/sys/unix/syscall_linux_386.go | 19 +- .../x/sys/unix/syscall_linux_amd64.go | 15 +- .../x/sys/unix/syscall_linux_arm.go | 15 +- .../x/sys/unix/syscall_linux_arm64.go | 22 +- .../x/sys/unix/syscall_linux_mips64x.go | 22 +- .../x/sys/unix/syscall_linux_mipsx.go | 15 +- .../x/sys/unix/syscall_linux_ppc64x.go | 17 +- .../x/sys/unix/syscall_linux_s390x.go | 15 +- .../x/sys/unix/syscall_linux_sparc64.go | 15 +- .../golang.org/x/sys/unix/syscall_netbsd.go | 10 +- .../x/sys/unix/syscall_netbsd_386.go | 15 +- .../x/sys/unix/syscall_netbsd_amd64.go | 15 +- .../x/sys/unix/syscall_netbsd_arm.go | 15 +- .../golang.org/x/sys/unix/syscall_openbsd.go | 10 +- .../x/sys/unix/syscall_openbsd_386.go | 15 +- .../x/sys/unix/syscall_openbsd_amd64.go | 15 +- .../x/sys/unix/syscall_openbsd_arm.go | 15 +- .../golang.org/x/sys/unix/syscall_solaris.go | 23 +- .../x/sys/unix/syscall_solaris_amd64.go | 15 +- vendor/golang.org/x/sys/unix/timestruct.go | 62 + .../x/sys/unix/zerrors_freebsd_386.go | 43 + .../x/sys/unix/zerrors_freebsd_amd64.go | 43 + .../x/sys/unix/zerrors_freebsd_arm.go | 43 + .../x/sys/unix/zerrors_linux_386.go | 15 +- .../x/sys/unix/zerrors_linux_amd64.go | 17 +- .../x/sys/unix/zerrors_linux_arm.go | 15 +- .../x/sys/unix/zerrors_linux_arm64.go | 17 +- .../x/sys/unix/zerrors_linux_mips.go | 15 +- .../x/sys/unix/zerrors_linux_mips64.go | 17 +- .../x/sys/unix/zerrors_linux_mips64le.go | 17 +- .../x/sys/unix/zerrors_linux_mipsle.go | 15 +- .../x/sys/unix/zerrors_linux_ppc64.go | 17 +- .../x/sys/unix/zerrors_linux_ppc64le.go | 17 +- .../x/sys/unix/zerrors_linux_s390x.go | 17 +- .../x/sys/unix/zerrors_solaris_amd64.go | 6 + .../golang.org/x/sys/unix/zptrace386_linux.go | 80 + .../golang.org/x/sys/unix/zptracearm_linux.go | 41 + .../x/sys/unix/zptracemips_linux.go | 50 + .../x/sys/unix/zptracemipsle_linux.go | 50 + .../x/sys/unix/zsyscall_darwin_386.go | 11 + .../x/sys/unix/zsyscall_darwin_amd64.go | 11 + .../x/sys/unix/zsyscall_darwin_arm.go | 11 + .../x/sys/unix/zsyscall_darwin_arm64.go | 11 + .../x/sys/unix/zsyscall_dragonfly_amd64.go | 11 + .../x/sys/unix/zsyscall_freebsd_386.go | 11 + .../x/sys/unix/zsyscall_freebsd_amd64.go | 11 + .../x/sys/unix/zsyscall_freebsd_arm.go | 11 + .../x/sys/unix/zsyscall_linux_386.go | 11 + .../x/sys/unix/zsyscall_linux_amd64.go | 11 + .../x/sys/unix/zsyscall_linux_arm.go | 11 + .../x/sys/unix/zsyscall_linux_arm64.go | 22 +- .../x/sys/unix/zsyscall_linux_mips.go | 11 + .../x/sys/unix/zsyscall_linux_mips64.go | 22 +- .../x/sys/unix/zsyscall_linux_mips64le.go | 22 +- .../x/sys/unix/zsyscall_linux_mipsle.go | 11 + .../x/sys/unix/zsyscall_linux_ppc64.go | 13 +- .../x/sys/unix/zsyscall_linux_ppc64le.go | 13 +- .../x/sys/unix/zsyscall_linux_s390x.go | 11 + .../x/sys/unix/zsyscall_netbsd_386.go | 11 + .../x/sys/unix/zsyscall_netbsd_amd64.go | 11 + .../x/sys/unix/zsyscall_netbsd_arm.go | 11 + .../x/sys/unix/zsyscall_openbsd_386.go | 11 + .../x/sys/unix/zsyscall_openbsd_amd64.go | 11 + .../x/sys/unix/zsyscall_openbsd_arm.go | 11 + .../x/sys/unix/zsyscall_solaris_amd64.go | 42 + ...sctl_openbsd.go => zsysctl_openbsd_386.go} | 0 .../x/sys/unix/zsysctl_openbsd_amd64.go | 270 + .../x/sys/unix/zsysctl_openbsd_arm.go | 270 + .../x/sys/unix/ztypes_darwin_386.go | 19 + .../x/sys/unix/ztypes_darwin_amd64.go | 19 + 
.../x/sys/unix/ztypes_darwin_arm.go | 19 + .../x/sys/unix/ztypes_darwin_arm64.go | 24 +- .../x/sys/unix/ztypes_dragonfly_amd64.go | 19 + .../x/sys/unix/ztypes_freebsd_386.go | 20 + .../x/sys/unix/ztypes_freebsd_amd64.go | 20 + .../x/sys/unix/ztypes_freebsd_arm.go | 20 + .../x/sys/unix/ztypes_linux_sparc64.go | 2 - .../x/sys/unix/ztypes_netbsd_386.go | 23 +- .../x/sys/unix/ztypes_netbsd_amd64.go | 23 +- .../x/sys/unix/ztypes_netbsd_arm.go | 23 +- .../x/sys/unix/ztypes_openbsd_386.go | 23 +- .../x/sys/unix/ztypes_openbsd_amd64.go | 23 +- .../x/sys/unix/ztypes_openbsd_arm.go | 23 +- .../x/sys/unix/ztypes_solaris_amd64.go | 2 - .../golang.org/x/sys/windows/dll_windows.go | 9 +- vendor/golang.org/x/sys/windows/env_unset.go | 2 +- .../golang.org/x/sys/windows/env_windows.go | 2 +- .../x/sys/windows/memory_windows.go | 2 +- vendor/golang.org/x/sys/windows/mksyscall.go | 2 +- vendor/golang.org/x/sys/windows/race.go | 2 +- vendor/golang.org/x/sys/windows/race0.go | 2 +- .../x/sys/windows/security_windows.go | 2 +- vendor/golang.org/x/sys/windows/syscall.go | 6 +- .../x/sys/windows/syscall_windows.go | 17 +- .../golang.org/x/sys/windows/types_windows.go | 2 +- .../x/sys/windows/types_windows_386.go | 2 +- .../x/sys/windows/types_windows_amd64.go | 2 +- vendor/golang.org/x/text/width/gen.go | 115 - vendor/golang.org/x/text/width/gen_common.go | 96 - vendor/golang.org/x/text/width/gen_trieval.go | 34 - vendor/golang.org/x/text/width/tables.go | 318 +- .../api/compute/v1/compute-api.json | 7052 ++- .../api/compute/v1/compute-gen.go | 11053 ++++- .../google.golang.org/api/iam/v1/iam-api.json | 2156 +- .../google.golang.org/api/iam/v1/iam-gen.go | 49 +- .../api/internal/settings.go | 17 + .../api/oauth2/v2/oauth2-api.json | 2 +- vendor/google.golang.org/api/option/option.go | 13 + .../api/storage/v1/storage-api.json | 109 +- .../api/storage/v1/storage-gen.go | 235 +- .../api/transport/http/dial.go | 45 +- .../googleapis/rpc/status/status.pb.go | 8 +- vendor/google.golang.org/grpc/Makefile | 4 +- vendor/google.golang.org/grpc/README.md | 5 +- vendor/google.golang.org/grpc/backoff.go | 14 +- vendor/google.golang.org/grpc/balancer.go | 2 +- .../grpc/balancer/balancer.go | 14 +- .../grpc/balancer/roundrobin/roundrobin.go | 241 + .../grpc/balancer_conn_wrappers.go | 177 +- .../grpc/balancer_v1_wrapper.go | 128 +- vendor/google.golang.org/grpc/call.go | 40 +- vendor/google.golang.org/grpc/clientconn.go | 562 +- vendor/google.golang.org/grpc/codec.go | 18 +- .../grpc/credentials/credentials.go | 22 +- vendor/google.golang.org/grpc/go17.go | 3 +- vendor/google.golang.org/grpc/grpclb.go | 1 + .../grpclb/grpc_lb_v1/messages/messages.proto | 2 +- .../google.golang.org/grpc/picker_wrapper.go | 141 + vendor/google.golang.org/grpc/pickfirst.go | 105 + .../grpc/resolver/dns/dns_resolver.go | 377 + .../grpc/resolver/dns/go17.go | 35 + .../grpc/resolver/dns/go18.go | 29 + .../grpc/resolver/passthrough/passthrough.go | 57 + .../grpc/resolver/resolver.go | 19 +- .../grpc/resolver_conn_wrapper.go | 144 + vendor/google.golang.org/grpc/rpc_util.go | 99 +- vendor/google.golang.org/grpc/server.go | 65 +- .../google.golang.org/grpc/service_config.go | 199 + vendor/google.golang.org/grpc/stats/stats.go | 10 +- vendor/google.golang.org/grpc/stream.go | 55 +- .../grpc/transport/bdp_estimator.go | 15 +- .../grpc/transport/control.go | 5 +- .../grpc/transport/handler_server.go | 5 +- .../grpc/transport/http2_client.go | 346 +- .../grpc/transport/http2_server.go | 325 +- .../grpc/transport/http_util.go | 9 +- 
.../grpc/transport/transport.go | 79 +- vendor/google.golang.org/grpc/vet.sh | 7 +- .../k8s.io/apimachinery/pkg/api/errors/BUILD | 19 +- .../k8s.io/apimachinery/pkg/api/errors/OWNERS | 3 +- .../apimachinery/pkg/api/errors/errors.go | 105 +- .../apimachinery/pkg/api/resource/BUILD | 31 +- .../apimachinery/pkg/api/resource/OWNERS | 2 +- .../pkg/api/resource/generated.proto | 1 + .../apimachinery/pkg/api/resource/quantity.go | 5 +- .../pkg/api/resource/zz_generated.deepcopy.go | 44 + .../apimachinery/pkg/apis/meta/v1/BUILD | 36 +- .../apimachinery/pkg/apis/meta/v1/OWNERS | 1 - .../pkg/apis/meta/v1/controller_ref.go | 54 + .../pkg/apis/meta/v1/conversion.go | 18 + .../apimachinery/pkg/apis/meta/v1/doc.go | 2 +- .../pkg/apis/meta/v1/generated.pb.go | 707 +- .../pkg/apis/meta/v1/generated.proto | 70 +- .../apimachinery/pkg/apis/meta/v1/meta.go | 23 +- .../pkg/apis/meta/v1/micro_time.go | 22 +- .../apimachinery/pkg/apis/meta/v1/time.go | 12 +- .../pkg/apis/meta/v1/time_proto.go | 11 +- .../apimachinery/pkg/apis/meta/v1/types.go | 98 +- .../meta/v1/types_swagger_doc_generated.go | 21 +- .../apimachinery/pkg/apis/meta/v1/watch.go | 9 + .../pkg/apis/meta/v1/zz_generated.deepcopy.go | 1398 +- .../k8s.io/apimachinery/pkg/conversion/BUILD | 23 +- .../pkg/conversion/queryparams/BUILD | 19 +- .../pkg/conversion/queryparams/convert.go | 9 +- vendor/k8s.io/apimachinery/pkg/fields/BUILD | 19 +- .../apimachinery/pkg/fields/selector.go | 32 + vendor/k8s.io/apimachinery/pkg/labels/BUILD | 21 +- .../apimachinery/pkg/labels/selector.go | 20 + .../pkg/labels/zz_generated.deepcopy.go | 59 + vendor/k8s.io/apimachinery/pkg/openapi/BUILD | 21 - vendor/k8s.io/apimachinery/pkg/runtime/BUILD | 33 +- .../apimachinery/pkg/runtime/conversion.go | 15 + .../apimachinery/pkg/runtime/embedded.go | 6 + .../apimachinery/pkg/runtime/extension.go | 5 +- .../apimachinery/pkg/runtime/generated.proto | 3 +- .../apimachinery/pkg/runtime/interfaces.go | 16 +- .../apimachinery/pkg/runtime/register.go | 2 +- .../apimachinery/pkg/runtime/schema/BUILD | 25 +- .../k8s.io/apimachinery/pkg/runtime/scheme.go | 42 +- .../pkg/runtime/swagger_doc_generator.go | 2 +- .../k8s.io/apimachinery/pkg/runtime/types.go | 6 +- .../pkg/runtime/zz_generated.deepcopy.go | 130 +- .../k8s.io/apimachinery/pkg/selection/BUILD | 15 +- vendor/k8s.io/apimachinery/pkg/types/BUILD | 15 +- .../k8s.io/apimachinery/pkg/util/errors/BUILD | 17 +- .../apimachinery/pkg/util/errors/errors.go | 2 +- .../k8s.io/apimachinery/pkg/util/intstr/BUILD | 27 +- .../apimachinery/pkg/util/intstr/intstr.go | 2 +- vendor/k8s.io/apimachinery/pkg/util/net/BUILD | 19 +- .../k8s.io/apimachinery/pkg/util/net/http.go | 37 +- .../apimachinery/pkg/util/net/interface.go | 314 +- .../apimachinery/pkg/util/runtime/BUILD | 19 +- .../k8s.io/apimachinery/pkg/util/sets/BUILD | 26 +- .../apimachinery/pkg/util/validation/BUILD | 22 +- .../pkg/util/validation/field/BUILD | 19 +- .../pkg/util/validation/field/errors.go | 7 +- .../pkg/util/validation/validation.go | 64 +- .../k8s.io/apimachinery/pkg/util/wait/BUILD | 19 +- .../k8s.io/apimachinery/pkg/util/wait/wait.go | 36 + vendor/k8s.io/apimachinery/pkg/watch/BUILD | 33 +- vendor/k8s.io/apimachinery/pkg/watch/mux.go | 7 + vendor/k8s.io/apimachinery/pkg/watch/watch.go | 5 +- .../pkg/watch/zz_generated.deepcopy.go | 59 + .../third_party/forked/golang/reflect/BUILD | 22 +- .../third_party/forked/golang/reflect/type.go | 91 - vendor/k8s.io/client-go/LICENSE | 202 - .../client-go/pkg/apis/authentication/BUILD | 25 - .../client-go/pkg/apis/authentication/OWNERS | 
9 - .../client-go/pkg/apis/authentication/doc.go | 18 - .../pkg/apis/authentication/register.go | 50 - .../pkg/apis/authentication/types.go | 89 - .../pkg/apis/authentication/v1/BUILD | 34 - .../pkg/apis/authentication/v1/conversion.go | 26 - .../pkg/apis/authentication/v1/defaults.go | 25 - .../pkg/apis/authentication/v1/doc.go | 18 - .../apis/authentication/v1/generated.pb.go | 1301 - .../apis/authentication/v1/generated.proto | 99 - .../pkg/apis/authentication/v1/register.go | 58 - .../pkg/apis/authentication/v1/types.go | 106 - .../v1/types_swagger_doc_generated.go | 72 - .../v1/zz_generated.conversion.go | 153 - .../v1/zz_generated.deepcopy.go | 110 - .../v1/zz_generated.defaults.go | 32 - .../authentication/zz_generated.deepcopy.go | 110 - .../{apimachinery => kube-openapi}/LICENSE | 0 .../pkg/common}/common.go | 57 +- .../pkg/common}/doc.go | 5 +- vendor/vendor.json | 1463 +- 625 files changed, 64851 insertions(+), 41030 deletions(-) rename vendor/cloud.google.com/go/storage/{go110.go.pending => go110.go} (100%) create mode 100644 vendor/cloud.google.com/go/storage/notifications.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/README.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/rp.go mode change 100755 => 100644 vendor/github.com/SAP/go-hdb/NOTICE create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/CHANGELOG.md create mode 100644 vendor/github.com/cockroachdb/cockroach-go/crdb/error.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/health_balancer.go create mode 100644 vendor/github.com/coreos/etcd/clientv3/ready_wait.go create mode 100644 vendor/github.com/denisenkom/go-mssqldb/mssql_go19.go create mode 100644 vendor/github.com/denisenkom/go-mssqldb/mssql_go19pre.go delete mode 100644 vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go delete mode 100644 vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go delete mode 100644 vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go delete mode 100644 vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go delete mode 100644 vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling.go delete mode 100644 vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go delete mode 100644 vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go delete mode 100644 vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go delete mode 100644 vendor/github.com/docker/docker/pkg/promise/promise.go create mode 100644 vendor/github.com/docker/docker/pkg/system/lcow.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go create mode 100644 vendor/github.com/docker/docker/pkg/system/process_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/winsize_solaris_cgo.go create mode 100644 vendor/github.com/fsouza/go-dockerclient/Gopkg.toml create mode 100644 vendor/github.com/fsouza/go-dockerclient/distribution.go create mode 100644 vendor/github.com/go-ini/ini/file.go create mode 100644 vendor/github.com/go-openapi/spec/refmodifier.go create mode 100644 vendor/github.com/go-sql-driver/mysql/fields.go create mode 100644 vendor/github.com/hashicorp/consul/lib/eof.go delete mode 100644 vendor/github.com/hashicorp/vault-plugin-auth-gcp/Gopkg.lock 
delete mode 100644 vendor/github.com/hashicorp/vault-plugin-auth-gcp/Gopkg.toml delete mode 100644 vendor/github.com/hashicorp/vault-plugin-auth-gcp/Makefile delete mode 100644 vendor/github.com/hashicorp/vault-plugin-auth-gcp/README.md delete mode 100644 vendor/github.com/hashicorp/vault-plugin-auth-gcp/main.go create mode 100644 vendor/github.com/lib/pq/rows.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go rename vendor/github.com/opencontainers/runc/libcontainer/system/{syscall_linux_arm.go => syscall_linux_32.go} (93%) delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go create mode 100644 vendor/github.com/spf13/pflag/int16.go create mode 100644 vendor/github.com/tv42/httpunix/LICENSE create mode 100644 vendor/github.com/tv42/httpunix/httpunix.go rename vendor/github.com/ugorji/go/codec/{decode_go.go => goversion_arrayof_gte_go15.go} (59%) rename vendor/github.com/ugorji/go/codec/{decode_go14.go => goversion_arrayof_lt_go15.go} (64%) create mode 100644 vendor/github.com/ugorji/go/codec/goversion_makemap_gte_go19.go create mode 100644 vendor/github.com/ugorji/go/codec/goversion_makemap_lt_go19.go create mode 100644 vendor/github.com/ugorji/go/codec/goversion_unsupported_lt_go14.go rename vendor/github.com/ugorji/go/codec/{gen_15.go => goversion_vendor_eq_go15.go} (72%) rename vendor/github.com/ugorji/go/codec/{gen_16.go => goversion_vendor_eq_go16.go} (65%) rename vendor/github.com/ugorji/go/codec/{gen_17.go => goversion_vendor_gte_go17.go} (82%) create mode 100644 vendor/github.com/ugorji/go/codec/goversion_vendor_lt_go15.go create mode 100644 vendor/github.com/ugorji/go/codec/mammoth-test.go.tmpl delete mode 100644 vendor/github.com/ugorji/go/codec/noop.go delete mode 100644 vendor/github.com/ugorji/go/codec/prebuild.go delete mode 100755 vendor/github.com/ugorji/go/codec/prebuild.sh delete mode 100755 vendor/github.com/ugorji/go/codec/tests.sh create mode 100644 vendor/golang.org/x/oauth2/internal/doc.go create mode 100644 vendor/golang.org/x/sys/unix/timestruct.go create mode 100644 vendor/golang.org/x/sys/unix/zptrace386_linux.go create mode 100644 vendor/golang.org/x/sys/unix/zptracearm_linux.go create mode 100644 vendor/golang.org/x/sys/unix/zptracemips_linux.go create mode 100644 vendor/golang.org/x/sys/unix/zptracemipsle_linux.go rename vendor/golang.org/x/sys/unix/{zsysctl_openbsd.go => zsysctl_openbsd_386.go} (100%) create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go delete mode 100644 vendor/golang.org/x/text/width/gen.go delete mode 100644 vendor/golang.org/x/text/width/gen_common.go delete mode 100644 vendor/golang.org/x/text/width/gen_trieval.go create mode 100644 vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go create mode 100644 vendor/google.golang.org/grpc/picker_wrapper.go create mode 100644 vendor/google.golang.org/grpc/pickfirst.go create mode 100644 vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go create mode 100644 vendor/google.golang.org/grpc/resolver/dns/go17.go create mode 100644 vendor/google.golang.org/grpc/resolver/dns/go18.go create mode 100644 vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go create mode 100644 vendor/google.golang.org/grpc/resolver_conn_wrapper.go create mode 100644 vendor/google.golang.org/grpc/service_config.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go create mode 100644 
vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go create mode 100644 vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/openapi/BUILD create mode 100644 vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/type.go delete mode 100644 vendor/k8s.io/client-go/LICENSE delete mode 100644 vendor/k8s.io/client-go/pkg/apis/authentication/BUILD delete mode 100755 vendor/k8s.io/client-go/pkg/apis/authentication/OWNERS delete mode 100644 vendor/k8s.io/client-go/pkg/apis/authentication/doc.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/authentication/register.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/authentication/types.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/authentication/v1/BUILD delete mode 100644 vendor/k8s.io/client-go/pkg/apis/authentication/v1/conversion.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/authentication/v1/defaults.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/authentication/v1/doc.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/authentication/v1/generated.pb.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/authentication/v1/generated.proto delete mode 100644 vendor/k8s.io/client-go/pkg/apis/authentication/v1/register.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/authentication/v1/types.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/authentication/v1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.defaults.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/authentication/zz_generated.deepcopy.go rename vendor/k8s.io/{apimachinery => kube-openapi}/LICENSE (100%) rename vendor/k8s.io/{apimachinery/pkg/openapi => kube-openapi/pkg/common}/common.go (85%) rename vendor/k8s.io/{apimachinery/pkg/openapi => kube-openapi/pkg/common}/doc.go (83%) diff --git a/vendor/cloud.google.com/go/internal/version/version.go b/vendor/cloud.google.com/go/internal/version/version.go index 321f3c866f..513afa4607 100644 --- a/vendor/cloud.google.com/go/internal/version/version.go +++ b/vendor/cloud.google.com/go/internal/version/version.go @@ -26,7 +26,7 @@ import ( // Repo is the current version of the client libraries in this // repo. It should be a date in YYYYMMDD format. -const Repo = "20170915" +const Repo = "20170928" // Go returns the Go runtime version. The returned string // has no whitespace. 
diff --git a/vendor/cloud.google.com/go/storage/acl.go b/vendor/cloud.google.com/go/storage/acl.go index a1b2b6d3dd..24f90c924a 100644 --- a/vendor/cloud.google.com/go/storage/acl.go +++ b/vendor/cloud.google.com/go/storage/acl.go @@ -15,7 +15,6 @@ package storage import ( - "fmt" "net/http" "reflect" @@ -106,21 +105,17 @@ func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) { return err }) if err != nil { - return nil, fmt.Errorf("storage: error listing default object ACL for bucket %q: %v", a.bucket, err) + return nil, err } return toACLRules(acls.Items), nil } func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error { - err := runWithRetry(ctx, func() error { + return runWithRetry(ctx, func() error { req := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity)) a.configureCall(req, ctx) return req.Do() }) - if err != nil { - return fmt.Errorf("storage: error deleting default ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err) - } - return nil } func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) { @@ -133,7 +128,7 @@ func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) { return err }) if err != nil { - return nil, fmt.Errorf("storage: error listing bucket ACL for bucket %q: %v", a.bucket, err) + return nil, err } r := make([]ACLRule, len(acls.Items)) for i, v := range acls.Items { @@ -156,7 +151,7 @@ func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRol return err }) if err != nil { - return fmt.Errorf("storage: error updating bucket ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err) + return err } return nil } @@ -168,7 +163,7 @@ func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error { return req.Do() }) if err != nil { - return fmt.Errorf("storage: error deleting bucket ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err) + return err } return nil } @@ -183,7 +178,7 @@ func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) { return err }) if err != nil { - return nil, fmt.Errorf("storage: error listing object ACL for bucket %q, file %q: %v", a.bucket, a.object, err) + return nil, err } return toACLRules(acls.Items), nil } @@ -206,30 +201,18 @@ func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRol req = a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl) } a.configureCall(req, ctx) - err := runWithRetry(ctx, func() error { + return runWithRetry(ctx, func() error { _, err := req.Do() return err }) - if err != nil { - if isBucketDefault { - return fmt.Errorf("storage: error updating default ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err) - } else { - return fmt.Errorf("storage: error updating object ACL entry for bucket %q, object %q, entity %q: %v", a.bucket, a.object, entity, err) - } - } - return nil } func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error { - err := runWithRetry(ctx, func() error { + return runWithRetry(ctx, func() error { req := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity)) a.configureCall(req, ctx) return req.Do() }) - if err != nil { - return fmt.Errorf("storage: error deleting object ACL entry for bucket %q, file %q, entity %q: %v", a.bucket, a.object, entity, err) - } - return nil } func (a *ACLHandle) configureCall(call interface { diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go index 
07852a5078..fcaa59db08 100644 --- a/vendor/cloud.google.com/go/storage/bucket.go +++ b/vendor/cloud.google.com/go/storage/bucket.go @@ -35,7 +35,7 @@ type BucketHandle struct { acl ACLHandle defaultObjectACL ACLHandle conds *BucketConditions - userProject string // project for requester-pays buckets + userProject string // project for Requester Pays buckets } // Bucket returns a BucketHandle, which provides operations on the named bucket. @@ -237,6 +237,9 @@ type BucketAttrs struct { Labels map[string]string // RequesterPays reports whether the bucket is a Requester Pays bucket. + // Clients performing operations on Requester Pays buckets must provide + // a user project (see BucketHandle.UserProject), which will be billed + // for the operations. RequesterPays bool // Lifecycle is the lifecycle configuration for objects in the bucket. Lifecycle Lifecycle @@ -506,8 +509,10 @@ func (c *BucketConditions) validate(method string) error { } // UserProject returns a new BucketHandle that passes the project ID as the user -// project for all subsequent calls. A user project is required for all operations -// on requester-pays buckets. +// project for all subsequent calls. Calls with a user project will be billed to that +// project rather than to the bucket's owning project. +// +// A user project is required for all operations on Requester Pays buckets. func (b *BucketHandle) UserProject(projectID string) *BucketHandle { b2 := *b b2.userProject = projectID @@ -604,6 +609,7 @@ func toLifecycle(rl *raw.BucketLifecycle) Lifecycle { if rr.Condition.CreatedBefore != "" { r.Condition.CreatedBefore, _ = time.Parse(rfc3339Date, rr.Condition.CreatedBefore) } + l.Rules = append(l.Rules, r) } return l } diff --git a/vendor/cloud.google.com/go/storage/doc.go b/vendor/cloud.google.com/go/storage/doc.go index 951391f548..06961b6f8c 100644 --- a/vendor/cloud.google.com/go/storage/doc.go +++ b/vendor/cloud.google.com/go/storage/doc.go @@ -36,6 +36,13 @@ To start working with this package, create a client: // TODO: Handle error. } +The client will use your default application credentials. + +If you only wish to access public data, you can create +an unauthenticated client with + + client, err := storage.NewClient(ctx, option.WithoutAuthentication()) + Buckets A Google Cloud Storage bucket is a collection of objects. To work with a diff --git a/vendor/cloud.google.com/go/storage/go110.go.pending b/vendor/cloud.google.com/go/storage/go110.go similarity index 100% rename from vendor/cloud.google.com/go/storage/go110.go.pending rename to vendor/cloud.google.com/go/storage/go110.go diff --git a/vendor/cloud.google.com/go/storage/iam.go b/vendor/cloud.google.com/go/storage/iam.go index 6607d8cc2c..9365509ed7 100644 --- a/vendor/cloud.google.com/go/storage/iam.go +++ b/vendor/cloud.google.com/go/storage/iam.go @@ -23,21 +23,28 @@ import ( // IAM provides access to IAM access control for the bucket. func (b *BucketHandle) IAM() *iam.Handle { - return iam.InternalNewHandleClient(&iamClient{raw: b.c.raw}, b.name) + return iam.InternalNewHandleClient(&iamClient{ + raw: b.c.raw, + userProject: b.userProject, + }, b.name) } // iamClient implements the iam.client interface. 
type iamClient struct { - raw *raw.Service + raw *raw.Service + userProject string } func (c *iamClient) Get(ctx context.Context, resource string) (*iampb.Policy, error) { - req := c.raw.Buckets.GetIamPolicy(resource) - setClientHeader(req.Header()) + call := c.raw.Buckets.GetIamPolicy(resource) + setClientHeader(call.Header()) + if c.userProject != "" { + call.UserProject(c.userProject) + } var rp *raw.Policy var err error err = runWithRetry(ctx, func() error { - rp, err = req.Context(ctx).Do() + rp, err = call.Context(ctx).Do() return err }) if err != nil { @@ -48,21 +55,27 @@ func (c *iamClient) Get(ctx context.Context, resource string) (*iampb.Policy, er func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) error { rp := iamToStoragePolicy(p) - req := c.raw.Buckets.SetIamPolicy(resource, rp) - setClientHeader(req.Header()) + call := c.raw.Buckets.SetIamPolicy(resource, rp) + setClientHeader(call.Header()) + if c.userProject != "" { + call.UserProject(c.userProject) + } return runWithRetry(ctx, func() error { - _, err := req.Context(ctx).Do() + _, err := call.Context(ctx).Do() return err }) } func (c *iamClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) { - req := c.raw.Buckets.TestIamPermissions(resource, perms) - setClientHeader(req.Header()) + call := c.raw.Buckets.TestIamPermissions(resource, perms) + setClientHeader(call.Header()) + if c.userProject != "" { + call.UserProject(c.userProject) + } var res *raw.TestIamPermissionsResponse var err error err = runWithRetry(ctx, func() error { - res, err = req.Context(ctx).Do() + res, err = call.Context(ctx).Do() return err }) if err != nil { diff --git a/vendor/cloud.google.com/go/storage/notifications.go b/vendor/cloud.google.com/go/storage/notifications.go new file mode 100644 index 0000000000..b95dd453a5 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/notifications.go @@ -0,0 +1,179 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "errors" + "fmt" + "regexp" + + "golang.org/x/net/context" + raw "google.golang.org/api/storage/v1" +) + +// A Notification describes how to send Cloud PubSub messages when certain +// events occur in a bucket. +type Notification struct { + //The ID of the notification. + ID string + + // The ID of the topic to which this subscription publishes. + TopicID string + + // The ID of the project to which the topic belongs. + TopicProjectID string + + // Only send notifications about listed event types. If empty, send notifications + // for all event types. + // See https://cloud.google.com/storage/docs/pubsub-notifications#events. + EventTypes []string + + // If present, only apply this notification configuration to object names that + // begin with this prefix. + ObjectNamePrefix string + + // An optional list of additional attributes to attach to each Cloud PubSub + // message published for this notification subscription. 
+ CustomAttributes map[string]string + + // The contents of the message payload. + // See https://cloud.google.com/storage/docs/pubsub-notifications#payload. + PayloadFormat string +} + +// Values for Notification.PayloadFormat. +const ( + // Send no payload with notification messages. + NoPayload = "NONE" + + // Send object metadata as JSON with notification messages. + JSONPayload = "JSON_API_V1" +) + +// Values for Notification.EventTypes. +const ( + // Event that occurs when an object is successfully created. + ObjectFinalizeEvent = "OBJECT_FINALIZE" + + // Event that occurs when the metadata of an existing object changes. + ObjectMetadataUpdateEvent = "OBJECT_METADATA_UPDATE" + + // Event that occurs when an object is permanently deleted. + ObjectDeleteEvent = "OBJECT_DELETE" + + // Event that occurs when the live version of an object becomes an + // archived version. + ObjectArchiveEvent = "OBJECT_ARCHIVE" +) + +func toNotification(rn *raw.Notification) *Notification { + n := &Notification{ + ID: rn.Id, + EventTypes: rn.EventTypes, + ObjectNamePrefix: rn.ObjectNamePrefix, + CustomAttributes: rn.CustomAttributes, + PayloadFormat: rn.PayloadFormat, + } + n.TopicProjectID, n.TopicID = parseNotificationTopic(rn.Topic) + return n +} + +var topicRE = regexp.MustCompile("^//pubsub.googleapis.com/projects/([^/]+)/topics/([^/]+)") + +// parseNotificationTopic extracts the project and topic IDs from from the full +// resource name returned by the service. If the name is malformed, it returns +// "?" for both IDs. +func parseNotificationTopic(nt string) (projectID, topicID string) { + matches := topicRE.FindStringSubmatch(nt) + if matches == nil { + return "?", "?" + } + return matches[1], matches[2] +} + +func toRawNotification(n *Notification) *raw.Notification { + return &raw.Notification{ + Id: n.ID, + Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s", + n.TopicProjectID, n.TopicID), + EventTypes: n.EventTypes, + ObjectNamePrefix: n.ObjectNamePrefix, + CustomAttributes: n.CustomAttributes, + PayloadFormat: string(n.PayloadFormat), + } +} + +// AddNotification adds a notification to b. You must set n's TopicProjectID, TopicID +// and PayloadFormat, and must not set its ID. The other fields are all optional. The +// returned Notification's ID can be used to refer to it. +func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (*Notification, error) { + if n.ID != "" { + return nil, errors.New("storage: AddNotification: ID must not be set") + } + if n.TopicProjectID == "" { + return nil, errors.New("storage: AddNotification: missing TopicProjectID") + } + if n.TopicID == "" { + return nil, errors.New("storage: AddNotification: missing TopicID") + } + call := b.c.raw.Notifications.Insert(b.name, toRawNotification(n)) + setClientHeader(call.Header()) + if b.userProject != "" { + call.UserProject(b.userProject) + } + rn, err := call.Context(ctx).Do() + if err != nil { + return nil, err + } + return toNotification(rn), nil +} + +// Notifications returns all the Notifications configured for this bucket, as a map +// indexed by notification ID. 
+func (b *BucketHandle) Notifications(ctx context.Context) (map[string]*Notification, error) { + call := b.c.raw.Notifications.List(b.name) + setClientHeader(call.Header()) + if b.userProject != "" { + call.UserProject(b.userProject) + } + var res *raw.Notifications + var err error + err = runWithRetry(ctx, func() error { + res, err = call.Context(ctx).Do() + return err + }) + if err != nil { + return nil, err + } + return notificationsToMap(res.Items), nil +} + +func notificationsToMap(rns []*raw.Notification) map[string]*Notification { + m := map[string]*Notification{} + for _, rn := range rns { + m[rn.Id] = toNotification(rn) + } + return m +} + +// DeleteNotification deletes the notification with the given ID. +func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) error { + call := b.c.raw.Notifications.Delete(b.name, id) + setClientHeader(call.Header()) + if b.userProject != "" { + call.UserProject(b.userProject) + } + return call.Context(ctx).Do() +} diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go index 1b0641beda..8ffa84536b 100644 --- a/vendor/cloud.google.com/go/storage/storage.go +++ b/vendor/cloud.google.com/go/storage/storage.go @@ -110,7 +110,10 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error // // Close need not be called at program exit. func (c *Client) Close() error { + // Set fields to nil so that subsequent uses + // will panic. c.hc = nil + c.raw = nil return nil } @@ -167,7 +170,7 @@ type SignedURLOptions struct { // Optional. ContentType string - // Headers is a list of extention headers the client must provide + // Headers is a list of extension headers the client must provide // in order to use the generated signed URL. // Optional. Headers []string @@ -635,11 +638,10 @@ func (o *ObjectHandle) validate() error { return nil } -// parseKey converts the binary contents of a private key file -// to an *rsa.PrivateKey. It detects whether the private key is in a -// PEM container or not. If so, it extracts the the private key -// from PEM container before conversion. It only supports PEM -// containers with no passphrase. +// parseKey converts the binary contents of a private key file to an +// *rsa.PrivateKey. It detects whether the private key is in a PEM container or +// not. If so, it extracts the private key from PEM container before +// conversion. It only supports PEM containers with no passphrase. func parseKey(key []byte) (*rsa.PrivateKey, error) { if block, _ := pem.Decode(key); block != nil { key = block.Bytes diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md b/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md new file mode 100644 index 0000000000..6dc348e02a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md @@ -0,0 +1,73 @@ +# Azure Storage SDK for Go + +The `github.com/Azure/azure-sdk-for-go/storage` package is used to perform REST operations against the [Azure Storage Service](https://docs.microsoft.com/en-us/azure/storage/). To manage your storage accounts (Azure Resource Manager / ARM), use the [github.com/Azure/azure-sdk-for-go/arm/storage](https://github.com/Azure/azure-sdk-for-go/tree/master/arm/storage) package. For your classic storage accounts (Azure Service Management / ASM), use [github.com/Azure/azure-sdk-for-go/management/storageservice](https://github.com/Azure/azure-sdk-for-go/tree/master/management/storageservice) package. 
+
+This package includes support for [Azure Storage Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/).
+
+# Getting Started
+
+ 1. Go get the SDK `go get -u github.com/Azure/azure-sdk-for-go/storage`
+ 1. If you don't already have one, [create a Storage Account](https://docs.microsoft.com/en-us/azure/storage/storage-create-storage-account).
+ - Take note of your Azure Storage Account Name and Azure Storage Account Key. They'll both be necessary for using this library.
+ - This option is production ready, but can also be used for development.
+ 1. (Optional, Windows only) Download and start the [Azure Storage Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/).
+ 1. Check out our existing [samples](https://github.com/Azure-Samples?q=Storage&language=go).
+
+# Contributing
+
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
+
+When contributing, please conform to the following practices:
+ - Run [gofmt](https://golang.org/cmd/gofmt/) to use standard go formatting.
+ - Run [golint](https://github.com/golang/lint) to conform to standard naming conventions.
+ - Run [go vet](https://golang.org/cmd/vet/) to catch common Go mistakes.
+ - Use [GoASTScanner/gas](https://github.com/GoASTScanner/gas) to ensure there are no common security violations in your contribution.
+ - Run [go test](https://golang.org/cmd/go/#hdr-Test_packages) to catch possible bugs in the code: `go test ./storage/...`.
+ - This project uses HTTP recordings for testing.
+ - The recorder should be attached to the client before calling the functions to test and later stopped.
+ - If you updated an existing test, its recording might need to be updated. Run `go test ./storage/... -ow -check.f TestName` to rerecord the test.
+ - Important note: all HTTP requests in the recording must be unique: different bodies, headers (`User-Agent`, `Authorization` and `Date` or `x-ms-date` headers are ignored), URLs and methods. As opposed to the example above, the following test is not suitable for recording:
+
+``` go
+func (s *StorageQueueSuite) TestQueueExists(c *chk.C) {
+cli := getQueueClient(c)
+rec := cli.client.appendRecorder(c)
+defer rec.Stop()
+
+queue := cli.GetQueueReference(queueName(c))
+ok, err := queue.Exists()
+c.Assert(err, chk.IsNil)
+c.Assert(ok, chk.Equals, false)
+
+c.Assert(queue.Create(nil), chk.IsNil)
+defer queue.Delete(nil)
+
+ok, err = queue.Exists() // This is the very same request as the one 5 lines above
+// The test replayer gets confused and the test fails in the last line
+c.Assert(err, chk.IsNil)
+c.Assert(ok, chk.Equals, true)
+}
+```
+
+ - On the other hand, this test does not repeat requests: the URLs are different.
+ +``` go +func (s *StorageQueueSuite) TestQueueExists(c *chk.C) { +cli := getQueueClient(c) +rec := cli.client.appendRecorder(c) +defer rec.Stop() + +queue1 := cli.GetQueueReference(queueName(c, "nonexistent")) +ok, err := queue1.Exists() +c.Assert(err, chk.IsNil) +c.Assert(ok, chk.Equals, false) + +queue2 := cli.GetQueueReference(queueName(c, "exisiting")) +c.Assert(queue2.Create(nil), chk.IsNil) +defer queue2.Delete(nil) + +ok, err = queue2.Exists() +c.Assert(err, chk.IsNil) +c.Assert(ok, chk.Equals, true) +} +``` \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go index c13d409b78..8b5b96d488 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go @@ -1,7 +1,23 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "bytes" + "crypto/md5" + "encoding/base64" "fmt" "net/http" "net/url" @@ -31,8 +47,7 @@ func (b *Blob) PutAppendBlob(options *PutBlobOptions) error { if err != nil { return err } - readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) + return b.respondCreation(resp, BlobTypeAppend) } // AppendBlockOptions includes the options for an append block operation @@ -46,6 +61,7 @@ type AppendBlockOptions struct { IfMatch string `header:"If-Match"` IfNoneMatch string `header:"If-None-Match"` RequestID string `header:"x-ms-client-request-id"` + ContentMD5 bool } // AppendBlock appends a block to an append blob. @@ -60,6 +76,10 @@ func (b *Blob) AppendBlock(chunk []byte, options *AppendBlockOptions) error { if options != nil { params = addTimeout(params, options.Timeout) headers = mergeHeaders(headers, headersFromStruct(*options)) + if options.ContentMD5 { + md5sum := md5.Sum(chunk) + headers[headerContentMD5] = base64.StdEncoding.EncodeToString(md5sum[:]) + } } uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) @@ -67,6 +87,5 @@ func (b *Blob) AppendBlock(chunk []byte, options *AppendBlockOptions) error { if err != nil { return err } - readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) + return b.respondCreation(resp, BlobTypeAppend) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go index d4ad5da273..76794c3051 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go @@ -1,6 +1,20 @@ // Package storage provides clients for Microsoft Azure Storage Services. package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "bytes" "fmt" @@ -41,11 +55,13 @@ const ( ) func (c *Client) addAuthorizationHeader(verb, url string, headers map[string]string, auth authentication) (map[string]string, error) { - authHeader, err := c.getSharedKey(verb, url, headers, auth) - if err != nil { - return nil, err + if !c.sasClient { + authHeader, err := c.getSharedKey(verb, url, headers, auth) + if err != nil { + return nil, err + } + headers[headerAuthorization] = authHeader } - headers[headerAuthorization] = authHeader return headers, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go index 12f61ac2ac..a9d3cfccb6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "encoding/xml" "errors" @@ -90,7 +104,7 @@ type BlobProperties struct { CacheControl string `xml:"Cache-Control" header:"x-ms-blob-cache-control"` ContentLanguage string `xml:"Cache-Language" header:"x-ms-blob-content-language"` ContentDisposition string `xml:"Content-Disposition" header:"x-ms-blob-content-disposition"` - BlobType BlobType `xml:"x-ms-blob-blob-type"` + BlobType BlobType `xml:"BlobType"` SequenceNumber int64 `xml:"x-ms-blob-sequence-number"` CopyID string `xml:"CopyId"` CopyStatus string `xml:"CopyStatus"` @@ -135,8 +149,7 @@ func (b *Blob) Exists() (bool, error) { } // GetURL gets the canonical URL to the blob with the specified name in the -// specified container. If name is not specified, the canonical URL for the entire -// container is obtained. +// specified container. // This method does not create a publicly accessible URL if the blob or container // is private and this method does not check if the blob exists. 
func (b *Blob) GetURL() string { @@ -437,8 +450,8 @@ func (b *Blob) SetProperties(options *SetBlobPropertiesOptions) error { uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) if b.Properties.BlobType == BlobTypePage { - headers = addToHeaders(headers, "x-ms-blob-content-length", fmt.Sprintf("byte %v", b.Properties.ContentLength)) - if options != nil || options.SequenceNumberAction != nil { + headers = addToHeaders(headers, "x-ms-blob-content-length", fmt.Sprintf("%v", b.Properties.ContentLength)) + if options != nil && options.SequenceNumberAction != nil { headers = addToHeaders(headers, "x-ms-sequence-number-action", string(*options.SequenceNumberAction)) if *options.SequenceNumberAction != SequenceNumberActionIncrement { headers = addToHeaders(headers, "x-ms-blob-sequence-number", fmt.Sprintf("%v", b.Properties.SequenceNumber)) @@ -627,3 +640,13 @@ func pathForResource(container, name string) string { } return fmt.Sprintf("/%s", container) } + +func (b *Blob) respondCreation(resp *storageResponse, bt BlobType) error { + readAndCloseBody(resp.body) + err := checkRespCode(resp.statusCode, []int{http.StatusCreated}) + if err != nil { + return err + } + b.Properties.BlobType = bt + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go index 8f0864e34d..e11af77441 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "errors" "fmt" @@ -8,70 +22,122 @@ import ( "time" ) -// GetSASURIWithSignedIPAndProtocol creates an URL to the specified blob which contains the Shared -// Access Signature with specified permissions and expiration time. Also includes signedIPRange and allowed protocols. -// If old API version is used but no signedIP is passed (ie empty string) then this should still work. -// We only populate the signedIP when it non-empty. +// OverrideHeaders defines overridable response heaedrs in +// a request using a SAS URI. +// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas +type OverrideHeaders struct { + CacheControl string + ContentDisposition string + ContentEncoding string + ContentLanguage string + ContentType string +} + +// BlobSASOptions are options to construct a blob SAS +// URI. +// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas +type BlobSASOptions struct { + BlobServiceSASPermissions + OverrideHeaders + SASOptions +} + +// BlobServiceSASPermissions includes the available permissions for +// blob service SAS URI. 
+type BlobServiceSASPermissions struct { + Read bool + Add bool + Create bool + Write bool + Delete bool +} + +func (p BlobServiceSASPermissions) buildString() string { + permissions := "" + if p.Read { + permissions += "r" + } + if p.Add { + permissions += "a" + } + if p.Create { + permissions += "c" + } + if p.Write { + permissions += "w" + } + if p.Delete { + permissions += "d" + } + return permissions +} + +// GetSASURI creates an URL to the blob which contains the Shared +// Access Signature with the specified options. // -// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx -func (b *Blob) GetSASURIWithSignedIPAndProtocol(expiry time.Time, permissions string, signedIPRange string, HTTPSOnly bool) (string, error) { - var ( - signedPermissions = permissions - blobURL = b.GetURL() - ) - canonicalizedResource, err := b.Container.bsc.client.buildCanonicalizedResource(blobURL, b.Container.bsc.auth, true) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas +func (b *Blob) GetSASURI(options BlobSASOptions) (string, error) { + uri := b.GetURL() + signedResource := "b" + canonicalizedResource, err := b.Container.bsc.client.buildCanonicalizedResource(uri, b.Container.bsc.auth, true) if err != nil { return "", err } - // "The canonicalizedresouce portion of the string is a canonical path to the signed resource. - // It must include the service name (blob, table, queue or file) for version 2015-02-21 or - // later, the storage account name, and the resource name, and must be URL-decoded. - // -- https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx + permissions := options.BlobServiceSASPermissions.buildString() + return b.Container.bsc.client.blobAndFileSASURI(options.SASOptions, uri, permissions, canonicalizedResource, signedResource, options.OverrideHeaders) +} + +func (c *Client) blobAndFileSASURI(options SASOptions, uri, permissions, canonicalizedResource, signedResource string, headers OverrideHeaders) (string, error) { + start := "" + if options.Start != (time.Time{}) { + start = options.Start.UTC().Format(time.RFC3339) + } + + expiry := options.Expiry.UTC().Format(time.RFC3339) // We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component). 
canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1) - canonicalizedResource, err = url.QueryUnescape(canonicalizedResource) + canonicalizedResource, err := url.QueryUnescape(canonicalizedResource) if err != nil { return "", err } - signedExpiry := expiry.UTC().Format(time.RFC3339) - - //If blob name is missing, resource is a container - signedResource := "c" - if len(b.Name) > 0 { - signedResource = "b" - } - protocols := "" - if HTTPSOnly { + if options.UseHTTPS { protocols = "https" } - stringToSign, err := blobSASStringToSign(b.Container.bsc.client.apiVersion, canonicalizedResource, signedExpiry, signedPermissions, signedIPRange, protocols) + stringToSign, err := blobSASStringToSign(permissions, start, expiry, canonicalizedResource, options.Identifier, options.IP, protocols, c.apiVersion, headers) if err != nil { return "", err } - sig := b.Container.bsc.client.computeHmac256(stringToSign) + sig := c.computeHmac256(stringToSign) sasParams := url.Values{ - "sv": {b.Container.bsc.client.apiVersion}, - "se": {signedExpiry}, + "sv": {c.apiVersion}, + "se": {expiry}, "sr": {signedResource}, - "sp": {signedPermissions}, + "sp": {permissions}, "sig": {sig}, } - if b.Container.bsc.client.apiVersion >= "2015-04-05" { + if c.apiVersion >= "2015-04-05" { if protocols != "" { sasParams.Add("spr", protocols) } - if signedIPRange != "" { - sasParams.Add("sip", signedIPRange) + if options.IP != "" { + sasParams.Add("sip", options.IP) } } - sasURL, err := url.Parse(blobURL) + // Add override response hedaers + addQueryParameter(sasParams, "rscc", headers.CacheControl) + addQueryParameter(sasParams, "rscd", headers.ContentDisposition) + addQueryParameter(sasParams, "rsce", headers.ContentEncoding) + addQueryParameter(sasParams, "rscl", headers.ContentLanguage) + addQueryParameter(sasParams, "rsct", headers.ContentType) + + sasURL, err := url.Parse(uri) if err != nil { return "", err } @@ -79,16 +145,12 @@ func (b *Blob) GetSASURIWithSignedIPAndProtocol(expiry time.Time, permissions st return sasURL.String(), nil } -// GetSASURI creates an URL to the specified blob which contains the Shared -// Access Signature with specified permissions and expiration time. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx -func (b *Blob) GetSASURI(expiry time.Time, permissions string) (string, error) { - return b.GetSASURIWithSignedIPAndProtocol(expiry, permissions, "", false) -} - -func blobSASStringToSign(signedVersion, canonicalizedResource, signedExpiry, signedPermissions string, signedIP string, protocols string) (string, error) { - var signedStart, signedIdentifier, rscc, rscd, rsce, rscl, rsct string +func blobSASStringToSign(signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion string, headers OverrideHeaders) (string, error) { + rscc := headers.CacheControl + rscd := headers.ContentDisposition + rsce := headers.ContentEncoding + rscl := headers.ContentLanguage + rsct := headers.ContentType if signedVersion >= "2015-02-21" { canonicalizedResource = "/blob" + canonicalizedResource diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go index 450b20f967..8fe21b0cfd 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go @@ -1,9 +1,25 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( + "fmt" "net/http" "net/url" "strconv" + "strings" ) // BlobStorageClient contains operations for Microsoft Azure Blob Storage @@ -45,6 +61,21 @@ func (b *BlobStorageClient) GetContainerReference(name string) *Container { } } +// GetContainerReferenceFromSASURI returns a Container object for the specified +// container SASURI +func GetContainerReferenceFromSASURI(sasuri url.URL) (*Container, error) { + path := strings.Split(sasuri.Path, "/") + if len(path) <= 1 { + return nil, fmt.Errorf("could not find a container in URI: %s", sasuri.String()) + } + cli := newSASClient().GetBlobService() + return &Container{ + bsc: &cli, + Name: path[1], + sasuri: sasuri, + }, nil +} + // ListContainers returns the list of containers in a storage account along with // pagination token and other response details. // diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go index 5258f24fd7..e0176d664a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "bytes" "encoding/xml" @@ -132,8 +146,7 @@ func (b *Blob) CreateBlockBlobFromReader(blob io.Reader, options *PutBlobOptions if err != nil { return err } - readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) + return b.respondCreation(resp, BlobTypeBlock) } // PutBlockOptions includes the options for a put block operation @@ -181,8 +194,7 @@ func (b *Blob) PutBlockWithLength(blockID string, size uint64, blob io.Reader, o if err != nil { return err } - readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) + return b.respondCreation(resp, BlobTypeBlock) } // PutBlockListOptions includes the options for a put block list operation diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go index 42fa702f65..8f6cd95da7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go @@ -1,6 +1,20 @@ // Package storage provides clients for Microsoft Azure Storage Services. package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "bufio" "bytes" @@ -33,7 +47,9 @@ const ( // basic client is created. DefaultAPIVersion = "2016-05-31" - defaultUseHTTPS = true + defaultUseHTTPS = true + defaultRetryAttempts = 5 + defaultRetryDuration = time.Second * 5 // StorageEmulatorAccountName is the fixed storage account used by Azure Storage Emulator StorageEmulatorAccountName = "devstoreaccount1" @@ -56,7 +72,14 @@ const ( ) var ( - validStorageAccount = regexp.MustCompile("^[0-9a-z]{3,24}$") + validStorageAccount = regexp.MustCompile("^[0-9a-z]{3,24}$") + defaultValidStatusCodes = []int{ + http.StatusRequestTimeout, // 408 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + } ) // Sender sends a request @@ -112,6 +135,8 @@ type Client struct { baseURL string apiVersion string userAgent string + sasClient bool + accountSASToken url.Values } type storageResponse struct { @@ -206,13 +231,13 @@ func NewEmulatorClient() (Client, error) { // NewClient constructs a Client. This should be used if the caller wants // to specify whether to use HTTPS, a specific REST API version or a custom // storage endpoint than Azure Public Cloud. 
-func NewClient(accountName, accountKey, blobServiceBaseURL, apiVersion string, useHTTPS bool) (Client, error) { +func NewClient(accountName, accountKey, serviceBaseURL, apiVersion string, useHTTPS bool) (Client, error) { var c Client if !IsValidStorageAccount(accountName) { return c, fmt.Errorf("azure: account name is not valid: it must be between 3 and 24 characters, and only may contain numbers and lowercase letters: %v", accountName) } else if accountKey == "" { return c, fmt.Errorf("azure: account key required") - } else if blobServiceBaseURL == "" { + } else if serviceBaseURL == "" { return c, fmt.Errorf("azure: base storage service url required") } @@ -226,19 +251,14 @@ func NewClient(accountName, accountKey, blobServiceBaseURL, apiVersion string, u accountName: accountName, accountKey: key, useHTTPS: useHTTPS, - baseURL: blobServiceBaseURL, + baseURL: serviceBaseURL, apiVersion: apiVersion, + sasClient: false, UseSharedKeyLite: false, Sender: &DefaultSender{ - RetryAttempts: 5, - ValidStatusCodes: []int{ - http.StatusRequestTimeout, // 408 - http.StatusInternalServerError, // 500 - http.StatusBadGateway, // 502 - http.StatusServiceUnavailable, // 503 - http.StatusGatewayTimeout, // 504 - }, - RetryDuration: time.Second * 5, + RetryAttempts: defaultRetryAttempts, + ValidStatusCodes: defaultValidStatusCodes, + RetryDuration: defaultRetryDuration, }, } c.userAgent = c.getDefaultUserAgent() @@ -251,6 +271,43 @@ func IsValidStorageAccount(account string) bool { return validStorageAccount.MatchString(account) } +// NewAccountSASClient contructs a client that uses accountSAS authorization +// for its operations. +func NewAccountSASClient(account string, token url.Values, env azure.Environment) Client { + c := newSASClient() + c.accountSASToken = token + c.accountName = account + c.baseURL = env.StorageEndpointSuffix + + // Get API version and protocol from token + c.apiVersion = token.Get("sv") + c.useHTTPS = token.Get("spr") == "https" + return c +} + +func newSASClient() Client { + c := Client{ + HTTPClient: http.DefaultClient, + apiVersion: DefaultAPIVersion, + sasClient: true, + Sender: &DefaultSender{ + RetryAttempts: defaultRetryAttempts, + ValidStatusCodes: defaultValidStatusCodes, + RetryDuration: defaultRetryDuration, + }, + } + c.userAgent = c.getDefaultUserAgent() + return c +} + +func (c Client) isServiceSASClient() bool { + return c.sasClient && c.accountSASToken == nil +} + +func (c Client) isAccountSASClient() bool { + return c.sasClient && c.accountSASToken != nil +} + func (c Client) getDefaultUserAgent() string { return fmt.Sprintf("Go/%s (%s-%s) azure-storage-go/%s api-version/%s", runtime.Version(), @@ -323,6 +380,164 @@ func (c Client) getEndpoint(service, path string, params url.Values) string { return u.String() } +// AccountSASTokenOptions includes options for constructing +// an account SAS token. +// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas +type AccountSASTokenOptions struct { + APIVersion string + Services Services + ResourceTypes ResourceTypes + Permissions Permissions + Start time.Time + Expiry time.Time + IP string + UseHTTPS bool +} + +// Services specify services accessible with an account SAS. +type Services struct { + Blob bool + Queue bool + Table bool + File bool +} + +// ResourceTypes specify the resources accesible with an +// account SAS. +type ResourceTypes struct { + Service bool + Container bool + Object bool +} + +// Permissions specifies permissions for an accountSAS. 
+type Permissions struct { + Read bool + Write bool + Delete bool + List bool + Add bool + Create bool + Update bool + Process bool +} + +// GetAccountSASToken creates an account SAS token +// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas +func (c Client) GetAccountSASToken(options AccountSASTokenOptions) (url.Values, error) { + if options.APIVersion == "" { + options.APIVersion = c.apiVersion + } + + if options.APIVersion < "2015-04-05" { + return url.Values{}, fmt.Errorf("account SAS does not support API versions prior to 2015-04-05. API version : %s", options.APIVersion) + } + + // build services string + services := "" + if options.Services.Blob { + services += "b" + } + if options.Services.Queue { + services += "q" + } + if options.Services.Table { + services += "t" + } + if options.Services.File { + services += "f" + } + + // build resources string + resources := "" + if options.ResourceTypes.Service { + resources += "s" + } + if options.ResourceTypes.Container { + resources += "c" + } + if options.ResourceTypes.Object { + resources += "o" + } + + // build permissions string + permissions := "" + if options.Permissions.Read { + permissions += "r" + } + if options.Permissions.Write { + permissions += "w" + } + if options.Permissions.Delete { + permissions += "d" + } + if options.Permissions.List { + permissions += "l" + } + if options.Permissions.Add { + permissions += "a" + } + if options.Permissions.Create { + permissions += "c" + } + if options.Permissions.Update { + permissions += "u" + } + if options.Permissions.Process { + permissions += "p" + } + + // build start time, if exists + start := "" + if options.Start != (time.Time{}) { + start = options.Start.Format(time.RFC3339) + // For some reason I don't understand, it fails when the rest of the string is included + start = start[:10] + } + + // build expiry time + expiry := options.Expiry.Format(time.RFC3339) + // For some reason I don't understand, it fails when the rest of the string is included + expiry = expiry[:10] + + protocol := "https,http" + if options.UseHTTPS { + protocol = "https" + } + + stringToSign := strings.Join([]string{ + c.accountName, + permissions, + services, + resources, + start, + expiry, + options.IP, + protocol, + options.APIVersion, + "", + }, "\n") + signature := c.computeHmac256(stringToSign) + + sasParams := url.Values{ + "sv": {options.APIVersion}, + "ss": {services}, + "srt": {resources}, + "sp": {permissions}, + "se": {expiry}, + "spr": {protocol}, + "sig": {signature}, + } + if start != "" { + sasParams.Add("st", start) + } + if options.IP != "" { + sasParams.Add("sip", options.IP) + } + + return sasParams, nil +} + // GetBlobService returns a BlobStorageClient which can operate on the blob // service of the storage account. func (c Client) GetBlobService() BlobStorageClient { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go new file mode 100644 index 0000000000..e898e9bfaf --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go @@ -0,0 +1,38 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "net/url" + "time" +) + +// SASOptions includes options used by SAS URIs for different +// services and resources. +type SASOptions struct { + APIVersion string + Start time.Time + Expiry time.Time + IP string + UseHTTPS bool + Identifier string +} + +func addQueryParameter(query url.Values, key, value string) url.Values { + if value != "" { + query.Add(key, value) + } + return query +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go index c2c9c055b5..8963c7a89b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "encoding/xml" "errors" @@ -18,12 +32,66 @@ type Container struct { Name string `xml:"Name"` Properties ContainerProperties `xml:"Properties"` Metadata map[string]string + sasuri url.URL +} + +// Client returns the HTTP client used by the Container reference. +func (c *Container) Client() *Client { + return &c.bsc.client } func (c *Container) buildPath() string { return fmt.Sprintf("/%s", c.Name) } +// GetURL gets the canonical URL to the container. +// This method does not create a publicly accessible URL if the container +// is private and this method does not check if the blob exists. +func (c *Container) GetURL() string { + container := c.Name + if container == "" { + container = "$root" + } + return c.bsc.client.getEndpoint(blobServiceName, pathForResource(container, ""), nil) +} + +// ContainerSASOptions are options to construct a container SAS +// URI. +// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas +type ContainerSASOptions struct { + ContainerSASPermissions + OverrideHeaders + SASOptions +} + +// ContainerSASPermissions includes the available permissions for +// a container SAS URI. +type ContainerSASPermissions struct { + BlobServiceSASPermissions + List bool +} + +// GetSASURI creates an URL to the container which contains the Shared +// Access Signature with the specified options. 
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas +func (c *Container) GetSASURI(options ContainerSASOptions) (string, error) { + uri := c.GetURL() + signedResource := "c" + canonicalizedResource, err := c.bsc.client.buildCanonicalizedResource(uri, c.bsc.auth, true) + if err != nil { + return "", err + } + + // build permissions string + permissions := options.BlobServiceSASPermissions.buildString() + if options.List { + permissions += "l" + } + + return c.bsc.client.blobAndFileSASURI(options.SASOptions, uri, permissions, canonicalizedResource, signedResource, options.OverrideHeaders) +} + // ContainerProperties contains various properties of a container returned from // various endpoints like ListContainers. type ContainerProperties struct { @@ -224,7 +292,20 @@ func (c *Container) create(options *CreateContainerOptions) (*storageResponse, e // Exists returns true if a container with given name exists // on the storage account, otherwise returns false. func (c *Container) Exists() (bool, error) { - uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), url.Values{"restype": {"container"}}) + q := url.Values{"restype": {"container"}} + var uri string + if c.bsc.client.isServiceSASClient() { + q = mergeParams(q, c.sasuri.Query()) + newURI := c.sasuri + newURI.RawQuery = q.Encode() + uri = newURI.String() + + } else { + if c.bsc.client.isAccountSASClient() { + q = mergeParams(q, c.bsc.client.accountSASToken) + } + uri = c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q) + } headers := c.bsc.client.getStandardHeaders() resp, err := c.bsc.client.exec(http.MethodHead, uri, headers, nil, c.bsc.auth) @@ -399,9 +480,20 @@ func (c *Container) delete(options *DeleteContainerOptions) (*storageResponse, e func (c *Container) ListBlobs(params ListBlobsParameters) (BlobListResponse, error) { q := mergeParams(params.getParameters(), url.Values{ "restype": {"container"}, - "comp": {"list"}}, - ) - uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q) + "comp": {"list"}, + }) + var uri string + if c.bsc.client.isServiceSASClient() { + q = mergeParams(q, c.sasuri.Query()) + newURI := c.sasuri + newURI.RawQuery = q.Encode() + uri = newURI.String() + } else { + if c.bsc.client.isAccountSASClient() { + q = mergeParams(q, c.bsc.client.accountSASToken) + } + uri = c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q) + } headers := c.bsc.client.getStandardHeaders() headers = addToHeaders(headers, "x-ms-client-request-id", params.RequestID) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go index f143426188..a4cc2527b6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ import ( "errors" "fmt" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go index 57053efd16..189e038024 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "encoding/xml" "net/http" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go index 13e9475073..9668ea6694 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "bytes" "encoding/json" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go index b4022bcb6d..5fb516c55f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "errors" "fmt" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go index 81217bdfa8..295e3d3e25 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "encoding/xml" "fmt" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go index 415b740183..3d9d52d8e3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "errors" "net/http" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go index 3ededcd421..7d9038a5f7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "encoding/xml" "fmt" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go index 41d832e2be..800adf129d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // MetadataLevel determines if operations should return a paylod, // and it level of detail. 
type MetadataLevel string diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go index 468b3868ac..c59fd4b50b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "encoding/xml" "errors" @@ -186,6 +200,5 @@ func (b *Blob) PutPageBlob(options *PutBlobOptions) error { if err != nil { return err } - readAndCloseBody(resp.body) - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) + return b.respondCreation(resp, BlobTypePage) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go index c2c7f742c4..499592ebd1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "encoding/xml" "errors" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go new file mode 100644 index 0000000000..28d9ab937e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go @@ -0,0 +1,146 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "errors" + "fmt" + "net/url" + "strings" + "time" +) + +// QueueSASOptions are options to construct a blob SAS +// URI. +// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas +type QueueSASOptions struct { + QueueSASPermissions + SASOptions +} + +// QueueSASPermissions includes the available permissions for +// a queue SAS URI. 
+type QueueSASPermissions struct { + Read bool + Add bool + Update bool + Process bool +} + +func (q QueueSASPermissions) buildString() string { + permissions := "" + + if q.Read { + permissions += "r" + } + if q.Add { + permissions += "a" + } + if q.Update { + permissions += "u" + } + if q.Process { + permissions += "p" + } + return permissions +} + +// GetSASURI creates an URL to the specified queue which contains the Shared +// Access Signature with specified permissions and expiration time. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas +func (q *Queue) GetSASURI(options QueueSASOptions) (string, error) { + canonicalizedResource, err := q.qsc.client.buildCanonicalizedResource(q.buildPath(), q.qsc.auth, true) + if err != nil { + return "", err + } + + // "The canonicalizedresouce portion of the string is a canonical path to the signed resource. + // It must include the service name (blob, table, queue or file) for version 2015-02-21 or + // later, the storage account name, and the resource name, and must be URL-decoded. + // -- https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx + // We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component). + canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1) + canonicalizedResource, err = url.QueryUnescape(canonicalizedResource) + if err != nil { + return "", err + } + + signedStart := "" + if options.Start != (time.Time{}) { + signedStart = options.Start.UTC().Format(time.RFC3339) + } + signedExpiry := options.Expiry.UTC().Format(time.RFC3339) + + protocols := "https,http" + if options.UseHTTPS { + protocols = "https" + } + + permissions := options.QueueSASPermissions.buildString() + stringToSign, err := queueSASStringToSign(q.qsc.client.apiVersion, canonicalizedResource, signedStart, signedExpiry, options.IP, permissions, protocols, options.Identifier) + if err != nil { + return "", err + } + + sig := q.qsc.client.computeHmac256(stringToSign) + sasParams := url.Values{ + "sv": {q.qsc.client.apiVersion}, + "se": {signedExpiry}, + "sp": {permissions}, + "sig": {sig}, + } + + if q.qsc.client.apiVersion >= "2015-04-05" { + sasParams.Add("spr", protocols) + addQueryParameter(sasParams, "sip", options.IP) + } + + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), nil) + sasURL, err := url.Parse(uri) + if err != nil { + return "", err + } + sasURL.RawQuery = sasParams.Encode() + return sasURL.String(), nil +} + +func queueSASStringToSign(signedVersion, canonicalizedResource, signedStart, signedExpiry, signedIP, signedPermissions, protocols, signedIdentifier string) (string, error) { + + if signedVersion >= "2015-02-21" { + canonicalizedResource = "/queue" + canonicalizedResource + } + + // https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12 + if signedVersion >= "2015-04-05" { + return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", + signedPermissions, + signedStart, + signedExpiry, + canonicalizedResource, + signedIdentifier, + signedIP, + protocols, + signedVersion), nil + + } + + // reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx + if signedVersion >= "2013-08-15" { + return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion), nil + } + + return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15") +} diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go index 19b44941c8..29febe146f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // QueueServiceClient contains operations for Microsoft Azure Queue Storage // Service. type QueueServiceClient struct { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go index e6a868081a..a14d9d3244 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "fmt" "net/http" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go index bee1c31ad6..056ab398a8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
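Circling back to the new queuesasuri.go above: QueueSASOptions embeds both the permission booleans and the shared SASOptions, so the promoted fields can be set directly before calling Queue.GetSASURI. A minimal sketch, assuming the queue reference comes from the queue service client's GetQueueReference helper (not shown in this change):

package main

import (
	"fmt"
	"time"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	cli, err := storage.NewBasicClient("myaccount", "bWFkZS11cC1rZXk=")
	if err != nil {
		fmt.Println("client:", err)
		return
	}
	queueCli := cli.GetQueueService()
	queue := queueCli.GetQueueReference("jobs") // assumed helper, mirrors the blob side

	opts := storage.QueueSASOptions{}
	opts.Read = true    // "r" in QueueSASPermissions.buildString
	opts.Process = true // "p"
	opts.Expiry = time.Now().Add(30 * time.Minute).UTC()
	opts.UseHTTPS = true // restricts the signed protocols to "https"

	uri, err := queue.GetSASURI(opts)
	if err != nil {
		fmt.Println("sas:", err)
		return
	}
	fmt.Println("queue SAS URI:", uri)
}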
+ import ( "strings" "time" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go index 88700fbc93..c102619c98 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "net/http" "net/url" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go index 4eae3af9df..6c01d32ee1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "bytes" "encoding/json" @@ -174,11 +188,7 @@ func (t *Table) Delete(timeout uint, options *TableOptions) error { } defer readAndCloseBody(resp.body) - if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { - return err - - } - return nil + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) } // QueryOptions includes options for a query entities operation. @@ -261,10 +271,7 @@ func (t *Table) SetPermissions(tap []TableAccessPolicy, timeout uint, options *T } defer readAndCloseBody(resp.body) - if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { - return err - } - return nil + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) } func generateTableACLPayload(policies []TableAccessPolicy) (io.Reader, int, error) { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go index 7a0f0915c6..3f882417c6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "bytes" "encoding/json" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go index 895dcfded8..456bee7733 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "encoding/json" "fmt" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go index d3ae9d092d..7734b8f886 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ import ( "bytes" "crypto/hmac" @@ -18,7 +32,29 @@ import ( ) var ( - fixedTime = time.Date(2050, time.December, 20, 21, 55, 0, 0, time.FixedZone("GMT", -6)) + fixedTime = time.Date(2050, time.December, 20, 21, 55, 0, 0, time.FixedZone("GMT", -6)) + accountSASOptions = AccountSASTokenOptions{ + Services: Services{ + Blob: true, + }, + ResourceTypes: ResourceTypes{ + Service: true, + Container: true, + Object: true, + }, + Permissions: Permissions{ + Read: true, + Write: true, + Delete: true, + List: true, + Add: true, + Create: true, + Update: true, + Process: true, + }, + Expiry: fixedTime, + UseHTTPS: true, + } ) func (c Client) computeHmac256(message string) string { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.7.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.7.go index 345bb28f26..67ff6ca03f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.7.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.7.go @@ -1,5 +1,19 @@ // +build !go1.8 +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package storage import ( diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.8.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.8.go index ed8b77919b..eada102c0c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.8.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.8.go @@ -1,5 +1,19 @@ // +build go1.8 +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package storage import ( diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/version.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/version.go index a23fff1e2e..1cd3e03d12 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/version.go @@ -1,5 +1,19 @@ package storage +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + var ( sdkVersion = "10.0.2" ) diff --git a/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go index 1bd6057da8..bcbe00d0c5 100644 --- a/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go +++ b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go @@ -5,7 +5,7 @@ type csiEntryState struct { } func (csiState csiEntryState) Handle(b byte) (s state, e error) { - logger.Infof("CsiEntry::Handle %#x", b) + csiState.parser.logf("CsiEntry::Handle %#x", b) nextState, err := csiState.baseState.Handle(b) if nextState != nil || err != nil { @@ -25,7 +25,7 @@ func (csiState csiEntryState) Handle(b byte) (s state, e error) { } func (csiState csiEntryState) Transition(s state) error { - logger.Infof("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name()) + csiState.parser.logf("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name()) csiState.baseState.Transition(s) switch s { diff --git a/vendor/github.com/Azure/go-ansiterm/csi_param_state.go b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go index 4be35c5fd2..7ed5e01c34 100644 --- a/vendor/github.com/Azure/go-ansiterm/csi_param_state.go +++ b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go @@ -5,7 +5,7 @@ type csiParamState struct { } func (csiState csiParamState) Handle(b byte) (s state, e error) { - logger.Infof("CsiParam::Handle %#x", b) + csiState.parser.logf("CsiParam::Handle %#x", b) nextState, err := csiState.baseState.Handle(b) if nextState != nil || err != nil { @@ -26,7 +26,7 @@ func (csiState csiParamState) Handle(b byte) (s state, e error) { } func (csiState csiParamState) Transition(s state) error { - logger.Infof("CsiParam::Transition %s --> %s", csiState.Name(), s.Name()) + csiState.parser.logf("CsiParam::Transition %s --> %s", csiState.Name(), s.Name()) csiState.baseState.Transition(s) switch s { diff --git a/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go index 2189eb6b6b..1c719db9e4 100644 --- a/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go +++ b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go @@ -5,7 +5,7 @@ type escapeIntermediateState struct { } func (escState escapeIntermediateState) Handle(b byte) (s state, e error) { - logger.Infof("escapeIntermediateState::Handle %#x", b) + escState.parser.logf("escapeIntermediateState::Handle %#x", b) nextState, err := escState.baseState.Handle(b) if nextState != nil || err != nil { return nextState, err @@ -24,7 +24,7 @@ func (escState escapeIntermediateState) Handle(b byte) (s state, e error) { } func (escState escapeIntermediateState) Transition(s state) error { - logger.Infof("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name()) + escState.parser.logf("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name()) escState.baseState.Transition(s) switch s { diff --git a/vendor/github.com/Azure/go-ansiterm/escape_state.go b/vendor/github.com/Azure/go-ansiterm/escape_state.go index 7b1b9ad3f1..6390abd231 100644 --- a/vendor/github.com/Azure/go-ansiterm/escape_state.go +++ b/vendor/github.com/Azure/go-ansiterm/escape_state.go @@ -5,7 +5,7 @@ type escapeState struct { } func (escState escapeState) Handle(b byte) (s state, e error) { - logger.Infof("escapeState::Handle %#x", b) + escState.parser.logf("escapeState::Handle %#x", b) nextState, err := 
escState.baseState.Handle(b) if nextState != nil || err != nil { return nextState, err @@ -28,7 +28,7 @@ func (escState escapeState) Handle(b byte) (s state, e error) { } func (escState escapeState) Transition(s state) error { - logger.Infof("Escape::Transition %s --> %s", escState.Name(), s.Name()) + escState.parser.logf("Escape::Transition %s --> %s", escState.Name(), s.Name()) escState.baseState.Transition(s) switch s { diff --git a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go index 24062d420e..593b10ab69 100644 --- a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go +++ b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go @@ -5,7 +5,7 @@ type oscStringState struct { } func (oscState oscStringState) Handle(b byte) (s state, e error) { - logger.Infof("OscString::Handle %#x", b) + oscState.parser.logf("OscString::Handle %#x", b) nextState, err := oscState.baseState.Handle(b) if nextState != nil || err != nil { return nextState, err diff --git a/vendor/github.com/Azure/go-ansiterm/parser.go b/vendor/github.com/Azure/go-ansiterm/parser.go index 3286a9cb5e..03cec7ada6 100644 --- a/vendor/github.com/Azure/go-ansiterm/parser.go +++ b/vendor/github.com/Azure/go-ansiterm/parser.go @@ -2,14 +2,10 @@ package ansiterm import ( "errors" - "io/ioutil" + "log" "os" - - "github.com/sirupsen/logrus" ) -var logger *logrus.Logger - type AnsiParser struct { currState state eventHandler AnsiEventHandler @@ -23,50 +19,69 @@ type AnsiParser struct { ground state oscString state stateMap []state + + logf func(string, ...interface{}) } -func CreateParser(initialState string, evtHandler AnsiEventHandler) *AnsiParser { - logFile := ioutil.Discard +type Option func(*AnsiParser) - if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" { - logFile, _ = os.Create("ansiParser.log") +func WithLogf(f func(string, ...interface{})) Option { + return func(ap *AnsiParser) { + ap.logf = f } +} - logger = &logrus.Logger{ - Out: logFile, - Formatter: new(logrus.TextFormatter), - Level: logrus.InfoLevel, - } - - parser := &AnsiParser{ +func CreateParser(initialState string, evtHandler AnsiEventHandler, opts ...Option) *AnsiParser { + ap := &AnsiParser{ eventHandler: evtHandler, context: &ansiContext{}, } - - parser.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: parser}} - parser.csiParam = csiParamState{baseState{name: "CsiParam", parser: parser}} - parser.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: parser}} - parser.escape = escapeState{baseState{name: "Escape", parser: parser}} - parser.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: parser}} - parser.error = errorState{baseState{name: "Error", parser: parser}} - parser.ground = groundState{baseState{name: "Ground", parser: parser}} - parser.oscString = oscStringState{baseState{name: "OscString", parser: parser}} - - parser.stateMap = []state{ - parser.csiEntry, - parser.csiParam, - parser.dcsEntry, - parser.escape, - parser.escapeIntermediate, - parser.error, - parser.ground, - parser.oscString, + for _, o := range opts { + o(ap) } - parser.currState = getState(initialState, parser.stateMap) + if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" { + logFile, _ := os.Create("ansiParser.log") + logger := log.New(logFile, "", log.LstdFlags) + if ap.logf != nil { + l := ap.logf + ap.logf = func(s string, v ...interface{}) { + l(s, v...) + logger.Printf(s, v...) 
+ } + } else { + ap.logf = logger.Printf + } + } - logger.Infof("CreateParser: parser %p", parser) - return parser + if ap.logf == nil { + ap.logf = func(string, ...interface{}) {} + } + + ap.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: ap}} + ap.csiParam = csiParamState{baseState{name: "CsiParam", parser: ap}} + ap.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: ap}} + ap.escape = escapeState{baseState{name: "Escape", parser: ap}} + ap.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: ap}} + ap.error = errorState{baseState{name: "Error", parser: ap}} + ap.ground = groundState{baseState{name: "Ground", parser: ap}} + ap.oscString = oscStringState{baseState{name: "OscString", parser: ap}} + + ap.stateMap = []state{ + ap.csiEntry, + ap.csiParam, + ap.dcsEntry, + ap.escape, + ap.escapeIntermediate, + ap.error, + ap.ground, + ap.oscString, + } + + ap.currState = getState(initialState, ap.stateMap) + + ap.logf("CreateParser: parser %p", ap) + return ap } func getState(name string, states []state) state { @@ -97,7 +112,7 @@ func (ap *AnsiParser) handle(b byte) error { } if newState == nil { - logger.Warning("newState is nil") + ap.logf("WARNING: newState is nil") return errors.New("New state of 'nil' is invalid.") } @@ -111,23 +126,23 @@ func (ap *AnsiParser) handle(b byte) error { } func (ap *AnsiParser) changeState(newState state) error { - logger.Infof("ChangeState %s --> %s", ap.currState.Name(), newState.Name()) + ap.logf("ChangeState %s --> %s", ap.currState.Name(), newState.Name()) // Exit old state if err := ap.currState.Exit(); err != nil { - logger.Infof("Exit state '%s' failed with : '%v'", ap.currState.Name(), err) + ap.logf("Exit state '%s' failed with : '%v'", ap.currState.Name(), err) return err } // Perform transition action if err := ap.currState.Transition(newState); err != nil { - logger.Infof("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name, err) + ap.logf("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name, err) return err } // Enter new state if err := newState.Enter(); err != nil { - logger.Infof("Enter state '%s' failed with: '%v'", newState.Name(), err) + ap.logf("Enter state '%s' failed with: '%v'", newState.Name(), err) return err } diff --git a/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go index 8b69a67a5a..de0a1f9cde 100644 --- a/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go +++ b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go @@ -27,7 +27,6 @@ func parseParams(bytes []byte) ([]string, error) { params = append(params, s) } - logger.Infof("Parsed params: %v with length: %d", params, len(params)) return params, nil } @@ -37,7 +36,6 @@ func parseCmd(context ansiContext) (string, error) { func getInt(params []string, dflt int) int { i := getInts(params, 1, dflt)[0] - logger.Infof("getInt: %v", i) return i } @@ -60,8 +58,6 @@ func getInts(params []string, minCount int, dflt int) []int { } } - logger.Infof("getInts: %v", ints) - return ints } diff --git a/vendor/github.com/Azure/go-ansiterm/parser_actions.go b/vendor/github.com/Azure/go-ansiterm/parser_actions.go index 58750a2d2b..0bb5e51e9a 100644 --- a/vendor/github.com/Azure/go-ansiterm/parser_actions.go +++ b/vendor/github.com/Azure/go-ansiterm/parser_actions.go @@ -1,19 +1,15 @@ package ansiterm -import ( - "fmt" -) - func (ap *AnsiParser) collectParam() error { currChar := 
ap.context.currentChar - logger.Infof("collectParam %#x", currChar) + ap.logf("collectParam %#x", currChar) ap.context.paramBuffer = append(ap.context.paramBuffer, currChar) return nil } func (ap *AnsiParser) collectInter() error { currChar := ap.context.currentChar - logger.Infof("collectInter %#x", currChar) + ap.logf("collectInter %#x", currChar) ap.context.paramBuffer = append(ap.context.interBuffer, currChar) return nil } @@ -21,8 +17,8 @@ func (ap *AnsiParser) collectInter() error { func (ap *AnsiParser) escDispatch() error { cmd, _ := parseCmd(*ap.context) intermeds := ap.context.interBuffer - logger.Infof("escDispatch currentChar: %#x", ap.context.currentChar) - logger.Infof("escDispatch: %v(%v)", cmd, intermeds) + ap.logf("escDispatch currentChar: %#x", ap.context.currentChar) + ap.logf("escDispatch: %v(%v)", cmd, intermeds) switch cmd { case "D": // IND @@ -43,8 +39,9 @@ func (ap *AnsiParser) escDispatch() error { func (ap *AnsiParser) csiDispatch() error { cmd, _ := parseCmd(*ap.context) params, _ := parseParams(ap.context.paramBuffer) + ap.logf("Parsed params: %v with length: %d", params, len(params)) - logger.Infof("csiDispatch: %v(%v)", cmd, params) + ap.logf("csiDispatch: %v(%v)", cmd, params) switch cmd { case "@": @@ -102,7 +99,7 @@ func (ap *AnsiParser) csiDispatch() error { top, bottom := ints[0], ints[1] return ap.eventHandler.DECSTBM(top, bottom) default: - logger.Errorf(fmt.Sprintf("Unsupported CSI command: '%s', with full context: %v", cmd, ap.context)) + ap.logf("ERROR: Unsupported CSI command: '%s', with full context: %v", cmd, ap.context) return nil } diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go index daf2f06961..a673279726 100644 --- a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go +++ b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go @@ -175,7 +175,7 @@ func GetStdFile(nFile int) (*os.File, uintptr) { fd, err := syscall.GetStdHandle(nFile) if err != nil { - panic(fmt.Errorf("Invalid standard handle indentifier: %v -- %v", nFile, err)) + panic(fmt.Errorf("Invalid standard handle identifier: %v -- %v", nFile, err)) } return file, uintptr(fd) diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/api.go b/vendor/github.com/Azure/go-ansiterm/winterm/api.go index 462d92f8ef..6055e33b91 100644 --- a/vendor/github.com/Azure/go-ansiterm/winterm/api.go +++ b/vendor/github.com/Azure/go-ansiterm/winterm/api.go @@ -49,17 +49,22 @@ var ( const ( // Console modes // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. 
- ENABLE_PROCESSED_INPUT = 0x0001 - ENABLE_LINE_INPUT = 0x0002 - ENABLE_ECHO_INPUT = 0x0004 - ENABLE_WINDOW_INPUT = 0x0008 - ENABLE_MOUSE_INPUT = 0x0010 - ENABLE_INSERT_MODE = 0x0020 - ENABLE_QUICK_EDIT_MODE = 0x0040 - ENABLE_EXTENDED_FLAGS = 0x0080 + ENABLE_PROCESSED_INPUT = 0x0001 + ENABLE_LINE_INPUT = 0x0002 + ENABLE_ECHO_INPUT = 0x0004 + ENABLE_WINDOW_INPUT = 0x0008 + ENABLE_MOUSE_INPUT = 0x0010 + ENABLE_INSERT_MODE = 0x0020 + ENABLE_QUICK_EDIT_MODE = 0x0040 + ENABLE_EXTENDED_FLAGS = 0x0080 + ENABLE_AUTO_POSITION = 0x0100 + ENABLE_VIRTUAL_TERMINAL_INPUT = 0x0200 - ENABLE_PROCESSED_OUTPUT = 0x0001 - ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002 + ENABLE_PROCESSED_OUTPUT = 0x0001 + ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002 + ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004 + DISABLE_NEWLINE_AUTO_RETURN = 0x0008 + ENABLE_LVB_GRID_WORLDWIDE = 0x0010 // Character attributes // Note: diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go index f015723ade..3ee06ea728 100644 --- a/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go +++ b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go @@ -34,7 +34,7 @@ func (h *windowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL if err != nil { return err } - logger.Infof("Cursor position set: (%d, %d)", position.X, position.Y) + h.logf("Cursor position set: (%d, %d)", position.X, position.Y) return err } diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go index 706d270577..2d27fa1d02 100644 --- a/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go +++ b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go @@ -50,8 +50,8 @@ func (h *windowsAnsiEventHandler) insertLines(param int) error { // scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates. 
func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error { - logger.Infof("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom) - logger.Infof("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom) + h.logf("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom) + h.logf("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom) // Copy from and clip to the scroll region (full buffer width) scrollRect := SMALL_RECT{ diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go index 48998bb051..2d40fb75ad 100644 --- a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go +++ b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go @@ -4,16 +4,13 @@ package winterm import ( "bytes" - "io/ioutil" + "log" "os" "strconv" "github.com/Azure/go-ansiterm" - "github.com/sirupsen/logrus" ) -var logger *logrus.Logger - type windowsAnsiEventHandler struct { fd uintptr file *os.File @@ -28,32 +25,52 @@ type windowsAnsiEventHandler struct { marginByte byte curInfo *CONSOLE_SCREEN_BUFFER_INFO curPos COORD + logf func(string, ...interface{}) } -func CreateWinEventHandler(fd uintptr, file *os.File) ansiterm.AnsiEventHandler { - logFile := ioutil.Discard +type Option func(*windowsAnsiEventHandler) - if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { - logFile, _ = os.Create("winEventHandler.log") - } - - logger = &logrus.Logger{ - Out: logFile, - Formatter: new(logrus.TextFormatter), - Level: logrus.DebugLevel, +func WithLogf(f func(string, ...interface{})) Option { + return func(w *windowsAnsiEventHandler) { + w.logf = f } +} +func CreateWinEventHandler(fd uintptr, file *os.File, opts ...Option) ansiterm.AnsiEventHandler { infoReset, err := GetConsoleScreenBufferInfo(fd) if err != nil { return nil } - return &windowsAnsiEventHandler{ + h := &windowsAnsiEventHandler{ fd: fd, file: file, infoReset: infoReset, attributes: infoReset.Attributes, } + for _, o := range opts { + o(h) + } + + if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { + logFile, _ := os.Create("winEventHandler.log") + logger := log.New(logFile, "", log.LstdFlags) + if h.logf != nil { + l := h.logf + h.logf = func(s string, v ...interface{}) { + l(s, v...) + logger.Printf(s, v...) + } + } else { + h.logf = logger.Printf + } + } + + if h.logf == nil { + h.logf = func(string, ...interface{}) {} + } + + return h } type scrollRegion struct { @@ -96,7 +113,7 @@ func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) { if err := h.Flush(); err != nil { return false, err } - logger.Info("Simulating LF inside scroll region") + h.logf("Simulating LF inside scroll region") if err := h.scrollUp(1); err != nil { return false, err } @@ -119,7 +136,7 @@ func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) { } else { // The cursor is at the bottom of the screen but outside the scroll // region. Skip the LF. 
- logger.Info("Simulating LF outside scroll region") + h.logf("Simulating LF outside scroll region") if includeCR { if err := h.Flush(); err != nil { return false, err @@ -151,7 +168,7 @@ func (h *windowsAnsiEventHandler) executeLF() error { if err := h.Flush(); err != nil { return err } - logger.Info("Resetting cursor position for LF without CR") + h.logf("Resetting cursor position for LF without CR") if err := SetConsoleCursorPosition(h.fd, pos); err != nil { return err } @@ -186,7 +203,7 @@ func (h *windowsAnsiEventHandler) Print(b byte) error { func (h *windowsAnsiEventHandler) Execute(b byte) error { switch b { case ansiterm.ANSI_TAB: - logger.Info("Execute(TAB)") + h.logf("Execute(TAB)") // Move to the next tab stop, but preserve auto-wrap if already set. if !h.wrapNext { pos, info, err := h.getCurrentInfo() @@ -269,7 +286,7 @@ func (h *windowsAnsiEventHandler) CUU(param int) error { if err := h.Flush(); err != nil { return err } - logger.Infof("CUU: [%v]", []string{strconv.Itoa(param)}) + h.logf("CUU: [%v]", []string{strconv.Itoa(param)}) h.clearWrap() return h.moveCursorVertical(-param) } @@ -278,7 +295,7 @@ func (h *windowsAnsiEventHandler) CUD(param int) error { if err := h.Flush(); err != nil { return err } - logger.Infof("CUD: [%v]", []string{strconv.Itoa(param)}) + h.logf("CUD: [%v]", []string{strconv.Itoa(param)}) h.clearWrap() return h.moveCursorVertical(param) } @@ -287,7 +304,7 @@ func (h *windowsAnsiEventHandler) CUF(param int) error { if err := h.Flush(); err != nil { return err } - logger.Infof("CUF: [%v]", []string{strconv.Itoa(param)}) + h.logf("CUF: [%v]", []string{strconv.Itoa(param)}) h.clearWrap() return h.moveCursorHorizontal(param) } @@ -296,7 +313,7 @@ func (h *windowsAnsiEventHandler) CUB(param int) error { if err := h.Flush(); err != nil { return err } - logger.Infof("CUB: [%v]", []string{strconv.Itoa(param)}) + h.logf("CUB: [%v]", []string{strconv.Itoa(param)}) h.clearWrap() return h.moveCursorHorizontal(-param) } @@ -305,7 +322,7 @@ func (h *windowsAnsiEventHandler) CNL(param int) error { if err := h.Flush(); err != nil { return err } - logger.Infof("CNL: [%v]", []string{strconv.Itoa(param)}) + h.logf("CNL: [%v]", []string{strconv.Itoa(param)}) h.clearWrap() return h.moveCursorLine(param) } @@ -314,7 +331,7 @@ func (h *windowsAnsiEventHandler) CPL(param int) error { if err := h.Flush(); err != nil { return err } - logger.Infof("CPL: [%v]", []string{strconv.Itoa(param)}) + h.logf("CPL: [%v]", []string{strconv.Itoa(param)}) h.clearWrap() return h.moveCursorLine(-param) } @@ -323,7 +340,7 @@ func (h *windowsAnsiEventHandler) CHA(param int) error { if err := h.Flush(); err != nil { return err } - logger.Infof("CHA: [%v]", []string{strconv.Itoa(param)}) + h.logf("CHA: [%v]", []string{strconv.Itoa(param)}) h.clearWrap() return h.moveCursorColumn(param) } @@ -332,7 +349,7 @@ func (h *windowsAnsiEventHandler) VPA(param int) error { if err := h.Flush(); err != nil { return err } - logger.Infof("VPA: [[%d]]", param) + h.logf("VPA: [[%d]]", param) h.clearWrap() info, err := GetConsoleScreenBufferInfo(h.fd) if err != nil { @@ -348,7 +365,7 @@ func (h *windowsAnsiEventHandler) CUP(row int, col int) error { if err := h.Flush(); err != nil { return err } - logger.Infof("CUP: [[%d %d]]", row, col) + h.logf("CUP: [[%d %d]]", row, col) h.clearWrap() info, err := GetConsoleScreenBufferInfo(h.fd) if err != nil { @@ -364,7 +381,7 @@ func (h *windowsAnsiEventHandler) HVP(row int, col int) error { if err := h.Flush(); err != nil { return err } - logger.Infof("HVP: [[%d %d]]", 
row, col) + h.logf("HVP: [[%d %d]]", row, col) h.clearWrap() return h.CUP(row, col) } @@ -373,7 +390,7 @@ func (h *windowsAnsiEventHandler) DECTCEM(visible bool) error { if err := h.Flush(); err != nil { return err } - logger.Infof("DECTCEM: [%v]", []string{strconv.FormatBool(visible)}) + h.logf("DECTCEM: [%v]", []string{strconv.FormatBool(visible)}) h.clearWrap() return nil } @@ -382,7 +399,7 @@ func (h *windowsAnsiEventHandler) DECOM(enable bool) error { if err := h.Flush(); err != nil { return err } - logger.Infof("DECOM: [%v]", []string{strconv.FormatBool(enable)}) + h.logf("DECOM: [%v]", []string{strconv.FormatBool(enable)}) h.clearWrap() h.originMode = enable return h.CUP(1, 1) @@ -392,7 +409,7 @@ func (h *windowsAnsiEventHandler) DECCOLM(use132 bool) error { if err := h.Flush(); err != nil { return err } - logger.Infof("DECCOLM: [%v]", []string{strconv.FormatBool(use132)}) + h.logf("DECCOLM: [%v]", []string{strconv.FormatBool(use132)}) h.clearWrap() if err := h.ED(2); err != nil { return err @@ -407,7 +424,7 @@ func (h *windowsAnsiEventHandler) DECCOLM(use132 bool) error { } if info.Size.X < targetWidth { if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { - logger.Info("set buffer failed:", err) + h.logf("set buffer failed: %v", err) return err } } @@ -415,12 +432,12 @@ func (h *windowsAnsiEventHandler) DECCOLM(use132 bool) error { window.Left = 0 window.Right = targetWidth - 1 if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { - logger.Info("set window failed:", err) + h.logf("set window failed: %v", err) return err } if info.Size.X > targetWidth { if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { - logger.Info("set buffer failed:", err) + h.logf("set buffer failed: %v", err) return err } } @@ -431,7 +448,7 @@ func (h *windowsAnsiEventHandler) ED(param int) error { if err := h.Flush(); err != nil { return err } - logger.Infof("ED: [%v]", []string{strconv.Itoa(param)}) + h.logf("ED: [%v]", []string{strconv.Itoa(param)}) h.clearWrap() // [J -- Erases from the cursor to the end of the screen, including the cursor position. @@ -490,7 +507,7 @@ func (h *windowsAnsiEventHandler) EL(param int) error { if err := h.Flush(); err != nil { return err } - logger.Infof("EL: [%v]", strconv.Itoa(param)) + h.logf("EL: [%v]", strconv.Itoa(param)) h.clearWrap() // [K -- Erases from the cursor to the end of the line, including the cursor position. 
@@ -531,7 +548,7 @@ func (h *windowsAnsiEventHandler) IL(param int) error { if err := h.Flush(); err != nil { return err } - logger.Infof("IL: [%v]", strconv.Itoa(param)) + h.logf("IL: [%v]", strconv.Itoa(param)) h.clearWrap() return h.insertLines(param) } @@ -540,7 +557,7 @@ func (h *windowsAnsiEventHandler) DL(param int) error { if err := h.Flush(); err != nil { return err } - logger.Infof("DL: [%v]", strconv.Itoa(param)) + h.logf("DL: [%v]", strconv.Itoa(param)) h.clearWrap() return h.deleteLines(param) } @@ -549,7 +566,7 @@ func (h *windowsAnsiEventHandler) ICH(param int) error { if err := h.Flush(); err != nil { return err } - logger.Infof("ICH: [%v]", strconv.Itoa(param)) + h.logf("ICH: [%v]", strconv.Itoa(param)) h.clearWrap() return h.insertCharacters(param) } @@ -558,7 +575,7 @@ func (h *windowsAnsiEventHandler) DCH(param int) error { if err := h.Flush(); err != nil { return err } - logger.Infof("DCH: [%v]", strconv.Itoa(param)) + h.logf("DCH: [%v]", strconv.Itoa(param)) h.clearWrap() return h.deleteCharacters(param) } @@ -572,7 +589,7 @@ func (h *windowsAnsiEventHandler) SGR(params []int) error { strings = append(strings, strconv.Itoa(v)) } - logger.Infof("SGR: [%v]", strings) + h.logf("SGR: [%v]", strings) if len(params) <= 0 { h.attributes = h.infoReset.Attributes @@ -606,7 +623,7 @@ func (h *windowsAnsiEventHandler) SU(param int) error { if err := h.Flush(); err != nil { return err } - logger.Infof("SU: [%v]", []string{strconv.Itoa(param)}) + h.logf("SU: [%v]", []string{strconv.Itoa(param)}) h.clearWrap() return h.scrollUp(param) } @@ -615,13 +632,13 @@ func (h *windowsAnsiEventHandler) SD(param int) error { if err := h.Flush(); err != nil { return err } - logger.Infof("SD: [%v]", []string{strconv.Itoa(param)}) + h.logf("SD: [%v]", []string{strconv.Itoa(param)}) h.clearWrap() return h.scrollDown(param) } func (h *windowsAnsiEventHandler) DA(params []string) error { - logger.Infof("DA: [%v]", params) + h.logf("DA: [%v]", params) // DA cannot be implemented because it must send data on the VT100 input stream, // which is not available to go-ansiterm. 
return nil @@ -631,7 +648,7 @@ func (h *windowsAnsiEventHandler) DECSTBM(top int, bottom int) error { if err := h.Flush(); err != nil { return err } - logger.Infof("DECSTBM: [%d, %d]", top, bottom) + h.logf("DECSTBM: [%d, %d]", top, bottom) // Windows is 0 indexed, Linux is 1 indexed h.sr.top = int16(top - 1) @@ -646,7 +663,7 @@ func (h *windowsAnsiEventHandler) RI() error { if err := h.Flush(); err != nil { return err } - logger.Info("RI: []") + h.logf("RI: []") h.clearWrap() info, err := GetConsoleScreenBufferInfo(h.fd) @@ -663,21 +680,21 @@ func (h *windowsAnsiEventHandler) RI() error { } func (h *windowsAnsiEventHandler) IND() error { - logger.Info("IND: []") + h.logf("IND: []") return h.executeLF() } func (h *windowsAnsiEventHandler) Flush() error { h.curInfo = nil if h.buffer.Len() > 0 { - logger.Infof("Flush: [%s]", h.buffer.Bytes()) + h.logf("Flush: [%s]", h.buffer.Bytes()) if _, err := h.buffer.WriteTo(h.file); err != nil { return err } } if h.wrapNext && !h.drewMarginByte { - logger.Infof("Flush: drawing margin byte '%c'", h.marginByte) + h.logf("Flush: drawing margin byte '%c'", h.marginByte) info, err := GetConsoleScreenBufferInfo(h.fd) if err != nil { diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go index 12375e0e4b..49e9214d59 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go @@ -1,5 +1,19 @@ package adal +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "fmt" "net/url" diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go index 6c511f8c87..b38f4c2458 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go @@ -1,5 +1,19 @@ package adal +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
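The go-ansiterm changes above drop the package-level logrus logger in favour of an injectable logf function, exposed through functional WithLogf options on both CreateParser and the winterm event handler. A Windows-only sketch of wiring the two together; the Parse call is the parser's pre-existing entry point and is assumed here rather than shown in these hunks.

// +build windows

package main

import (
	"log"
	"os"

	ansiterm "github.com/Azure/go-ansiterm"
	"github.com/Azure/go-ansiterm/winterm"
)

func main() {
	logger := log.New(os.Stderr, "ansi: ", log.LstdFlags)

	// CreateWinEventHandler returns nil when no console screen buffer is
	// available (see the GetConsoleScreenBufferInfo check above).
	handler := winterm.CreateWinEventHandler(os.Stdout.Fd(), os.Stdout,
		winterm.WithLogf(logger.Printf))
	if handler == nil {
		logger.Println("not attached to a Windows console")
		return
	}

	parser := ansiterm.CreateParser("Ground", handler,
		ansiterm.WithLogf(logger.Printf))

	// Feed a clear-screen plus cursor-home sequence through the state machine.
	if _, err := parser.Parse([]byte("\x1b[2J\x1b[Hhello")); err != nil {
		logger.Println("parse:", err)
	}
}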
+ /* This file is largely based on rjw57/oauth2device's code, with the follow differences: * scope -> resource, and only allow a single one diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/msi.go b/vendor/github.com/Azure/go-autorest/autorest/adal/msi.go index e87911e835..5e02d52ac2 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/msi.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/msi.go @@ -2,5 +2,19 @@ package adal +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // msiPath is the path to the MSI Extension settings file (to discover the endpoint) var msiPath = "/var/lib/waagent/ManagedIdentity-Settings" diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/msi_windows.go b/vendor/github.com/Azure/go-autorest/autorest/adal/msi_windows.go index 80f8004327..261b568829 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/msi_windows.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/msi_windows.go @@ -2,6 +2,20 @@ package adal +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "os" "strings" diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go index 73711c6674..9e15f2751f 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go @@ -1,5 +1,19 @@ package adal +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ import ( "encoding/json" "fmt" diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go index 7928c971ab..0e5ad14d39 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go @@ -1,5 +1,19 @@ package adal +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "net/http" ) diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go index 2ac8c3c220..67dd97a18c 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go @@ -1,5 +1,19 @@ package adal +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "crypto/rand" "crypto/rsa" diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/Azure/go-autorest/autorest/authorization.go index 314ed7876b..71e3ced2d6 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/authorization.go +++ b/vendor/github.com/Azure/go-autorest/autorest/authorization.go @@ -1,5 +1,19 @@ package autorest +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "fmt" "net/http" diff --git a/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/vendor/github.com/Azure/go-autorest/autorest/autorest.go index 51f1c4bbca..37b907c77f 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/autorest.go +++ b/vendor/github.com/Azure/go-autorest/autorest/autorest.go @@ -57,6 +57,20 @@ generated clients, see the Client described below. */ package autorest +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "net/http" "time" diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go index 332a8909d1..ffbc8da28e 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go @@ -1,5 +1,19 @@ package azure +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "bytes" "fmt" diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go index 3f4d13421a..fa18356476 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go @@ -5,6 +5,20 @@ See the included examples for more detail. */ package azure +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ import ( "encoding/json" "fmt" @@ -165,7 +179,13 @@ func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator { if decodeErr != nil { return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr) } else if e.ServiceError == nil { - e.ServiceError = &ServiceError{Code: "Unknown", Message: "Unknown service error"} + // Check if error is unwrapped ServiceError + if err := json.Unmarshal(b.Bytes(), &e.ServiceError); err != nil || e.ServiceError.Message == "" { + e.ServiceError = &ServiceError{ + Code: "Unknown", + Message: "Unknown service error", + } + } } e.RequestID = ExtractRequestID(resp) diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go index 1cf55651f2..30c4351a57 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go @@ -1,5 +1,19 @@ package azure +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "fmt" "strings" diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go new file mode 100644 index 0000000000..6036d069a1 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go @@ -0,0 +1,202 @@ +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package azure + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" +) + +// DoRetryWithRegistration tries to register the resource provider in case it is unregistered. 
+// It also handles request retries +func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := autorest.NewRetriableRequest(r) + for currentAttempt := 0; currentAttempt < client.RetryAttempts; currentAttempt++ { + err = rr.Prepare() + if err != nil { + return resp, err + } + + resp, err = autorest.SendWithSender(s, rr.Request(), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), + ) + if err != nil { + return resp, err + } + + if resp.StatusCode != http.StatusConflict { + return resp, err + } + var re RequestError + err = autorest.Respond( + resp, + autorest.ByUnmarshallingJSON(&re), + ) + if err != nil { + return resp, err + } + + if re.ServiceError != nil && re.ServiceError.Code == "MissingSubscriptionRegistration" { + err = register(client, r, re) + if err != nil { + return resp, fmt.Errorf("failed auto registering Resource Provider: %s", err) + } + } + } + return resp, errors.New("failed request and resource provider registration") + }) + } +} + +func getProvider(re RequestError) (string, error) { + if re.ServiceError != nil { + if re.ServiceError.Details != nil && len(*re.ServiceError.Details) > 0 { + detail := (*re.ServiceError.Details)[0].(map[string]interface{}) + return detail["target"].(string), nil + } + } + return "", errors.New("provider was not found in the response") +} + +func register(client autorest.Client, originalReq *http.Request, re RequestError) error { + subID := getSubscription(originalReq.URL.Path) + if subID == "" { + return errors.New("missing parameter subscriptionID to register resource provider") + } + providerName, err := getProvider(re) + if err != nil { + return fmt.Errorf("missing parameter provider to register resource provider: %s", err) + } + newURL := url.URL{ + Scheme: originalReq.URL.Scheme, + Host: originalReq.URL.Host, + } + + // taken from the resources SDK + // with almost identical code, this sections are easier to mantain + // It is also not a good idea to import the SDK here + // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L252 + pathParameters := map[string]interface{}{ + "resourceProviderNamespace": autorest.Encode("path", providerName), + "subscriptionId": autorest.Encode("path", subID), + } + + const APIVersion = "2016-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(newURL.String()), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register", pathParameters), + autorest.WithQueryParameters(queryParameters), + ) + + req, err := preparer.Prepare(&http.Request{}) + if err != nil { + return err + } + req.Cancel = originalReq.Cancel + + resp, err := autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), + ) + if err != nil { + return err + } + + type Provider struct { + RegistrationState *string `json:"registrationState,omitempty"` + } + var provider Provider + + err = autorest.Respond( + resp, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&provider), + autorest.ByClosing(), + ) + if err != nil { + return err + } + + // poll for registered provisioning 
state + now := time.Now() + for err == nil && time.Since(now) < client.PollingDuration { + // taken from the resources SDK + // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45 + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(newURL.String()), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}", pathParameters), + autorest.WithQueryParameters(queryParameters), + ) + req, err = preparer.Prepare(&http.Request{}) + if err != nil { + return err + } + req.Cancel = originalReq.Cancel + + resp, err := autorest.SendWithSender(client.Sender, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), + ) + if err != nil { + return err + } + + err = autorest.Respond( + resp, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&provider), + autorest.ByClosing(), + ) + if err != nil { + return err + } + + if provider.RegistrationState != nil && + *provider.RegistrationState == "Registered" { + break + } + + delayed := autorest.DelayWithRetryAfter(resp, originalReq.Cancel) + if !delayed { + autorest.DelayForBackoff(client.PollingDelay, 0, originalReq.Cancel) + } + } + if !(time.Since(now) < client.PollingDuration) { + return errors.New("polling for resource provider registration has exceeded the polling duration") + } + return err +} + +func getSubscription(path string) string { + parts := strings.Split(path, "/") + for i, v := range parts { + if v == "subscriptions" && (i+1) < len(parts) { + return parts[i+1] + } + } + return "" +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go index 5f1e72fbe4..ce7a605f89 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/client.go +++ b/vendor/github.com/Azure/go-autorest/autorest/client.go @@ -1,5 +1,19 @@ package autorest +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ import ( "bytes" "fmt" @@ -33,7 +47,8 @@ var ( Version(), ) - statusCodesForRetry = []int{ + // StatusCodesForRetry are a defined group of status code for which the client will retry + StatusCodesForRetry = []int{ http.StatusRequestTimeout, // 408 http.StatusTooManyRequests, // 429 http.StatusInternalServerError, // 500 @@ -160,6 +175,7 @@ func NewClientWithUserAgent(ua string) Client { RetryDuration: 30 * time.Second, UserAgent: defaultUserAgent, } + c.Sender = c.sender() c.AddToUserAgent(ua) return c } @@ -187,10 +203,9 @@ func (c Client) Do(r *http.Request) (*http.Response, error) { if err != nil { return nil, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed") } - resp, err := SendWithSender(c.sender(), r, - DoRetryForStatusCodes(c.RetryAttempts, c.RetryDuration, statusCodesForRetry...)) - Respond(resp, - c.ByInspecting()) + + resp, err := SendWithSender(c.sender(), r) + Respond(resp, c.ByInspecting()) return resp, err } diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/date.go b/vendor/github.com/Azure/go-autorest/autorest/date/date.go index 80ca60e9b0..c457106568 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/date/date.go +++ b/vendor/github.com/Azure/go-autorest/autorest/date/date.go @@ -5,6 +5,20 @@ time.Time types. And both convert to time.Time through a ToTime method. */ package date +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "fmt" "time" diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/time.go b/vendor/github.com/Azure/go-autorest/autorest/date/time.go index c1af629634..b453fad049 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/date/time.go +++ b/vendor/github.com/Azure/go-autorest/autorest/date/time.go @@ -1,5 +1,19 @@ package date +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "regexp" "time" diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go index 11995fb9f2..48fb39ba9b 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go +++ b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go @@ -1,5 +1,19 @@ package date +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "errors" "time" diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go index e085c77eea..7073959b2a 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go +++ b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go @@ -1,5 +1,19 @@ package date +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "bytes" "encoding/binary" diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/utility.go b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go index 207b1a240a..12addf0ebb 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/date/utility.go +++ b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go @@ -1,5 +1,19 @@ package date +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "strings" "time" diff --git a/vendor/github.com/Azure/go-autorest/autorest/error.go b/vendor/github.com/Azure/go-autorest/autorest/error.go index aaef2ac8ec..f724f33327 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/error.go +++ b/vendor/github.com/Azure/go-autorest/autorest/error.go @@ -1,5 +1,19 @@ package autorest +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ import ( "fmt" "net/http" diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/go-autorest/autorest/preparer.go index afd114821b..2290c40100 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/preparer.go +++ b/vendor/github.com/Azure/go-autorest/autorest/preparer.go @@ -1,5 +1,19 @@ package autorest +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "bytes" "encoding/json" diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder.go b/vendor/github.com/Azure/go-autorest/autorest/responder.go index 87f71e5854..a908a0adb7 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/responder.go +++ b/vendor/github.com/Azure/go-autorest/autorest/responder.go @@ -1,5 +1,19 @@ package autorest +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "bytes" "encoding/json" diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go index 0ab1eb3003..fa11dbed79 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go +++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go @@ -1,5 +1,19 @@ package autorest +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "bytes" "io" diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go index e28eb2cbdb..7143cc61b5 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go +++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go @@ -1,17 +1,31 @@ // +build !go1.8 +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package autorest import ( "bytes" + "io/ioutil" "net/http" ) // RetriableRequest provides facilities for retrying an HTTP request. type RetriableRequest struct { - req *http.Request - br *bytes.Reader - reset bool + req *http.Request + br *bytes.Reader } // Prepare signals that the request is about to be sent. @@ -19,21 +33,17 @@ func (rr *RetriableRequest) Prepare() (err error) { // preserve the request body; this is to support retry logic as // the underlying transport will always close the reqeust body if rr.req.Body != nil { - if rr.reset { - if rr.br != nil { - _, err = rr.br.Seek(0, 0 /*io.SeekStart*/) - } - rr.reset = false - if err != nil { - return err - } + if rr.br != nil { + _, err = rr.br.Seek(0, 0 /*io.SeekStart*/) + rr.req.Body = ioutil.NopCloser(rr.br) + } + if err != nil { + return err } if rr.br == nil { // fall back to making a copy (only do this once) err = rr.prepareFromByteReader() } - // indicates that the request body needs to be reset - rr.reset = true } return err } diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go index 8c1d1aec8d..ae15c6bf96 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go +++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go @@ -1,19 +1,33 @@ // +build go1.8 +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package autorest import ( "bytes" "io" + "io/ioutil" "net/http" ) // RetriableRequest provides facilities for retrying an HTTP request. type RetriableRequest struct { - req *http.Request - rc io.ReadCloser - br *bytes.Reader - reset bool + req *http.Request + rc io.ReadCloser + br *bytes.Reader } // Prepare signals that the request is about to be sent. 
@@ -21,16 +35,14 @@ func (rr *RetriableRequest) Prepare() (err error) { // preserve the request body; this is to support retry logic as // the underlying transport will always close the reqeust body if rr.req.Body != nil { - if rr.reset { - if rr.rc != nil { - rr.req.Body = rr.rc - } else if rr.br != nil { - _, err = rr.br.Seek(0, io.SeekStart) - } - rr.reset = false - if err != nil { - return err - } + if rr.rc != nil { + rr.req.Body = rr.rc + } else if rr.br != nil { + _, err = rr.br.Seek(0, io.SeekStart) + rr.req.Body = ioutil.NopCloser(rr.br) + } + if err != nil { + return err } if rr.req.GetBody != nil { // this will allow us to preserve the body without having to @@ -43,8 +55,6 @@ func (rr *RetriableRequest) Prepare() (err error) { // fall back to making a copy (only do this once) err = rr.prepareFromByteReader() } - // indicates that the request body needs to be reset - rr.reset = true } return err } diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go index 94b0298479..7264c32f27 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/sender.go +++ b/vendor/github.com/Azure/go-autorest/autorest/sender.go @@ -1,5 +1,19 @@ package autorest +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "fmt" "log" diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/Azure/go-autorest/autorest/utility.go index 78067148b2..dfdc6efdff 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/utility.go +++ b/vendor/github.com/Azure/go-autorest/autorest/utility.go @@ -1,5 +1,19 @@ package autorest +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "bytes" "encoding/json" diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go index a222e8efaa..f588807dbb 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/version.go +++ b/vendor/github.com/Azure/go-autorest/autorest/version.go @@ -1,5 +1,19 @@ package autorest +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + import ( "bytes" "fmt" diff --git a/vendor/github.com/Jeffail/gabs/README.md b/vendor/github.com/Jeffail/gabs/README.md index 3a8a1f6031..044e9afda0 100644 --- a/vendor/github.com/Jeffail/gabs/README.md +++ b/vendor/github.com/Jeffail/gabs/README.md @@ -265,6 +265,29 @@ jsonOutput := jsonParsedObj.Search("outter").String() ... ``` +### Merge two containers + +You can merge a JSON structure into an existing one, where collisions will be +converted into a JSON array. + +```go +jsonParsed1, _ := ParseJSON([]byte(`{"outter": {"value1": "one"}}`)) +jsonParsed2, _ := ParseJSON([]byte(`{"outter": {"inner": {"value3": "three"}}, "outter2": {"value2": "two"}}`)) + +jsonParsed1.Merge(jsonParsed2) +// Becomes `{"outter":{"inner":{"value3":"three"},"value1":"one"},"outter2":{"value2":"two"}}` +``` + +Arrays are merged: + +```go +jsonParsed1, _ := ParseJSON([]byte(`{"array": ["one"]}`)) +jsonParsed2, _ := ParseJSON([]byte(`{"array": ["two"]}`)) + +jsonParsed1.Merge(jsonParsed2) +// Becomes `{"array":["one", "two"]}` +``` + ### Parsing Numbers Gabs uses the `json` package under the bonnet, which by default will parse all number values into `float64`. If you need to parse `Int` values then you should use a `json.Decoder` (https://golang.org/pkg/encoding/json/#Decoder): diff --git a/vendor/github.com/Jeffail/gabs/gabs.go b/vendor/github.com/Jeffail/gabs/gabs.go index 65a90e58f7..a27a7110ec 100644 --- a/vendor/github.com/Jeffail/gabs/gabs.go +++ b/vendor/github.com/Jeffail/gabs/gabs.go @@ -309,6 +309,57 @@ func (g *Container) DeleteP(path string) error { return g.Delete(strings.Split(path, ".")...) } +// Merge - Merges two gabs-containers +func (g *Container) Merge(toMerge *Container) error { + var recursiveFnc func(map[string]interface{}, []string) error + recursiveFnc = func(mmap map[string]interface{}, path []string) error { + for key, value := range mmap { + newPath := append(path, key) + if g.Exists(newPath...) { + target := g.Search(newPath...) + switch t := value.(type) { + case map[string]interface{}: + switch targetV := target.Data().(type) { + case map[string]interface{}: + if err := recursiveFnc(t, newPath); err != nil { + return err + } + case []interface{}: + g.Set(append(targetV, t), newPath...) + default: + newSlice := append([]interface{}{}, targetV) + g.Set(append(newSlice, t), newPath...) + } + case []interface{}: + for _, valueOfSlice := range t { + if err := g.ArrayAppend(valueOfSlice, newPath...); err != nil { + return err + } + } + default: + switch targetV := target.Data().(type) { + case []interface{}: + g.Set(append(targetV, t), newPath...) + default: + newSlice := append([]interface{}{}, targetV) + g.Set(append(newSlice, t), newPath...) + } + } + } else { + // path doesn't exist. 
So set the value + if _, err := g.Set(value, newPath...); err != nil { + return err + } + } + } + return nil + } + if mmap, ok := toMerge.Data().(map[string]interface{}); ok { + return recursiveFnc(mmap, []string{}) + } + return nil +} + //-------------------------------------------------------------------------------------------------- /* @@ -316,14 +367,20 @@ Array modification/search - Keeping these options simple right now, no need for complicated since you can just cast to []interface{}, modify and then reassign with Set. */ -// ArrayAppend - Append a value onto a JSON array. +// ArrayAppend - Append a value onto a JSON array. If the target is not a JSON array then it will be +// converted into one, with its contents as the first element of the array. func (g *Container) ArrayAppend(value interface{}, path ...string) error { - array, ok := g.Search(path...).Data().([]interface{}) - if !ok { - return ErrNotArray + if array, ok := g.Search(path...).Data().([]interface{}); ok { + array = append(array, value) + _, err := g.Set(array, path...) + return err } - array = append(array, value) - _, err := g.Set(array, path...) + + newArray := []interface{}{} + newArray = append(newArray, g.Search(path...).Data()) + newArray = append(newArray, value) + + _, err := g.Set(newArray, path...) return err } diff --git a/vendor/github.com/PuerkitoBio/purell/README.md b/vendor/github.com/PuerkitoBio/purell/README.md index 09e8a32cbe..f33e2e9d77 100644 --- a/vendor/github.com/PuerkitoBio/purell/README.md +++ b/vendor/github.com/PuerkitoBio/purell/README.md @@ -4,7 +4,7 @@ Purell is a tiny Go library to normalize URLs. It returns a pure URL. Pure-ell. Based on the [wikipedia paper][wiki] and the [RFC 3986 document][rfc]. -[![build status](https://secure.travis-ci.org/PuerkitoBio/purell.png)](http://travis-ci.org/PuerkitoBio/purell) +[![build status](https://travis-ci.org/PuerkitoBio/purell.svg?branch=master)](http://travis-ci.org/PuerkitoBio/purell) ## Install diff --git a/vendor/github.com/PuerkitoBio/purell/purell.go b/vendor/github.com/PuerkitoBio/purell/purell.go index 645e1b76f7..6d0fc190a1 100644 --- a/vendor/github.com/PuerkitoBio/purell/purell.go +++ b/vendor/github.com/PuerkitoBio/purell/purell.go @@ -299,7 +299,7 @@ func sortQuery(u *url.URL) { if len(q) > 0 { arKeys := make([]string, len(q)) i := 0 - for k, _ := range q { + for k := range q { arKeys[i] = k i++ } diff --git a/vendor/github.com/SAP/go-hdb/NOTICE b/vendor/github.com/SAP/go-hdb/NOTICE old mode 100755 new mode 100644 diff --git a/vendor/github.com/SAP/go-hdb/driver/driver.go b/vendor/github.com/SAP/go-hdb/driver/driver.go index ca5009e6a0..e0f6cfb59a 100644 --- a/vendor/github.com/SAP/go-hdb/driver/driver.go +++ b/vendor/github.com/SAP/go-hdb/driver/driver.go @@ -32,7 +32,7 @@ import ( ) // DriverVersion is the version number of the hdb driver. -const DriverVersion = "0.9.1" +const DriverVersion = "0.9.2" // DriverName is the driver name to use with sql.Open for hdb databases. 
const DriverName = "hdb" diff --git a/vendor/github.com/armon/go-metrics/inmem.go b/vendor/github.com/armon/go-metrics/inmem.go index cd17730421..8fe1de8023 100644 --- a/vendor/github.com/armon/go-metrics/inmem.go +++ b/vendor/github.com/armon/go-metrics/inmem.go @@ -70,7 +70,7 @@ func NewIntervalMetrics(intv time.Time) *IntervalMetrics { // about a sample type AggregateSample struct { Count int // The count of emitted pairs - Rate float64 `json:"-"` // The count of emitted pairs per time unit (usually 1 second) + Rate float64 // The values rate per time unit (usually 1 second) Sum float64 // The sum of values SumSq float64 `json:"-"` // The sum of squared values Min float64 // Minimum value @@ -107,7 +107,7 @@ func (a *AggregateSample) Ingest(v float64, rateDenom float64) { if v > a.Max || a.Count == 1 { a.Max = v } - a.Rate = float64(a.Count) / rateDenom + a.Rate = float64(a.Sum) / rateDenom a.LastUpdated = time.Now() } diff --git a/vendor/github.com/asaskevich/govalidator/error.go b/vendor/github.com/asaskevich/govalidator/error.go index 280b1c455d..b9c32079b6 100644 --- a/vendor/github.com/asaskevich/govalidator/error.go +++ b/vendor/github.com/asaskevich/govalidator/error.go @@ -1,5 +1,7 @@ package govalidator +import "strings" + // Errors is an array of multiple errors and conforms to the error interface. type Errors []error @@ -9,11 +11,11 @@ func (es Errors) Errors() []error { } func (es Errors) Error() string { - var err string + var errs []string for _, e := range es { - err += e.Error() + ";" + errs = append(errs, e.Error()) } - return err + return strings.Join(errs, ";") } // Error encapsulates a name, an error and whether there's a custom error message or not. @@ -21,6 +23,9 @@ type Error struct { Name string Err error CustomErrorMessageExists bool + + // Validator indicates the name of the validator that failed + Validator string } func (e Error) Error() string { diff --git a/vendor/github.com/asaskevich/govalidator/patterns.go b/vendor/github.com/asaskevich/govalidator/patterns.go index 5297595591..4a34e2240d 100644 --- a/vendor/github.com/asaskevich/govalidator/patterns.go +++ b/vendor/github.com/asaskevich/govalidator/patterns.go @@ -33,7 +33,6 @@ const ( IP string = `(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))` URLSchema string = `((ftp|tcp|udp|wss?|https?):\/\/)` URLUsername string = `(\S+(:\S*)?@)` - Hostname string = `` URLPath string = `((\/|\?|#)[^\s]*)` URLPort string = `(:(\d{1,5}))` URLIP string = `([1-9]\d?|1\d\d|2[01]\d|22[0-3])(\.(1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-4]))` diff --git a/vendor/github.com/asaskevich/govalidator/validator.go b/vendor/github.com/asaskevich/govalidator/validator.go index b699e44490..f35de47a40 100644 --- a/vendor/github.com/asaskevich/govalidator/validator.go +++ b/vendor/github.com/asaskevich/govalidator/validator.go @@ -20,6 +20,7 @@ var ( fieldsRequiredByDefault bool notNumberRegexp = 
regexp.MustCompile("[^0-9]+") whiteSpacesAndMinus = regexp.MustCompile("[\\s-]+") + paramsRegexp = regexp.MustCompile("\\(.*\\)$") ) const maxURLRuneCount = 2083 @@ -630,8 +631,11 @@ func ValidateStruct(s interface{}) (bool, error) { // parseTagIntoMap parses a struct tag `valid:required~Some error message,length(2|3)` into map[string]string{"required": "Some error message", "length(2|3)": ""} func parseTagIntoMap(tag string) tagOptionsMap { optionsMap := make(tagOptionsMap) - options := strings.SplitN(tag, ",", -1) + options := strings.Split(tag, ",") + for _, option := range options { + option = strings.TrimSpace(option) + validationOptions := strings.Split(option, "~") if !isValidTag(validationOptions[0]) { continue @@ -776,11 +780,11 @@ func IsIn(str string, params ...string) bool { func checkRequired(v reflect.Value, t reflect.StructField, options tagOptionsMap) (bool, error) { if requiredOption, isRequired := options["required"]; isRequired { if len(requiredOption) > 0 { - return false, Error{t.Name, fmt.Errorf(requiredOption), true} + return false, Error{t.Name, fmt.Errorf(requiredOption), true, "required"} } - return false, Error{t.Name, fmt.Errorf("non zero value required"), false} + return false, Error{t.Name, fmt.Errorf("non zero value required"), false, "required"} } else if _, isOptional := options["optional"]; fieldsRequiredByDefault && !isOptional { - return false, Error{t.Name, fmt.Errorf("All fields are required to at least have one validation defined"), false} + return false, Error{t.Name, fmt.Errorf("All fields are required to at least have one validation defined"), false, "required"} } // not required and empty is valid return true, nil @@ -799,7 +803,7 @@ func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options if !fieldsRequiredByDefault { return true, nil } - return false, Error{t.Name, fmt.Errorf("All fields are required to at least have one validation defined"), false} + return false, Error{t.Name, fmt.Errorf("All fields are required to at least have one validation defined"), false, "required"} case "-": return true, nil } @@ -822,10 +826,10 @@ func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options if result := validatefunc(v.Interface(), o.Interface()); !result { if len(customErrorMessage) > 0 { - customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: fmt.Errorf(customErrorMessage), CustomErrorMessageExists: true}) + customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: fmt.Errorf(customErrorMessage), CustomErrorMessageExists: true, Validator: stripParams(validatorName)}) continue } - customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: fmt.Errorf("%s does not validate as %s", fmt.Sprint(v), validatorName), CustomErrorMessageExists: false}) + customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: fmt.Errorf("%s does not validate as %s", fmt.Sprint(v), validatorName), CustomErrorMessageExists: false, Validator: stripParams(validatorName)}) } } } @@ -844,7 +848,7 @@ func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options for validator := range options { isValid = false resultErr = Error{t.Name, fmt.Errorf( - "The following validator is invalid or can't be applied to the field: %q", validator), false} + "The following validator is invalid or can't be applied to the field: %q", validator), false, stripParams(validator)} return } } @@ -888,16 +892,16 @@ func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options field := 
fmt.Sprint(v) // make value into string, then validate with regex if result := validatefunc(field, ps[1:]...); (!result && !negate) || (result && negate) { if customMsgExists { - return false, Error{t.Name, fmt.Errorf(customErrorMessage), customMsgExists} + return false, Error{t.Name, fmt.Errorf(customErrorMessage), customMsgExists, stripParams(validatorSpec)} } if negate { - return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists} + return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec)} } - return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists} + return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec)} } default: // type not yet supported, fail - return false, Error{t.Name, fmt.Errorf("Validator %s doesn't support kind %s", validator, v.Kind()), false} + return false, Error{t.Name, fmt.Errorf("Validator %s doesn't support kind %s", validator, v.Kind()), false, stripParams(validatorSpec)} } } @@ -909,17 +913,17 @@ func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options field := fmt.Sprint(v) // make value into string, then validate with regex if result := validatefunc(field); !result && !negate || result && negate { if customMsgExists { - return false, Error{t.Name, fmt.Errorf(customErrorMessage), customMsgExists} + return false, Error{t.Name, fmt.Errorf(customErrorMessage), customMsgExists, stripParams(validatorSpec)} } if negate { - return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists} + return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec)} } - return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists} + return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec)} } default: //Not Yet Supported Types (Fail here!) 
err := fmt.Errorf("Validator %s doesn't support kind %s for value %v", validator, v.Kind(), v) - return false, Error{t.Name, err, false} + return false, Error{t.Name, err, false, stripParams(validatorSpec)} } } } @@ -933,9 +937,18 @@ func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options sort.Sort(sv) result := true for _, k := range sv { - resultItem, err := ValidateStruct(v.MapIndex(k).Interface()) - if err != nil { - return false, err + var resultItem bool + var err error + if v.MapIndex(k).Kind() != reflect.Struct { + resultItem, err = typeCheck(v.MapIndex(k), t, o, options) + if err != nil { + return false, err + } + } else { + resultItem, err = ValidateStruct(v.MapIndex(k).Interface()) + if err != nil { + return false, err + } } result = result && resultItem } @@ -978,6 +991,10 @@ func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options } } +func stripParams(validatorString string) string { + return paramsRegexp.ReplaceAllString(validatorString, "") +} + func isEmptyValue(v reflect.Value) bool { switch v.Kind() { case reflect.String, reflect.Array: diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 56fefcfc6a..64c7f42ef9 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -319,6 +319,8 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -436,10 +438,17 @@ var awsPartition = partition{ "cloudhsmv2": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "cloudsearch": service{ @@ -710,6 +719,7 @@ var awsPartition = partition{ "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -777,6 +787,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, @@ -793,6 +804,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, @@ -1008,6 +1020,7 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, @@ -1023,6 +1036,8 @@ var awsPartition = partition{ Endpoints: endpoints{ "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "greengrass": service{ @@ -1031,6 +1046,7 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "us-east-1": endpoint{}, @@ -1545,6 +1561,7 @@ var 
awsPartition = partition{ "snowball": service{ Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, @@ -1825,6 +1842,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-west-2": endpoint{}, }, @@ -1994,6 +2012,7 @@ var awscnPartition = partition{ "cn-north-1": endpoint{}, }, }, + "es": service{}, "events": service{ Endpoints: endpoints{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go index 4b102f8f20..f1adcf4819 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go @@ -7,6 +7,9 @@ import ( "github.com/aws/aws-sdk-go/aws/credentials" ) +// EnvProviderName provides a name of the provider when config is loaded from environment. +const EnvProviderName = "EnvConfigCredentials" + // envConfig is a collection of environment values the SDK will read // setup config from. All environment values are optional. But some values // such as credentials require multiple values to be complete or the values @@ -157,7 +160,7 @@ func envConfigLoad(enableSharedConfig bool) envConfig { if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 { cfg.Creds = credentials.Value{} } else { - cfg.Creds.ProviderName = "EnvConfigCredentials" + cfg.Creds.ProviderName = EnvProviderName } regionKeys := regionEnvKeys diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index c73d6d360d..d7562331da 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.10.46" +const SDKVersion = "1.12.19" diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go index 524ca952ad..5ce9cba329 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -121,6 +121,10 @@ func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string return nil } + if _, ok := value.Interface().([]byte); ok { + return q.parseScalar(v, value, prefix, tag) + } + // check for unflattened list member if !q.isEC2 && tag.Get("flattened") == "" { if listName := tag.Get("locationNameList"); listName == "" { diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc_custom.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc_custom.go index 53639be53f..5ebc580728 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc_custom.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc_custom.go @@ -1,84 +1,27 @@ -// AttributeValue Marshaling and Unmarshaling Helpers -// -// Utility helpers to marshal and unmarshal AttributeValue to and -// from Go types can be found in the dynamodbattribute sub package. This package -// provides has specialized functions for the common ways of working with -// AttributeValues. Such as map[string]*AttributeValue, []*AttributeValue, and -// directly with *AttributeValue. 
This is helpful for marshaling Go types for API -// operations such as PutItem, and unmarshaling Query and Scan APIs' responses. -// -// See the dynamodbattribute package documentation for more information. -// https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/dynamodbattribute/ -// -// AttributeValue Marshaling -// -// To marshal a Go type to an AttributeValue you can use the Marshal -// functions in the dynamodbattribute package. There are specialized versions -// of these functions for collections of AttributeValue, such as maps and lists. -// -// The following example uses MarshalMap to convert the Record Go type to a -// dynamodb.AttributeValue type and use the value to make a PutItem API request. -// -// type Record struct { -// ID string -// URLs []string -// } -// -// //... -// -// r := Record{ -// ID: "ABC123", -// URLs: []string{ -// "https://example.com/first/link", -// "https://example.com/second/url", -// }, -// } -// av, err := dynamodbattribute.MarshalMap(r) -// if err != nil { -// panic(fmt.Sprintf("failed to DynamoDB marshal Record, %v", err)) -// } -// -// _, err = svc.PutItem(&dynamodb.PutItemInput{ -// TableName: aws.String(myTableName), -// Item: av, -// }) -// if err != nil { -// panic(fmt.Sprintf("failed to put Record to DynamoDB, %v", err)) -// } -// -// AttributeValue Unmarshaling -// -// To unmarshal a dynamodb.AttributeValue to a Go type you can use the Unmarshal -// functions in the dynamodbattribute package. There are specialized versions -// of these functions for collections of AttributeValue, such as maps and lists. -// -// The following example will unmarshal the DynamoDB's Scan API operation. The -// Items returned by the operation will be unmarshaled into the slice of Records -// Go type. -// -// type Record struct { -// ID string -// URLs []string -// } -// -// //... -// -// var records []Record -// -// // Use the ScanPages method to perform the scan with pagination. Use -// // just Scan method to make the API call without pagination. -// err := svc.ScanPages(&dynamodb.ScanInput{ -// TableName: aws.String(myTableName), -// }, func(page *dynamodb.ScanOutput, last bool) bool { -// recs := []Record{} -// -// err := dynamodbattribute.UnmarshalListOfMaps(page.Items, &recs) -// if err != nil { -// panic(fmt.Sprintf("failed to unmarshal Dynamodb Scan Items, %v", err)) -// } -// -// records = append(records, recs...) -// -// return true // keep paging -// }) +/* +AttributeValue Marshaling and Unmarshaling Helpers + +Utility helpers to marshal and unmarshal AttributeValue to and +from Go types can be found in the dynamodbattribute sub package. This package +provides has specialized functions for the common ways of working with +AttributeValues. Such as map[string]*AttributeValue, []*AttributeValue, and +directly with *AttributeValue. This is helpful for marshaling Go types for API +operations such as PutItem, and unmarshaling Query and Scan APIs' responses. + +See the dynamodbattribute package documentation for more information. +https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/dynamodbattribute/ + +Expression Builders + +The expression package provides utility types and functions to build DynamoDB +expression for type safe construction of API ExpressionAttributeNames, and +ExpressionAttribute Values. + +The package represents the various DynamoDB Expressions as structs named +accordingly. For example, ConditionBuilder represents a DynamoDB Condition +Expression, an UpdateBuilder represents a DynamoDB Update Expression, and so on. 
+ +See the expression package documentation for more information. +https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/expression/ +*/ package dynamodb diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/decode.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/decode.go index 5ef80a4e78..e02497568e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/decode.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/decode.go @@ -202,7 +202,7 @@ func (d *Decoder) decodeBinary(b []byte, v reflect.Value) error { return nil } - if v.Kind() != reflect.Slice { + if v.Kind() != reflect.Slice && v.Kind() != reflect.Array { return &UnmarshalTypeError{Value: "binary", Type: v.Type()} } @@ -220,7 +220,7 @@ func (d *Decoder) decodeBinary(b []byte, v reflect.Value) error { switch v.Type().Elem().Kind() { case reflect.Uint8: // Fallback to reflection copy for type aliased of []byte type - if v.IsNil() || v.Cap() < len(b) { + if v.Kind() != reflect.Array && (v.IsNil() || v.Cap() < len(b)) { v.Set(reflect.MakeSlice(v.Type(), len(b), len(b))) } else if v.Len() != len(b) { v.SetLen(len(b)) @@ -229,10 +229,17 @@ func (d *Decoder) decodeBinary(b []byte, v reflect.Value) error { v.Index(i).SetUint(uint64(b[i])) } default: - if v.Kind() == reflect.Array && v.Type().Elem().Kind() == reflect.Uint8 { - reflect.Copy(v, reflect.ValueOf(b)) + if v.Kind() == reflect.Array { + switch v.Type().Elem().Kind() { + case reflect.Uint8: + reflect.Copy(v, reflect.ValueOf(b)) + default: + return &UnmarshalTypeError{Value: "binary", Type: v.Type()} + } + break } + return &UnmarshalTypeError{Value: "binary", Type: v.Type()} } @@ -251,6 +258,8 @@ func (d *Decoder) decodeBool(b *bool, v reflect.Value) error { } func (d *Decoder) decodeBinarySet(bs [][]byte, v reflect.Value) error { + isArray := false + switch v.Kind() { case reflect.Slice: // Make room for the slice elements if needed @@ -260,6 +269,7 @@ func (d *Decoder) decodeBinarySet(bs [][]byte, v reflect.Value) error { } case reflect.Array: // Limited to capacity of existing array. + isArray = true case reflect.Interface: set := make([][]byte, len(bs)) for i, b := range bs { @@ -274,7 +284,9 @@ func (d *Decoder) decodeBinarySet(bs [][]byte, v reflect.Value) error { } for i := 0; i < v.Cap() && i < len(bs); i++ { - v.SetLen(i + 1) + if !isArray { + v.SetLen(i + 1) + } u, elem := indirect(v.Index(i), false) if u != nil { return u.UnmarshalDynamoDBAttributeValue(&dynamodb.AttributeValue{BS: bs}) @@ -363,6 +375,8 @@ func (d *Decoder) decodeNumberToInterface(n *string) (interface{}, error) { } func (d *Decoder) decodeNumberSet(ns []*string, v reflect.Value) error { + isArray := false + switch v.Kind() { case reflect.Slice: // Make room for the slice elements if needed @@ -372,6 +386,7 @@ func (d *Decoder) decodeNumberSet(ns []*string, v reflect.Value) error { } case reflect.Array: // Limited to capacity of existing array. 
+ isArray = true case reflect.Interface: if d.UseNumber { set := make([]Number, len(ns)) @@ -396,7 +411,9 @@ func (d *Decoder) decodeNumberSet(ns []*string, v reflect.Value) error { } for i := 0; i < v.Cap() && i < len(ns); i++ { - v.SetLen(i + 1) + if !isArray { + v.SetLen(i + 1) + } u, elem := indirect(v.Index(i), false) if u != nil { return u.UnmarshalDynamoDBAttributeValue(&dynamodb.AttributeValue{NS: ns}) @@ -410,6 +427,8 @@ func (d *Decoder) decodeNumberSet(ns []*string, v reflect.Value) error { } func (d *Decoder) decodeList(avList []*dynamodb.AttributeValue, v reflect.Value) error { + isArray := false + switch v.Kind() { case reflect.Slice: // Make room for the slice elements if needed @@ -419,6 +438,7 @@ func (d *Decoder) decodeList(avList []*dynamodb.AttributeValue, v reflect.Value) } case reflect.Array: // Limited to capacity of existing array. + isArray = true case reflect.Interface: s := make([]interface{}, len(avList)) for i, av := range avList { @@ -434,7 +454,10 @@ func (d *Decoder) decodeList(avList []*dynamodb.AttributeValue, v reflect.Value) // If v is not a slice, array for i := 0; i < v.Cap() && i < len(avList); i++ { - v.SetLen(i + 1) + if !isArray { + v.SetLen(i + 1) + } + if err := d.decode(avList[i], v.Index(i), tag{}); err != nil { return err } @@ -526,6 +549,8 @@ func (d *Decoder) decodeString(s *string, v reflect.Value, fieldTag tag) error { } func (d *Decoder) decodeStringSet(ss []*string, v reflect.Value) error { + isArray := false + switch v.Kind() { case reflect.Slice: // Make room for the slice elements if needed @@ -534,6 +559,7 @@ func (d *Decoder) decodeStringSet(ss []*string, v reflect.Value) error { } case reflect.Array: // Limited to capacity of existing array. + isArray = true case reflect.Interface: set := make([]string, len(ss)) for i, s := range ss { @@ -548,7 +574,9 @@ func (d *Decoder) decodeStringSet(ss []*string, v reflect.Value) error { } for i := 0; i < v.Cap() && i < len(ss); i++ { - v.SetLen(i + 1) + if !isArray { + v.SetLen(i + 1) + } u, elem := indirect(v.Index(i), false) if u != nil { return u.UnmarshalDynamoDBAttributeValue(&dynamodb.AttributeValue{SS: ss}) diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/encode.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/encode.go index d9e42fac95..fb30eff919 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/encode.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/encode.go @@ -362,7 +362,10 @@ func (e *Encoder) encodeMap(av *dynamodb.AttributeValue, v reflect.Value, fieldT func (e *Encoder) encodeSlice(av *dynamodb.AttributeValue, v reflect.Value, fieldTag tag) error { switch v.Type().Elem().Kind() { case reflect.Uint8: - b := v.Bytes() + slice := reflect.MakeSlice(byteSliceType, v.Len(), v.Len()) + reflect.Copy(slice, v) + + b := slice.Bytes() if len(b) == 0 { encodeNull(av) return nil diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go index 3b5c98ea7c..ed9b326c0c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -1377,8 +1377,7 @@ func (c *EC2) AttachVpnGatewayRequest(input *AttachVpnGatewayInput) (req *reques // Attaches a virtual private gateway to a VPC. You can attach one virtual private // gateway to one VPC at a time. 
// -// For more information, see Adding a Hardware Virtual Private Gateway to Your -// VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// For more information, see AWS Managed VPN Connections (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2275,8 +2274,7 @@ func (c *EC2) ConfirmProductInstanceRequest(input *ConfirmProductInstanceInput) // // Determines whether a product code is associated with an instance. This action // can only be used by the owner of the product code. It is useful when a product -// code owner needs to verify whether another user's instance is eligible for -// support. +// code owner must verify whether another user's instance is eligible for support. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2306,6 +2304,80 @@ func (c *EC2) ConfirmProductInstanceWithContext(ctx aws.Context, input *ConfirmP return out, req.Send() } +const opCopyFpgaImage = "CopyFpgaImage" + +// CopyFpgaImageRequest generates a "aws/request.Request" representing the +// client's request for the CopyFpgaImage operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CopyFpgaImage for more information on using the CopyFpgaImage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CopyFpgaImageRequest method. +// req, resp := client.CopyFpgaImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CopyFpgaImage +func (c *EC2) CopyFpgaImageRequest(input *CopyFpgaImageInput) (req *request.Request, output *CopyFpgaImageOutput) { + op := &request.Operation{ + Name: opCopyFpgaImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CopyFpgaImageInput{} + } + + output = &CopyFpgaImageOutput{} + req = c.newRequest(op, input, output) + return +} + +// CopyFpgaImage API operation for Amazon Elastic Compute Cloud. +// +// Copies the specified Amazon FPGA Image (AFI) to the current region. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CopyFpgaImage for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CopyFpgaImage +func (c *EC2) CopyFpgaImage(input *CopyFpgaImageInput) (*CopyFpgaImageOutput, error) { + req, out := c.CopyFpgaImageRequest(input) + return out, req.Send() +} + +// CopyFpgaImageWithContext is the same as CopyFpgaImage with the addition of +// the ability to pass a context and additional request options. +// +// See CopyFpgaImage for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CopyFpgaImageWithContext(ctx aws.Context, input *CopyFpgaImageInput, opts ...request.Option) (*CopyFpgaImageOutput, error) { + req, out := c.CopyFpgaImageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCopyImage = "CopyImage" // CopyImageRequest generates a "aws/request.Request" representing the @@ -2539,9 +2611,9 @@ func (c *EC2) CreateCustomerGatewayRequest(input *CreateCustomerGatewayInput) (r // the exception of 7224, which is reserved in the us-east-1 region, and 9059, // which is reserved in the eu-west-1 region. // -// For more information about VPN customer gateways, see Adding a Hardware Virtual -// Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) -// in the Amazon Virtual Private Cloud User Guide. +// For more information about VPN customer gateways, see AWS Managed VPN Connections +// (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) in the +// Amazon Virtual Private Cloud User Guide. // // You cannot create more than one customer gateway with the same VPN type, // IP address, and BGP ASN parameter values. If you run an identical request @@ -3793,8 +3865,8 @@ func (c *EC2) CreatePlacementGroupRequest(input *CreatePlacementGroupInput) (req // CreatePlacementGroup API operation for Amazon Elastic Compute Cloud. // -// Creates a placement group that you launch cluster instances into. You must -// give the group a name that's unique within the scope of your account. +// Creates a placement group that you launch cluster instances into. Give the +// group a name that's unique within the scope of your account. // // For more information about placement groups and cluster instances, see Cluster // Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using_cluster_computing.html) @@ -4967,8 +5039,7 @@ func (c *EC2) CreateVpnConnectionRequest(input *CreateVpnConnectionInput) (req * // This is an idempotent operation. If you perform the operation more than once, // Amazon EC2 doesn't return an error. // -// For more information about VPN connections, see Adding a Hardware Virtual -// Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// For more information, see AWS Managed VPN Connections (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5050,9 +5121,9 @@ func (c *EC2) CreateVpnConnectionRouteRequest(input *CreateVpnConnectionRouteInp // traffic to be routed from the virtual private gateway to the VPN customer // gateway. // -// For more information about VPN connections, see Adding a Hardware Virtual -// Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) -// in the Amazon Virtual Private Cloud User Guide. +// For more information about VPN connections, see AWS Managed VPN Connections +// (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) in the +// Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5130,8 +5201,8 @@ func (c *EC2) CreateVpnGatewayRequest(input *CreateVpnGatewayInput) (req *reques // on the VPC side of your VPN connection. You can create a virtual private // gateway before creating the VPC itself. // -// For more information about virtual private gateways, see Adding a Hardware -// Virtual Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// For more information about virtual private gateways, see AWS Managed VPN +// Connections (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5466,6 +5537,80 @@ func (c *EC2) DeleteFlowLogsWithContext(ctx aws.Context, input *DeleteFlowLogsIn return out, req.Send() } +const opDeleteFpgaImage = "DeleteFpgaImage" + +// DeleteFpgaImageRequest generates a "aws/request.Request" representing the +// client's request for the DeleteFpgaImage operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteFpgaImage for more information on using the DeleteFpgaImage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteFpgaImageRequest method. +// req, resp := client.DeleteFpgaImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteFpgaImage +func (c *EC2) DeleteFpgaImageRequest(input *DeleteFpgaImageInput) (req *request.Request, output *DeleteFpgaImageOutput) { + op := &request.Operation{ + Name: opDeleteFpgaImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteFpgaImageInput{} + } + + output = &DeleteFpgaImageOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteFpgaImage API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified Amazon FPGA Image (AFI). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteFpgaImage for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteFpgaImage +func (c *EC2) DeleteFpgaImage(input *DeleteFpgaImageInput) (*DeleteFpgaImageOutput, error) { + req, out := c.DeleteFpgaImageRequest(input) + return out, req.Send() +} + +// DeleteFpgaImageWithContext is the same as DeleteFpgaImage with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteFpgaImage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteFpgaImageWithContext(ctx aws.Context, input *DeleteFpgaImageInput, opts ...request.Option) (*DeleteFpgaImageOutput, error) { + req, out := c.DeleteFpgaImageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteInternetGateway = "DeleteInternetGateway" // DeleteInternetGatewayRequest generates a "aws/request.Request" representing the @@ -7811,9 +7956,9 @@ func (c *EC2) DescribeCustomerGatewaysRequest(input *DescribeCustomerGatewaysInp // // Describes one or more of your VPN customer gateways. // -// For more information about VPN customer gateways, see Adding a Hardware Virtual -// Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) -// in the Amazon Virtual Private Cloud User Guide. +// For more information about VPN customer gateways, see AWS Managed VPN Connections +// (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) in the +// Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -8219,6 +8364,80 @@ func (c *EC2) DescribeFlowLogsWithContext(ctx aws.Context, input *DescribeFlowLo return out, req.Send() } +const opDescribeFpgaImageAttribute = "DescribeFpgaImageAttribute" + +// DescribeFpgaImageAttributeRequest generates a "aws/request.Request" representing the +// client's request for the DescribeFpgaImageAttribute operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeFpgaImageAttribute for more information on using the DescribeFpgaImageAttribute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeFpgaImageAttributeRequest method. +// req, resp := client.DescribeFpgaImageAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFpgaImageAttribute +func (c *EC2) DescribeFpgaImageAttributeRequest(input *DescribeFpgaImageAttributeInput) (req *request.Request, output *DescribeFpgaImageAttributeOutput) { + op := &request.Operation{ + Name: opDescribeFpgaImageAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeFpgaImageAttributeInput{} + } + + output = &DescribeFpgaImageAttributeOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeFpgaImageAttribute API operation for Amazon Elastic Compute Cloud. +// +// Describes the specified attribute of the specified Amazon FPGA Image (AFI). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeFpgaImageAttribute for usage and error information. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFpgaImageAttribute +func (c *EC2) DescribeFpgaImageAttribute(input *DescribeFpgaImageAttributeInput) (*DescribeFpgaImageAttributeOutput, error) { + req, out := c.DescribeFpgaImageAttributeRequest(input) + return out, req.Send() +} + +// DescribeFpgaImageAttributeWithContext is the same as DescribeFpgaImageAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeFpgaImageAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeFpgaImageAttributeWithContext(ctx aws.Context, input *DescribeFpgaImageAttributeInput, opts ...request.Option) (*DescribeFpgaImageAttributeOutput, error) { + req, out := c.DescribeFpgaImageAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeFpgaImages = "DescribeFpgaImages" // DescribeFpgaImagesRequest generates a "aws/request.Request" representing the @@ -13352,9 +13571,9 @@ func (c *EC2) DescribeVpnConnectionsRequest(input *DescribeVpnConnectionsInput) // // Describes one or more of your VPN connections. // -// For more information about VPN connections, see Adding a Hardware Virtual -// Private Gateway to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) -// in the Amazon Virtual Private Cloud User Guide. +// For more information about VPN connections, see AWS Managed VPN Connections +// (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) in the +// Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -13430,8 +13649,8 @@ func (c *EC2) DescribeVpnGatewaysRequest(input *DescribeVpnGatewaysInput) (req * // // Describes one or more of your virtual private gateways. // -// For more information about virtual private gateways, see Adding an IPsec -// Hardware VPN to Your VPC (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) +// For more information about virtual private gateways, see AWS Managed VPN +// Connections (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) // in the Amazon Virtual Private Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -14855,7 +15074,7 @@ func (c *EC2) GetConsoleOutputRequest(input *GetConsoleOutputInput) (req *reques // the Amazon EC2 API and command line interface. // // Instance console output is buffered and posted shortly after instance boot, -// reboot, and termination. Amazon EC2 preserves the most recent 64 KB output +// reboot, and termination. Amazon EC2 preserves the most recent 64 KB output, // which is available for at least one hour after the most recent post. // // For Linux instances, the instance console output displays the exact console @@ -15093,20 +15312,24 @@ func (c *EC2) GetPasswordDataRequest(input *GetPasswordDataInput) (req *request. // GetPasswordData API operation for Amazon Elastic Compute Cloud. // -// Retrieves the encrypted administrator password for an instance running Windows. 
+// Retrieves the encrypted administrator password for a running Windows instance. // -// The Windows password is generated at boot if the EC2Config service plugin, -// Ec2SetPassword, is enabled. This usually only happens the first time an AMI -// is launched, and then Ec2SetPassword is automatically disabled. The password -// is not generated for rebundled AMIs unless Ec2SetPassword is enabled before -// bundling. +// The Windows password is generated at boot by the EC2Config service or EC2Launch +// scripts (Windows Server 2016 and later). This usually only happens the first +// time an instance is launched. For more information, see EC2Config (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/UsingConfig_WinAMI.html) +// and EC2Launch (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2launch.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// For the EC2Config service, the password is not generated for rebundled AMIs +// unless Ec2SetPassword is enabled before bundling. // // The password is encrypted using the key pair that you specified when you // launched the instance. You must provide the corresponding key pair file. // -// Password generation and encryption takes a few moments. We recommend that -// you wait up to 15 minutes after launching an instance before trying to retrieve -// the generated password. +// When you launch an instance, password generation and encryption may take +// a few minutes. If you try to retrieve the password before it's available, +// the output returns an empty string. We recommend that you wait up to 15 minutes +// after launching an instance before trying to retrieve the generated password. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -15180,9 +15403,10 @@ func (c *EC2) GetReservedInstancesExchangeQuoteRequest(input *GetReservedInstanc // GetReservedInstancesExchangeQuote API operation for Amazon Elastic Compute Cloud. // -// Returns details about the values and term of your specified Convertible Reserved -// Instances. When a target configuration is specified, it returns information -// about whether the exchange is valid and can be performed. +// Returns a quote and exchange information for exchanging one or more specified +// Convertible Reserved Instances for a new Convertible Reserved Instance. If +// the exchange cannot be performed, the reason is returned in the response. +// Use AcceptReservedInstancesExchangeQuote to perform the exchange. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -15602,6 +15826,80 @@ func (c *EC2) ImportVolumeWithContext(ctx aws.Context, input *ImportVolumeInput, return out, req.Send() } +const opModifyFpgaImageAttribute = "ModifyFpgaImageAttribute" + +// ModifyFpgaImageAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ModifyFpgaImageAttribute operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyFpgaImageAttribute for more information on using the ModifyFpgaImageAttribute +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyFpgaImageAttributeRequest method. +// req, resp := client.ModifyFpgaImageAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyFpgaImageAttribute +func (c *EC2) ModifyFpgaImageAttributeRequest(input *ModifyFpgaImageAttributeInput) (req *request.Request, output *ModifyFpgaImageAttributeOutput) { + op := &request.Operation{ + Name: opModifyFpgaImageAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyFpgaImageAttributeInput{} + } + + output = &ModifyFpgaImageAttributeOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyFpgaImageAttribute API operation for Amazon Elastic Compute Cloud. +// +// Modifies the specified attribute of the specified Amazon FPGA Image (AFI). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyFpgaImageAttribute for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyFpgaImageAttribute +func (c *EC2) ModifyFpgaImageAttribute(input *ModifyFpgaImageAttributeInput) (*ModifyFpgaImageAttributeOutput, error) { + req, out := c.ModifyFpgaImageAttributeRequest(input) + return out, req.Send() +} + +// ModifyFpgaImageAttributeWithContext is the same as ModifyFpgaImageAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyFpgaImageAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyFpgaImageAttributeWithContext(ctx aws.Context, input *ModifyFpgaImageAttributeInput, opts ...request.Option) (*ModifyFpgaImageAttributeOutput, error) { + req, out := c.ModifyFpgaImageAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opModifyHosts = "ModifyHosts" // ModifyHostsRequest generates a "aws/request.Request" representing the @@ -15909,15 +16207,15 @@ func (c *EC2) ModifyImageAttributeRequest(input *ModifyImageAttributeInput) (req // ModifyImageAttribute API operation for Amazon Elastic Compute Cloud. // // Modifies the specified attribute of the specified AMI. You can specify only -// one attribute at a time. +// one attribute at a time. You can use the Attribute parameter to specify the +// attribute or one of the following parameters: Description, LaunchPermission, +// or ProductCode. // // AWS Marketplace product codes cannot be modified. Images with an AWS Marketplace // product code cannot be made public. // -// The SriovNetSupport enhanced networking attribute cannot be changed using -// this command. Instead, enable SriovNetSupport on an instance and create an -// AMI from the instance. This will result in an image with SriovNetSupport -// enabled. 
+// To enable the SriovNetSupport enhanced networking attribute of an image, +// enable SriovNetSupport on an instance and create an AMI from the instance. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -16242,9 +16540,9 @@ func (c *EC2) ModifyReservedInstancesRequest(input *ModifyReservedInstancesInput // ModifyReservedInstances API operation for Amazon Elastic Compute Cloud. // // Modifies the Availability Zone, instance count, instance type, or network -// platform (EC2-Classic or EC2-VPC) of your Standard Reserved Instances. The -// Reserved Instances to be modified must be identical, except for Availability -// Zone, network platform, and instance type. +// platform (EC2-Classic or EC2-VPC) of your Reserved Instances. The Reserved +// Instances to be modified must be identical, except for Availability Zone, +// network platform, and instance type. // // For more information, see Modifying Reserved Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-modifying.html) // in the Amazon Elastic Compute Cloud User Guide. @@ -16970,6 +17268,89 @@ func (c *EC2) ModifyVpcPeeringConnectionOptionsWithContext(ctx aws.Context, inpu return out, req.Send() } +const opModifyVpcTenancy = "ModifyVpcTenancy" + +// ModifyVpcTenancyRequest generates a "aws/request.Request" representing the +// client's request for the ModifyVpcTenancy operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyVpcTenancy for more information on using the ModifyVpcTenancy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyVpcTenancyRequest method. +// req, resp := client.ModifyVpcTenancyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcTenancy +func (c *EC2) ModifyVpcTenancyRequest(input *ModifyVpcTenancyInput) (req *request.Request, output *ModifyVpcTenancyOutput) { + op := &request.Operation{ + Name: opModifyVpcTenancy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyVpcTenancyInput{} + } + + output = &ModifyVpcTenancyOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyVpcTenancy API operation for Amazon Elastic Compute Cloud. +// +// Modifies the instance tenancy attribute of the specified VPC. You can change +// the instance tenancy attribute of a VPC to default only. You cannot change +// the instance tenancy attribute to dedicated. +// +// After you modify the tenancy of the VPC, any new instances that you launch +// into the VPC have a tenancy of default, unless you specify otherwise during +// launch. The tenancy of any existing instances in the VPC is not affected. +// +// For more information about Dedicated Instances, see Dedicated Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/dedicated-instance.html) +// in the Amazon Elastic Compute Cloud User Guide. 
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyVpcTenancy for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcTenancy +func (c *EC2) ModifyVpcTenancy(input *ModifyVpcTenancyInput) (*ModifyVpcTenancyOutput, error) { + req, out := c.ModifyVpcTenancyRequest(input) + return out, req.Send() +} + +// ModifyVpcTenancyWithContext is the same as ModifyVpcTenancy with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyVpcTenancy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyVpcTenancyWithContext(ctx aws.Context, input *ModifyVpcTenancyInput, opts ...request.Option) (*ModifyVpcTenancyOutput, error) { + req, out := c.ModifyVpcTenancyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opMonitorInstances = "MonitorInstances" // MonitorInstancesRequest generates a "aws/request.Request" representing the @@ -18465,6 +18846,81 @@ func (c *EC2) RequestSpotInstancesWithContext(ctx aws.Context, input *RequestSpo return out, req.Send() } +const opResetFpgaImageAttribute = "ResetFpgaImageAttribute" + +// ResetFpgaImageAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ResetFpgaImageAttribute operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ResetFpgaImageAttribute for more information on using the ResetFpgaImageAttribute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ResetFpgaImageAttributeRequest method. +// req, resp := client.ResetFpgaImageAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetFpgaImageAttribute +func (c *EC2) ResetFpgaImageAttributeRequest(input *ResetFpgaImageAttributeInput) (req *request.Request, output *ResetFpgaImageAttributeOutput) { + op := &request.Operation{ + Name: opResetFpgaImageAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetFpgaImageAttributeInput{} + } + + output = &ResetFpgaImageAttributeOutput{} + req = c.newRequest(op, input, output) + return +} + +// ResetFpgaImageAttribute API operation for Amazon Elastic Compute Cloud. +// +// Resets the specified attribute of the specified Amazon FPGA Image (AFI) to +// its default value. You can only reset the load permission attribute. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ResetFpgaImageAttribute for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetFpgaImageAttribute +func (c *EC2) ResetFpgaImageAttribute(input *ResetFpgaImageAttributeInput) (*ResetFpgaImageAttributeOutput, error) { + req, out := c.ResetFpgaImageAttributeRequest(input) + return out, req.Send() +} + +// ResetFpgaImageAttributeWithContext is the same as ResetFpgaImageAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See ResetFpgaImageAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ResetFpgaImageAttributeWithContext(ctx aws.Context, input *ResetFpgaImageAttributeInput, opts ...request.Option) (*ResetFpgaImageAttributeOutput, error) { + req, out := c.ResetFpgaImageAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opResetImageAttribute = "ResetImageAttribute" // ResetImageAttributeRequest generates a "aws/request.Request" representing the @@ -19115,8 +19571,8 @@ func (c *EC2) RunInstancesRequest(input *RunInstancesInput) (req *request.Reques // not subscribed, the request fails. // // To ensure faster instance launches, break up large requests into smaller -// batches. For example, create 5 separate launch requests for 100 instances -// each instead of 1 launch request for 500 instances. +// batches. For example, create five separate launch requests for 100 instances +// each instead of one launch request for 500 instances. // // An instance is ready for you to use when it's in the running state. You can // check the state of your instance using DescribeInstances. You can tag instances @@ -19290,16 +19746,20 @@ func (c *EC2) StartInstancesRequest(input *StartInstancesInput) (req *request.Re // StartInstances API operation for Amazon Elastic Compute Cloud. // -// Starts an Amazon EBS-backed AMI that you've previously stopped. +// Starts an Amazon EBS-backed instance that you've previously stopped. // // Instances that use Amazon EBS volumes as their root devices can be quickly // stopped and started. When an instance is stopped, the compute resources are -// released and you are not billed for hourly instance usage. However, your -// root partition Amazon EBS volume remains, continues to persist your data, -// and you are charged for Amazon EBS volume usage. You can restart your instance -// at any time. Each time you transition an instance from stopped to started, -// Amazon EC2 charges a full instance hour, even if transitions happen multiple -// times within a single hour. +// released and you are not billed for instance usage. However, your root partition +// Amazon EBS volume remains and continues to persist your data, and you are +// charged for Amazon EBS volume usage. You can restart your instance at any +// time. Every time you start your Windows instance, Amazon EC2 charges you +// for a full instance hour. 
If you stop and restart your Windows instance, +// a new instance hour begins and Amazon EC2 charges you for another full instance +// hour even if you are still within the same 60-minute period when it was stopped. +// Every time you start your Linux instance, Amazon EC2 charges a one-minute +// minimum for instance usage, and thereafter charges per second for instance +// usage. // // Before stopping an instance, make sure it is in a state from which it can // be restarted. Stopping an instance does not preserve data stored in RAM. @@ -19384,14 +19844,17 @@ func (c *EC2) StopInstancesRequest(input *StopInstancesInput) (req *request.Requ // // Stops an Amazon EBS-backed instance. // -// We don't charge hourly usage for a stopped instance, or data transfer fees; -// however, your root partition Amazon EBS volume remains, continues to persist -// your data, and you are charged for Amazon EBS volume usage. Each time you -// transition an instance from stopped to started, Amazon EC2 charges a full -// instance hour, even if transitions happen multiple times within a single -// hour. +// We don't charge usage for a stopped instance, or data transfer fees; however, +// your root partition Amazon EBS volume remains and continues to persist your +// data, and you are charged for Amazon EBS volume usage. Every time you start +// your Windows instance, Amazon EC2 charges you for a full instance hour. If +// you stop and restart your Windows instance, a new instance hour begins and +// Amazon EC2 charges you for another full instance hour even if you are still +// within the same 60-minute period when it was stopped. Every time you start +// your Linux instance, Amazon EC2 charges a one-minute minimum for instance +// usage, and thereafter charges per second for instance usage. // -// You can't start or stop Spot instances, and you can't stop instance store-backed +// You can't start or stop Spot Instances, and you can't stop instance store-backed // instances. // // When you stop an instance, we shut it down. You can restart your instance @@ -19936,14 +20399,14 @@ type AcceptReservedInstancesExchangeQuoteInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` - // The IDs of the Convertible Reserved Instances to exchange for other Convertible - // Reserved Instances of the same or higher value. + // The IDs of the Convertible Reserved Instances to exchange for another Convertible + // Reserved Instance of the same or higher value. // // ReservedInstanceIds is a required field ReservedInstanceIds []*string `locationName:"ReservedInstanceId" locationNameList:"ReservedInstanceId" type:"list" required:"true"` - // The configurations of the Convertible Reserved Instance offerings that you - // are purchasing in this exchange. + // The configuration of the target Convertible Reserved Instance to exchange + // for your current Convertible Reserved Instances. TargetConfigurations []*TargetConfigurationRequest `locationName:"TargetConfiguration" locationNameList:"TargetConfigurationRequest" type:"list"` } @@ -21759,8 +22222,7 @@ func (s *AttributeValue) SetValue(v string) *AttributeValue { type AuthorizeSecurityGroupEgressInput struct { _ struct{} `type:"structure"` - // The CIDR IPv4 address range. We recommend that you specify the CIDR range - // in a set of IP permissions instead. + // Not supported. Use a set of IP permissions to specify the CIDR. 
CidrIp *string `locationName:"cidrIp" type:"string"` // Checks whether you have the required permissions for the action, without @@ -21769,8 +22231,7 @@ type AuthorizeSecurityGroupEgressInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` - // The start of port range for the TCP and UDP protocols, or an ICMP type number. - // We recommend that you specify the port range in a set of IP permissions instead. + // Not supported. Use a set of IP permissions to specify the port. FromPort *int64 `locationName:"fromPort" type:"integer"` // The ID of the security group. @@ -21778,26 +22239,23 @@ type AuthorizeSecurityGroupEgressInput struct { // GroupId is a required field GroupId *string `locationName:"groupId" type:"string" required:"true"` - // A set of IP permissions. You can't specify a destination security group and - // a CIDR IP address range. + // One or more sets of IP permissions. You can't specify a destination security + // group and a CIDR IP address range in the same set of permissions. IpPermissions []*IpPermission `locationName:"ipPermissions" locationNameList:"item" type:"list"` - // The IP protocol name or number. We recommend that you specify the protocol - // in a set of IP permissions instead. + // Not supported. Use a set of IP permissions to specify the protocol name or + // number. IpProtocol *string `locationName:"ipProtocol" type:"string"` - // The name of a destination security group. To authorize outbound access to - // a destination security group, we recommend that you use a set of IP permissions - // instead. + // Not supported. Use a set of IP permissions to specify a destination security + // group. SourceSecurityGroupName *string `locationName:"sourceSecurityGroupName" type:"string"` - // The AWS account number for a destination security group. To authorize outbound - // access to a destination security group, we recommend that you use a set of - // IP permissions instead. + // Not supported. Use a set of IP permissions to specify a destination security + // group. SourceSecurityGroupOwnerId *string `locationName:"sourceSecurityGroupOwnerId" type:"string"` - // The end of port range for the TCP and UDP protocols, or an ICMP type number. - // We recommend that you specify the port range in a set of IP permissions instead. + // Not supported. Use a set of IP permissions to specify the port. ToPort *int64 `locationName:"toPort" type:"integer"` } @@ -21922,8 +22380,8 @@ type AuthorizeSecurityGroupIngressInput struct { // either the security group ID or the security group name in the request. GroupName *string `type:"string"` - // A set of IP permissions. Can be used to specify multiple rules in a single - // command. + // One or more sets of IP permissions. Can be used to specify multiple rules + // in a single command. IpPermissions []*IpPermission `locationNameList:"item" type:"list"` // The IP protocol name (tcp, udp, icmp) or number (see Protocol Numbers (http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml)). @@ -21943,8 +22401,8 @@ type AuthorizeSecurityGroupIngressInput struct { // be in the same VPC. SourceSecurityGroupName *string `type:"string"` - // [EC2-Classic] The AWS account number for the source security group, if the - // source security group is in a different account. You can't specify this parameter + // [EC2-Classic] The AWS account ID for the source security group, if the source + // security group is in a different account. 
You can't specify this parameter // in combination with the following parameters: the CIDR IP address range, // the IP protocol, the start of the port range, and the end of the port range. // Creates rules that grant full ICMP, UDP, and TCP access. To create a rule @@ -23511,6 +23969,123 @@ func (s *ConversionTask) SetTags(v []*Tag) *ConversionTask { return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CopyFpgaImageRequest +type CopyFpgaImageInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). + ClientToken *string `type:"string"` + + // The description for the new AFI. + Description *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The name for the new AFI. The default is the name of the source AFI. + Name *string `type:"string"` + + // The ID of the source AFI. + // + // SourceFpgaImageId is a required field + SourceFpgaImageId *string `type:"string" required:"true"` + + // The region that contains the source AFI. + // + // SourceRegion is a required field + SourceRegion *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CopyFpgaImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyFpgaImageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CopyFpgaImageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CopyFpgaImageInput"} + if s.SourceFpgaImageId == nil { + invalidParams.Add(request.NewErrParamRequired("SourceFpgaImageId")) + } + if s.SourceRegion == nil { + invalidParams.Add(request.NewErrParamRequired("SourceRegion")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *CopyFpgaImageInput) SetClientToken(v string) *CopyFpgaImageInput { + s.ClientToken = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CopyFpgaImageInput) SetDescription(v string) *CopyFpgaImageInput { + s.Description = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CopyFpgaImageInput) SetDryRun(v bool) *CopyFpgaImageInput { + s.DryRun = &v + return s +} + +// SetName sets the Name field's value. +func (s *CopyFpgaImageInput) SetName(v string) *CopyFpgaImageInput { + s.Name = &v + return s +} + +// SetSourceFpgaImageId sets the SourceFpgaImageId field's value. +func (s *CopyFpgaImageInput) SetSourceFpgaImageId(v string) *CopyFpgaImageInput { + s.SourceFpgaImageId = &v + return s +} + +// SetSourceRegion sets the SourceRegion field's value. +func (s *CopyFpgaImageInput) SetSourceRegion(v string) *CopyFpgaImageInput { + s.SourceRegion = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CopyFpgaImageResult +type CopyFpgaImageOutput struct { + _ struct{} `type:"structure"` + + // The ID of the new AFI. 
+ FpgaImageId *string `locationName:"fpgaImageId" type:"string"` +} + +// String returns the string representation +func (s CopyFpgaImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyFpgaImageOutput) GoString() string { + return s.String() +} + +// SetFpgaImageId sets the FpgaImageId field's value. +func (s *CopyFpgaImageOutput) SetFpgaImageId(v string) *CopyFpgaImageOutput { + s.FpgaImageId = &v + return s +} + // Contains the parameters for CopyImage. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CopyImageRequest type CopyImageInput struct { @@ -26868,11 +27443,7 @@ type CreateVpnConnectionInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` - // Indicates whether the VPN connection requires static routes. If you are creating - // a VPN connection for a device that does not support BGP, you must specify - // true. - // - // Default: false + // The options for the VPN connection. Options *VpnConnectionOptionsSpecification `locationName:"options" type:"structure"` // The type of VPN connection (ipsec.1). @@ -27044,6 +27615,13 @@ func (s CreateVpnConnectionRouteOutput) GoString() string { type CreateVpnGatewayInput struct { _ struct{} `type:"structure"` + // A private Autonomous System Number (ASN) for the Amazon side of a BGP session. + // If you're using a 16-bit ASN, it must be in the 64512 to 65534 range. If + // you're using a 32-bit ASN, it must be in the 4200000000 to 4294967294 range. + // + // Default: 64512 + AmazonSideAsn *int64 `type:"long"` + // The Availability Zone for the virtual private gateway. AvailabilityZone *string `type:"string"` @@ -27082,6 +27660,12 @@ func (s *CreateVpnGatewayInput) Validate() error { return nil } +// SetAmazonSideAsn sets the AmazonSideAsn field's value. +func (s *CreateVpnGatewayInput) SetAmazonSideAsn(v int64) *CreateVpnGatewayInput { + s.AmazonSideAsn = &v + return s +} + // SetAvailabilityZone sets the AvailabilityZone field's value. func (s *CreateVpnGatewayInput) SetAvailabilityZone(v string) *CreateVpnGatewayInput { s.AvailabilityZone = &v @@ -27471,6 +28055,81 @@ func (s *DeleteFlowLogsOutput) SetUnsuccessful(v []*UnsuccessfulItem) *DeleteFlo return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteFpgaImageRequest +type DeleteFpgaImageInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the AFI. + // + // FpgaImageId is a required field + FpgaImageId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteFpgaImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFpgaImageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteFpgaImageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteFpgaImageInput"} + if s.FpgaImageId == nil { + invalidParams.Add(request.NewErrParamRequired("FpgaImageId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteFpgaImageInput) SetDryRun(v bool) *DeleteFpgaImageInput { + s.DryRun = &v + return s +} + +// SetFpgaImageId sets the FpgaImageId field's value. +func (s *DeleteFpgaImageInput) SetFpgaImageId(v string) *DeleteFpgaImageInput { + s.FpgaImageId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteFpgaImageResult +type DeleteFpgaImageOutput struct { + _ struct{} `type:"structure"` + + // Is true if the request succeeds, and an error otherwise. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s DeleteFpgaImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFpgaImageOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. +func (s *DeleteFpgaImageOutput) SetReturn(v bool) *DeleteFpgaImageOutput { + s.Return = &v + return s +} + // Contains the parameters for DeleteInternetGateway. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteInternetGatewayRequest type DeleteInternetGatewayInput struct { @@ -29983,7 +30642,7 @@ type DescribeElasticGpusOutput struct { _ struct{} `type:"structure"` // Information about the Elastic GPUs. - ElasticGpuSet []*ElasticGpus `locationName:"elasticGpuSet" type:"list"` + ElasticGpuSet []*ElasticGpus `locationName:"elasticGpuSet" locationNameList:"item" type:"list"` // The total number of items to return. If the total number of items available // is more than the value specified in max-items then a Next-Token will be provided @@ -30174,6 +30833,95 @@ func (s *DescribeFlowLogsOutput) SetNextToken(v string) *DescribeFlowLogsOutput return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFpgaImageAttributeRequest +type DescribeFpgaImageAttributeInput struct { + _ struct{} `type:"structure"` + + // The AFI attribute. + // + // Attribute is a required field + Attribute *string `type:"string" required:"true" enum:"FpgaImageAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the AFI. + // + // FpgaImageId is a required field + FpgaImageId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeFpgaImageAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFpgaImageAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeFpgaImageAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeFpgaImageAttributeInput"} + if s.Attribute == nil { + invalidParams.Add(request.NewErrParamRequired("Attribute")) + } + if s.FpgaImageId == nil { + invalidParams.Add(request.NewErrParamRequired("FpgaImageId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttribute sets the Attribute field's value. +func (s *DescribeFpgaImageAttributeInput) SetAttribute(v string) *DescribeFpgaImageAttributeInput { + s.Attribute = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeFpgaImageAttributeInput) SetDryRun(v bool) *DescribeFpgaImageAttributeInput { + s.DryRun = &v + return s +} + +// SetFpgaImageId sets the FpgaImageId field's value. +func (s *DescribeFpgaImageAttributeInput) SetFpgaImageId(v string) *DescribeFpgaImageAttributeInput { + s.FpgaImageId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFpgaImageAttributeResult +type DescribeFpgaImageAttributeOutput struct { + _ struct{} `type:"structure"` + + // Information about the attribute. + FpgaImageAttribute *FpgaImageAttribute `locationName:"fpgaImageAttribute" type:"structure"` +} + +// String returns the string representation +func (s DescribeFpgaImageAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFpgaImageAttributeOutput) GoString() string { + return s.String() +} + +// SetFpgaImageAttribute sets the FpgaImageAttribute field's value. +func (s *DescribeFpgaImageAttributeOutput) SetFpgaImageAttribute(v *FpgaImageAttribute) *DescribeFpgaImageAttributeOutput { + s.FpgaImageAttribute = v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFpgaImagesRequest type DescribeFpgaImagesInput struct { _ struct{} `type:"structure"` @@ -31500,7 +32248,7 @@ type DescribeInstanceAttributeOutput struct { // EC2 console, CLI, or API; otherwise, you can. DisableApiTermination *AttributeBooleanValue `locationName:"disableApiTermination" type:"structure"` - // Indicates whether the instance is optimized for EBS I/O. + // Indicates whether the instance is optimized for Amazon EBS I/O. EbsOptimized *AttributeBooleanValue `locationName:"ebsOptimized" type:"structure"` // Indicates whether enhanced networking with ENA is enabled. @@ -31532,8 +32280,8 @@ type DescribeInstanceAttributeOutput struct { RootDeviceName *AttributeValue `locationName:"rootDeviceName" type:"structure"` // Indicates whether source/destination checking is enabled. A value of true - // means checking is enabled, and false means checking is disabled. This value - // must be false for a NAT instance to perform NAT. + // means that checking is enabled, and false means that checking is disabled. + // This value must be false for a NAT instance to perform NAT. SourceDestCheck *AttributeBooleanValue `locationName:"sourceDestCheck" type:"structure"` // Indicates whether enhanced networking with the Intel 82599 Virtual Function @@ -31964,10 +32712,10 @@ type DescribeInstancesInput struct { // | in-use). // // * network-interface.source-dest-check - Whether the network interface - // performs source/destination checking. A value of true means checking is - // enabled, and false means checking is disabled. The value must be false - // for the network interface to perform network address translation (NAT) - // in your VPC. 
+ // performs source/destination checking. A value of true means that checking + // is enabled, and false means that checking is disabled. The value must + // be false for the network interface to perform network address translation + // (NAT) in your VPC. // // * network-interface.subnet-id - The ID of the subnet for the network interface. // @@ -32002,9 +32750,9 @@ type DescribeInstancesInput struct { // ID is created any time you launch an instance. A reservation ID has a // one-to-one relationship with an instance launch request, but can be associated // with more than one instance if you launch multiple instances using the - // same launch request. For example, if you launch one instance, you'll get + // same launch request. For example, if you launch one instance, you get // one reservation ID. If you launch ten instances using the same launch - // request, you'll also get one reservation ID. + // request, you also get one reservation ID. // // * root-device-name - The name of the root device for the instance (for // example, /dev/sda1 or /dev/xvda). @@ -32014,10 +32762,10 @@ type DescribeInstancesInput struct { // // * source-dest-check - Indicates whether the instance performs source/destination // checking. A value of true means that checking is enabled, and false means - // checking is disabled. The value must be false for the instance to perform - // network address translation (NAT) in your VPC. + // that checking is disabled. The value must be false for the instance to + // perform network address translation (NAT) in your VPC. // - // * spot-instance-request-id - The ID of the Spot instance request. + // * spot-instance-request-id - The ID of the Spot Instance request. // // * state-reason-code - The reason code for the state change. // @@ -32034,9 +32782,8 @@ type DescribeInstancesInput struct { // independent of the tag-value filter. For example, if you use both the // filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources // assigned both the tag key Purpose (regardless of what the tag's value - // is), and the tag value X (regardless of what the tag's key is). If you - // want to list only resources where Purpose is X, see the tag:key=value - // filter. + // is), and the tag value X (regardless of the tag's key). If you want to + // list only resources where Purpose is X, see the tag:key=value filter. // // * tag-value - The value of a tag assigned to the resource. This filter // is independent of the tag-key filter. @@ -34477,6 +35224,14 @@ type DescribeSecurityGroupsInput struct { // // Default: Describes all your security groups. GroupNames []*string `locationName:"GroupName" locationNameList:"GroupName" type:"list"` + + // The maximum number of results to return in a single call. To retrieve the + // remaining results, make another request with the returned NextToken value. + // This value can be between 5 and 1000. + MaxResults *int64 `type:"integer"` + + // The token to request the next page of results. + NextToken *string `type:"string"` } // String returns the string representation @@ -34513,11 +35268,27 @@ func (s *DescribeSecurityGroupsInput) SetGroupNames(v []*string) *DescribeSecuri return s } +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeSecurityGroupsInput) SetMaxResults(v int64) *DescribeSecurityGroupsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *DescribeSecurityGroupsInput) SetNextToken(v string) *DescribeSecurityGroupsInput { + s.NextToken = &v + return s +} + // Contains the output of DescribeSecurityGroups. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSecurityGroupsResult type DescribeSecurityGroupsOutput struct { _ struct{} `type:"structure"` + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + // Information about one or more security groups. SecurityGroups []*SecurityGroup `locationName:"securityGroupInfo" locationNameList:"item" type:"list"` } @@ -34532,6 +35303,12 @@ func (s DescribeSecurityGroupsOutput) GoString() string { return s.String() } +// SetNextToken sets the NextToken field's value. +func (s *DescribeSecurityGroupsOutput) SetNextToken(v string) *DescribeSecurityGroupsOutput { + s.NextToken = &v + return s +} + // SetSecurityGroups sets the SecurityGroups field's value. func (s *DescribeSecurityGroupsOutput) SetSecurityGroups(v []*SecurityGroup) *DescribeSecurityGroupsOutput { s.SecurityGroups = v @@ -35305,34 +36082,34 @@ type DescribeSpotInstanceRequestsInput struct { // // * launch.key-name - The name of the key pair the instance launched with. // - // * launch.monitoring-enabled - Whether monitoring is enabled for the Spot - // instance. + // * launch.monitoring-enabled - Whether detailed monitoring is enabled for + // the Spot instance. // // * launch.ramdisk-id - The RAM disk ID. // - // * network-interface.network-interface-id - The ID of the network interface. + // * launched-availability-zone - The Availability Zone in which the bid + // is launched. // - // * network-interface.device-index - The index of the device for the network - // interface attachment on the instance. - // - // * network-interface.subnet-id - The ID of the subnet for the instance. - // - // * network-interface.description - A description of the network interface. - // - // * network-interface.private-ip-address - The primary private IP address - // of the network interface. + // * network-interface.addresses.primary - Indicates whether the IP address + // is the primary private IP address. // // * network-interface.delete-on-termination - Indicates whether the network // interface is deleted when the instance is terminated. // + // * network-interface.description - A description of the network interface. + // + // * network-interface.device-index - The index of the device for the network + // interface attachment on the instance. + // // * network-interface.group-id - The ID of the security group associated // with the network interface. // - // * network-interface.group-name - The name of the security group associated - // with the network interface. + // * network-interface.network-interface-id - The ID of the network interface. // - // * network-interface.addresses.primary - Indicates whether the IP address - // is the primary private IP address. + // * network-interface.private-ip-address - The primary private IP address + // of the network interface. + // + // * network-interface.subnet-id - The ID of the subnet for the instance. // // * product-description - The product description associated with the instance // (Linux/UNIX | Windows). @@ -35372,9 +36149,6 @@ type DescribeSpotInstanceRequestsInput struct { // // * type - The type of Spot instance request (one-time | persistent). 
// - // * launched-availability-zone - The Availability Zone in which the bid - // is launched. - // // * valid-from - The start date of the request. // // * valid-until - The end date of the request. @@ -37291,6 +38065,9 @@ type DescribeVpnGatewaysInput struct { // One or more filters. // + // * amazon-side-asn - The Autonomous System Number (ASN) for the Amazon + // side of the gateway. + // // * attachment.state - The current state of the attachment between the gateway // and the VPC (attaching | attached | detaching | detached). // @@ -39763,6 +40540,9 @@ type FpgaImage struct { // The product codes for the AFI. ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"` + // Indicates whether the AFI is public. + Public *bool `locationName:"public" type:"boolean"` + // The version of the AWS Shell that was used to create the bitstream. ShellVersion *string `locationName:"shellVersion" type:"string"` @@ -39840,6 +40620,12 @@ func (s *FpgaImage) SetProductCodes(v []*ProductCode) *FpgaImage { return s } +// SetPublic sets the Public field's value. +func (s *FpgaImage) SetPublic(v bool) *FpgaImage { + s.Public = &v + return s +} + // SetShellVersion sets the ShellVersion field's value. func (s *FpgaImage) SetShellVersion(v string) *FpgaImage { s.ShellVersion = &v @@ -39864,6 +40650,67 @@ func (s *FpgaImage) SetUpdateTime(v time.Time) *FpgaImage { return s } +// Describes an Amazon FPGA image (AFI) attribute. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/FpgaImageAttribute +type FpgaImageAttribute struct { + _ struct{} `type:"structure"` + + // The description of the AFI. + Description *string `locationName:"description" type:"string"` + + // The ID of the AFI. + FpgaImageId *string `locationName:"fpgaImageId" type:"string"` + + // One or more load permissions. + LoadPermissions []*LoadPermission `locationName:"loadPermissions" locationNameList:"item" type:"list"` + + // The name of the AFI. + Name *string `locationName:"name" type:"string"` + + // One or more product codes. + ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s FpgaImageAttribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FpgaImageAttribute) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *FpgaImageAttribute) SetDescription(v string) *FpgaImageAttribute { + s.Description = &v + return s +} + +// SetFpgaImageId sets the FpgaImageId field's value. +func (s *FpgaImageAttribute) SetFpgaImageId(v string) *FpgaImageAttribute { + s.FpgaImageId = &v + return s +} + +// SetLoadPermissions sets the LoadPermissions field's value. +func (s *FpgaImageAttribute) SetLoadPermissions(v []*LoadPermission) *FpgaImageAttribute { + s.LoadPermissions = v + return s +} + +// SetName sets the Name field's value. +func (s *FpgaImageAttribute) SetName(v string) *FpgaImageAttribute { + s.Name = &v + return s +} + +// SetProductCodes sets the ProductCodes field's value. +func (s *FpgaImageAttribute) SetProductCodes(v []*ProductCode) *FpgaImageAttribute { + s.ProductCodes = v + return s +} + // Describes the state of the bitstream generation process for an Amazon FPGA // image (AFI). 
// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/FpgaImageState @@ -40163,7 +41010,7 @@ type GetHostReservationPurchasePreviewOutput struct { // The purchase information of the Dedicated Host Reservation and the Dedicated // Hosts associated with it. - Purchase []*Purchase `locationName:"purchase" type:"list"` + Purchase []*Purchase `locationName:"purchase" locationNameList:"item" type:"list"` // The potential total hourly price of the reservation per hour. TotalHourlyPrice *string `locationName:"totalHourlyPrice" type:"string"` @@ -40266,7 +41113,8 @@ type GetPasswordDataOutput struct { // The ID of the Windows instance. InstanceId *string `locationName:"instanceId" type:"string"` - // The password of the instance. + // The password of the instance. Returns an empty string if the password is + // not available. PasswordData *string `locationName:"passwordData" type:"string"` // The time the data was last updated. @@ -40317,7 +41165,7 @@ type GetReservedInstancesExchangeQuoteInput struct { // ReservedInstanceIds is a required field ReservedInstanceIds []*string `locationName:"ReservedInstanceId" locationNameList:"ReservedInstanceId" type:"list" required:"true"` - // The configuration requirements of the Convertible Reserved Instances to exchange + // The configuration of the target Convertible Reserved Instance to exchange // for your current Convertible Reserved Instances. TargetConfigurations []*TargetConfigurationRequest `locationName:"TargetConfiguration" locationNameList:"TargetConfigurationRequest" type:"list"` } @@ -42690,7 +43538,7 @@ type Instance struct { // The idempotency token you provided when you launched the instance, if applicable. ClientToken *string `locationName:"clientToken" type:"string"` - // Indicates whether the instance is optimized for EBS I/O. This optimization + // Indicates whether the instance is optimized for Amazon EBS I/O. This optimization // provides dedicated throughput to Amazon EBS and an optimized configuration // stack to provide optimal I/O performance. This optimization isn't available // with all instance types. Additional usage charges apply when using an EBS @@ -42715,7 +43563,7 @@ type Instance struct { // The ID of the instance. InstanceId *string `locationName:"instanceId" type:"string"` - // Indicates whether this is a Spot instance or a Scheduled Instance. + // Indicates whether this is a Spot Instance or a Scheduled Instance. InstanceLifecycle *string `locationName:"instanceLifecycle" type:"string" enum:"InstanceLifecycleType"` // The instance type. @@ -42747,7 +43595,7 @@ type Instance struct { // DNS hostname can only be used inside the Amazon EC2 network. This name is // not available until the instance enters the running state. // - // [EC2-VPC] The Amazon-provided DNS server will resolve Amazon-provided private + // [EC2-VPC] The Amazon-provided DNS server resolves Amazon-provided private // DNS hostnames if you've enabled DNS resolution and DNS hostnames in your // VPC. If you are not using the Amazon-provided DNS server in your VPC, your // custom domain name servers must resolve the hostname as appropriate. @@ -42782,13 +43630,13 @@ type Instance struct { // Specifies whether to enable an instance launched in a VPC to perform NAT. // This controls whether source/destination checking is enabled on the instance. - // A value of true means checking is enabled, and false means checking is disabled. - // The value must be false for the instance to perform NAT. 
For more information, - // see NAT Instances (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html) + // A value of true means that checking is enabled, and false means that checking + // is disabled. The value must be false for the instance to perform NAT. For + // more information, see NAT Instances (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html) // in the Amazon Virtual Private Cloud User Guide. SourceDestCheck *bool `locationName:"sourceDestCheck" type:"boolean"` - // If the request is a Spot instance request, the ID of the request. + // If the request is a Spot Instance request, the ID of the request. SpotInstanceRequestId *string `locationName:"spotInstanceRequestId" type:"string"` // Specifies whether enhanced networking with the Intel 82599 Virtual Function @@ -44174,7 +45022,7 @@ func (s *InternetGatewayAttachment) SetVpcId(v string) *InternetGatewayAttachmen return s } -// Describes a security group rule. +// Describes a set of permissions for a security group rule. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/IpPermission type IpPermission struct { _ struct{} `type:"structure"` @@ -44273,7 +45121,7 @@ type IpRange struct { _ struct{} `type:"structure"` // The IPv4 CIDR range. You can either specify a CIDR range or a source security - // group, not both. To specify a single IPv4 address, use the /32 prefix. + // group, not both. To specify a single IPv4 address, use the /32 prefix length. CidrIp *string `locationName:"cidrIp" type:"string"` // A description for the security group rule that references this IPv4 address @@ -44337,7 +45185,7 @@ type Ipv6Range struct { _ struct{} `type:"structure"` // The IPv6 CIDR range. You can either specify a CIDR range or a source security - // group, not both. To specify a single IPv6 address, use the /128 prefix. + // group, not both. To specify a single IPv6 address, use the /128 prefix length. CidrIpv6 *string `locationName:"cidrIpv6" type:"string"` // A description for the security group rule that references this IPv6 address @@ -44485,9 +45333,6 @@ type LaunchSpecification struct { AddressingType *string `locationName:"addressingType" type:"string"` // One or more block device mapping entries. - // - // Although you can specify encrypted EBS volumes in this block device mapping - // for your Spot Instances, these volumes are not encrypted. BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` // Indicates whether the instance is optimized for EBS I/O. This optimization @@ -44641,6 +45486,259 @@ func (s *LaunchSpecification) SetUserData(v string) *LaunchSpecification { return s } +// Describes a load permission. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LoadPermission +type LoadPermission struct { + _ struct{} `type:"structure"` + + // The name of the group. + Group *string `locationName:"group" type:"string" enum:"PermissionGroup"` + + // The AWS account ID. + UserId *string `locationName:"userId" type:"string"` +} + +// String returns the string representation +func (s LoadPermission) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoadPermission) GoString() string { + return s.String() +} + +// SetGroup sets the Group field's value. +func (s *LoadPermission) SetGroup(v string) *LoadPermission { + s.Group = &v + return s +} + +// SetUserId sets the UserId field's value. 
+func (s *LoadPermission) SetUserId(v string) *LoadPermission { + s.UserId = &v + return s +} + +// Describes modifications to the load permissions of an Amazon FPGA image (AFI). +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LoadPermissionModifications +type LoadPermissionModifications struct { + _ struct{} `type:"structure"` + + // The load permissions to add. + Add []*LoadPermissionRequest `locationNameList:"item" type:"list"` + + // The load permissions to remove. + Remove []*LoadPermissionRequest `locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s LoadPermissionModifications) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoadPermissionModifications) GoString() string { + return s.String() +} + +// SetAdd sets the Add field's value. +func (s *LoadPermissionModifications) SetAdd(v []*LoadPermissionRequest) *LoadPermissionModifications { + s.Add = v + return s +} + +// SetRemove sets the Remove field's value. +func (s *LoadPermissionModifications) SetRemove(v []*LoadPermissionRequest) *LoadPermissionModifications { + s.Remove = v + return s +} + +// Describes a load permission. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/LoadPermissionRequest +type LoadPermissionRequest struct { + _ struct{} `type:"structure"` + + // The name of the group. + Group *string `type:"string" enum:"PermissionGroup"` + + // The AWS account ID. + UserId *string `type:"string"` +} + +// String returns the string representation +func (s LoadPermissionRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoadPermissionRequest) GoString() string { + return s.String() +} + +// SetGroup sets the Group field's value. +func (s *LoadPermissionRequest) SetGroup(v string) *LoadPermissionRequest { + s.Group = &v + return s +} + +// SetUserId sets the UserId field's value. +func (s *LoadPermissionRequest) SetUserId(v string) *LoadPermissionRequest { + s.UserId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyFpgaImageAttributeRequest +type ModifyFpgaImageAttributeInput struct { + _ struct{} `type:"structure"` + + // The name of the attribute. + Attribute *string `type:"string" enum:"FpgaImageAttributeName"` + + // A description for the AFI. + Description *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the AFI. + // + // FpgaImageId is a required field + FpgaImageId *string `type:"string" required:"true"` + + // The load permission for the AFI. + LoadPermission *LoadPermissionModifications `type:"structure"` + + // A name for the AFI. + Name *string `type:"string"` + + // The operation type. + OperationType *string `type:"string" enum:"OperationType"` + + // One or more product codes. After you add a product code to an AFI, it can't + // be removed. This parameter is valid only when modifying the productCodes + // attribute. + ProductCodes []*string `locationName:"ProductCode" locationNameList:"ProductCode" type:"list"` + + // One or more user groups. This parameter is valid only when modifying the + // loadPermission attribute. 
+ UserGroups []*string `locationName:"UserGroup" locationNameList:"UserGroup" type:"list"` + + // One or more AWS account IDs. This parameter is valid only when modifying + // the loadPermission attribute. + UserIds []*string `locationName:"UserId" locationNameList:"UserId" type:"list"` +} + +// String returns the string representation +func (s ModifyFpgaImageAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyFpgaImageAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyFpgaImageAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyFpgaImageAttributeInput"} + if s.FpgaImageId == nil { + invalidParams.Add(request.NewErrParamRequired("FpgaImageId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttribute sets the Attribute field's value. +func (s *ModifyFpgaImageAttributeInput) SetAttribute(v string) *ModifyFpgaImageAttributeInput { + s.Attribute = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *ModifyFpgaImageAttributeInput) SetDescription(v string) *ModifyFpgaImageAttributeInput { + s.Description = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyFpgaImageAttributeInput) SetDryRun(v bool) *ModifyFpgaImageAttributeInput { + s.DryRun = &v + return s +} + +// SetFpgaImageId sets the FpgaImageId field's value. +func (s *ModifyFpgaImageAttributeInput) SetFpgaImageId(v string) *ModifyFpgaImageAttributeInput { + s.FpgaImageId = &v + return s +} + +// SetLoadPermission sets the LoadPermission field's value. +func (s *ModifyFpgaImageAttributeInput) SetLoadPermission(v *LoadPermissionModifications) *ModifyFpgaImageAttributeInput { + s.LoadPermission = v + return s +} + +// SetName sets the Name field's value. +func (s *ModifyFpgaImageAttributeInput) SetName(v string) *ModifyFpgaImageAttributeInput { + s.Name = &v + return s +} + +// SetOperationType sets the OperationType field's value. +func (s *ModifyFpgaImageAttributeInput) SetOperationType(v string) *ModifyFpgaImageAttributeInput { + s.OperationType = &v + return s +} + +// SetProductCodes sets the ProductCodes field's value. +func (s *ModifyFpgaImageAttributeInput) SetProductCodes(v []*string) *ModifyFpgaImageAttributeInput { + s.ProductCodes = v + return s +} + +// SetUserGroups sets the UserGroups field's value. +func (s *ModifyFpgaImageAttributeInput) SetUserGroups(v []*string) *ModifyFpgaImageAttributeInput { + s.UserGroups = v + return s +} + +// SetUserIds sets the UserIds field's value. +func (s *ModifyFpgaImageAttributeInput) SetUserIds(v []*string) *ModifyFpgaImageAttributeInput { + s.UserIds = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyFpgaImageAttributeResult +type ModifyFpgaImageAttributeOutput struct { + _ struct{} `type:"structure"` + + // Information about the attribute. + FpgaImageAttribute *FpgaImageAttribute `locationName:"fpgaImageAttribute" type:"structure"` +} + +// String returns the string representation +func (s ModifyFpgaImageAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyFpgaImageAttributeOutput) GoString() string { + return s.String() +} + +// SetFpgaImageAttribute sets the FpgaImageAttribute field's value. 
+func (s *ModifyFpgaImageAttributeOutput) SetFpgaImageAttribute(v *FpgaImageAttribute) *ModifyFpgaImageAttributeOutput { + s.FpgaImageAttribute = v + return s +} + // Contains the parameters for ModifyHosts. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyHostsRequest type ModifyHostsInput struct { @@ -44889,10 +45987,11 @@ func (s ModifyIdentityIdFormatOutput) GoString() string { type ModifyImageAttributeInput struct { _ struct{} `type:"structure"` - // The name of the attribute to modify. + // The name of the attribute to modify. The valid values are description, launchPermission, + // and productCodes. Attribute *string `type:"string"` - // A description for the AMI. + // A new description for the AMI. Description *AttributeValue `type:"structure"` // Checks whether you have the required permissions for the action, without @@ -44906,26 +46005,27 @@ type ModifyImageAttributeInput struct { // ImageId is a required field ImageId *string `type:"string" required:"true"` - // A launch permission modification. + // A new launch permission for the AMI. LaunchPermission *LaunchPermissionModifications `type:"structure"` - // The operation type. + // The operation type. This parameter can be used only when the Attribute parameter + // is launchPermission. OperationType *string `type:"string" enum:"OperationType"` - // One or more product codes. After you add a product code to an AMI, it can't - // be removed. This is only valid when modifying the productCodes attribute. + // One or more DevPay product codes. After you add a product code to an AMI, + // it can't be removed. ProductCodes []*string `locationName:"ProductCode" locationNameList:"ProductCode" type:"list"` - // One or more user groups. This is only valid when modifying the launchPermission - // attribute. + // One or more user groups. This parameter can be used only when the Attribute + // parameter is launchPermission. UserGroups []*string `locationName:"UserGroup" locationNameList:"UserGroup" type:"list"` - // One or more AWS account IDs. This is only valid when modifying the launchPermission - // attribute. + // One or more AWS account IDs. This parameter can be used only when the Attribute + // parameter is launchPermission. UserIds []*string `locationName:"UserId" locationNameList:"UserId" type:"list"` - // The value of the attribute being modified. This is only valid when modifying - // the description attribute. + // The value of the attribute being modified. This parameter can be used only + // when the Attribute parameter is description or productCodes. Value *string `type:"string"` } @@ -45057,7 +46157,7 @@ type ModifyInstanceAttributeInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` - // Specifies whether the instance is optimized for EBS I/O. This optimization + // Specifies whether the instance is optimized for Amazon EBS I/O. This optimization // provides dedicated throughput to Amazon EBS and an optimized configuration // stack to provide optimal EBS I/O performance. This optimization isn't available // with all instance types. Additional usage charges apply when using an EBS @@ -45100,8 +46200,8 @@ type ModifyInstanceAttributeInput struct { Ramdisk *AttributeValue `locationName:"ramdisk" type:"structure"` // Specifies whether source/destination checking is enabled. A value of true - // means that checking is enabled, and false means checking is disabled. This - // value must be false for a NAT instance to perform NAT. 
+ // means that checking is enabled, and false means that checking is disabled. + // This value must be false for a NAT instance to perform NAT. SourceDestCheck *AttributeBooleanValue `type:"structure"` // Set to simple to enable enhanced networking with the Intel 82599 Virtual @@ -45115,8 +46215,8 @@ type ModifyInstanceAttributeInput struct { SriovNetSupport *AttributeValue `locationName:"sriovNetSupport" type:"structure"` // Changes the instance's user data to the specified value. If you are using - // an AWS SDK or command line tool, Base64-encoding is performed for you, and - // you can load the text from a file. Otherwise, you must provide Base64-encoded + // an AWS SDK or command line tool, base64-encoding is performed for you, and + // you can load the text from a file. Otherwise, you must provide base64-encoded // text. UserData *BlobAttributeValue `locationName:"userData" type:"structure"` @@ -46327,6 +47427,97 @@ func (s *ModifyVpcPeeringConnectionOptionsOutput) SetRequesterPeeringConnectionO return s } +// Contains the parameters for ModifyVpcTenancy. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcTenancyRequest +type ModifyVpcTenancyInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the operation, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The instance tenancy attribute for the VPC. + // + // InstanceTenancy is a required field + InstanceTenancy *string `type:"string" required:"true" enum:"VpcTenancy"` + + // The ID of the VPC. + // + // VpcId is a required field + VpcId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyVpcTenancyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpcTenancyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyVpcTenancyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyVpcTenancyInput"} + if s.InstanceTenancy == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceTenancy")) + } + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyVpcTenancyInput) SetDryRun(v bool) *ModifyVpcTenancyInput { + s.DryRun = &v + return s +} + +// SetInstanceTenancy sets the InstanceTenancy field's value. +func (s *ModifyVpcTenancyInput) SetInstanceTenancy(v string) *ModifyVpcTenancyInput { + s.InstanceTenancy = &v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *ModifyVpcTenancyInput) SetVpcId(v string) *ModifyVpcTenancyInput { + s.VpcId = &v + return s +} + +// Contains the output of ModifyVpcTenancy. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpcTenancyResult +type ModifyVpcTenancyOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, returns an error. 
+ ReturnValue *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s ModifyVpcTenancyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpcTenancyOutput) GoString() string { + return s.String() +} + +// SetReturnValue sets the ReturnValue field's value. +func (s *ModifyVpcTenancyOutput) SetReturnValue(v bool) *ModifyVpcTenancyOutput { + s.ReturnValue = &v + return s +} + // Contains the parameters for MonitorInstances. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/MonitorInstancesRequest type MonitorInstancesInput struct { @@ -48443,7 +49634,7 @@ type PurchaseHostReservationOutput struct { CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"` // Describes the details of the purchase. - Purchase []*Purchase `locationName:"purchase" type:"list"` + Purchase []*Purchase `locationName:"purchase" locationNameList:"item" type:"list"` // The total hourly price of the reservation calculated per hour. TotalHourlyPrice *string `locationName:"totalHourlyPrice" type:"string"` @@ -49887,7 +51078,7 @@ type ReportInstanceStatusInput struct { // Instances is a required field Instances []*string `locationName:"instanceId" locationNameList:"InstanceId" type:"list" required:"true"` - // One or more reason codes that describes the health state of your instance. + // One or more reason codes that describe the health state of your instance. // // * instance-stuck-in-state: My instance is stuck in a state. // @@ -49898,13 +51089,13 @@ type ReportInstanceStatusInput struct { // * password-not-available: A password is not available for my instance. // // * performance-network: My instance is experiencing performance problems - // which I believe are network related. + // that I believe are network related. // // * performance-instance-store: My instance is experiencing performance - // problems which I believe are related to the instance stores. + // problems that I believe are related to the instance stores. // // * performance-ebs-volume: My instance is experiencing performance problems - // which I believe are related to an EBS volume. + // that I believe are related to an EBS volume. // // * performance-other: My instance is experiencing performance problems. // @@ -50147,6 +51338,9 @@ type RequestSpotInstancesInput struct { // Default: 1 InstanceCount *int64 `locationName:"instanceCount" type:"integer"` + // Indicates whether a Spot instance stops or terminates when it is interrupted. + InstanceInterruptionBehavior *string `type:"string" enum:"InstanceInterruptionBehavior"` + // The instance launch group. Launch groups are Spot instances that launch together // and terminate together. // @@ -50243,6 +51437,12 @@ func (s *RequestSpotInstancesInput) SetInstanceCount(v int64) *RequestSpotInstan return s } +// SetInstanceInterruptionBehavior sets the InstanceInterruptionBehavior field's value. +func (s *RequestSpotInstancesInput) SetInstanceInterruptionBehavior(v string) *RequestSpotInstancesInput { + s.InstanceInterruptionBehavior = &v + return s +} + // SetLaunchGroup sets the LaunchGroup field's value. func (s *RequestSpotInstancesInput) SetLaunchGroup(v string) *RequestSpotInstancesInput { s.LaunchGroup = &v @@ -50313,9 +51513,6 @@ type RequestSpotLaunchSpecification struct { AddressingType *string `locationName:"addressingType" type:"string"` // One or more block device mapping entries. 
- // - // Although you can specify encrypted EBS volumes in this block device mapping - // for your Spot Instances, these volumes are not encrypted. BlockDeviceMappings []*BlockDeviceMapping `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` // Indicates whether the instance is optimized for EBS I/O. This optimization @@ -51350,6 +52547,90 @@ func (s *ReservedInstancesOffering) SetUsagePrice(v float64) *ReservedInstancesO return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetFpgaImageAttributeRequest +type ResetFpgaImageAttributeInput struct { + _ struct{} `type:"structure"` + + // The attribute. + Attribute *string `type:"string" enum:"ResetFpgaImageAttributeName"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the AFI. + // + // FpgaImageId is a required field + FpgaImageId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ResetFpgaImageAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetFpgaImageAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResetFpgaImageAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResetFpgaImageAttributeInput"} + if s.FpgaImageId == nil { + invalidParams.Add(request.NewErrParamRequired("FpgaImageId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttribute sets the Attribute field's value. +func (s *ResetFpgaImageAttributeInput) SetAttribute(v string) *ResetFpgaImageAttributeInput { + s.Attribute = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ResetFpgaImageAttributeInput) SetDryRun(v bool) *ResetFpgaImageAttributeInput { + s.DryRun = &v + return s +} + +// SetFpgaImageId sets the FpgaImageId field's value. +func (s *ResetFpgaImageAttributeInput) SetFpgaImageId(v string) *ResetFpgaImageAttributeInput { + s.FpgaImageId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetFpgaImageAttributeResult +type ResetFpgaImageAttributeOutput struct { + _ struct{} `type:"structure"` + + // Is true if the request succeeds, and an error otherwise. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s ResetFpgaImageAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetFpgaImageAttributeOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. +func (s *ResetFpgaImageAttributeOutput) SetReturn(v bool) *ResetFpgaImageAttributeOutput { + s.Return = &v + return s +} + // Contains the parameters for ResetImageAttribute. // Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ResetImageAttributeRequest type ResetImageAttributeInput struct { @@ -51765,8 +53046,7 @@ func (s *RestoreAddressToClassicOutput) SetStatus(v string) *RestoreAddressToCla type RevokeSecurityGroupEgressInput struct { _ struct{} `type:"structure"` - // The CIDR IP address range. 
We recommend that you specify the CIDR range in - // a set of IP permissions instead. + // Not supported. Use a set of IP permissions to specify the CIDR. CidrIp *string `locationName:"cidrIp" type:"string"` // Checks whether you have the required permissions for the action, without @@ -51775,8 +53055,7 @@ type RevokeSecurityGroupEgressInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` - // The start of port range for the TCP and UDP protocols, or an ICMP type number. - // We recommend that you specify the port range in a set of IP permissions instead. + // Not supported. Use a set of IP permissions to specify the port. FromPort *int64 `locationName:"fromPort" type:"integer"` // The ID of the security group. @@ -51784,26 +53063,23 @@ type RevokeSecurityGroupEgressInput struct { // GroupId is a required field GroupId *string `locationName:"groupId" type:"string" required:"true"` - // A set of IP permissions. You can't specify a destination security group and - // a CIDR IP address range. + // One or more sets of IP permissions. You can't specify a destination security + // group and a CIDR IP address range in the same set of permissions. IpPermissions []*IpPermission `locationName:"ipPermissions" locationNameList:"item" type:"list"` - // The IP protocol name or number. We recommend that you specify the protocol - // in a set of IP permissions instead. + // Not supported. Use a set of IP permissions to specify the protocol name or + // number. IpProtocol *string `locationName:"ipProtocol" type:"string"` - // The name of a destination security group. To revoke outbound access to a - // destination security group, we recommend that you use a set of IP permissions - // instead. + // Not supported. Use a set of IP permissions to specify a destination security + // group. SourceSecurityGroupName *string `locationName:"sourceSecurityGroupName" type:"string"` - // The AWS account number for a destination security group. To revoke outbound - // access to a destination security group, we recommend that you use a set of - // IP permissions instead. + // Not supported. Use a set of IP permissions to specify a destination security + // group. SourceSecurityGroupOwnerId *string `locationName:"sourceSecurityGroupOwnerId" type:"string"` - // The end of port range for the TCP and UDP protocols, or an ICMP type number. - // We recommend that you specify the port range in a set of IP permissions instead. + // Not supported. Use a set of IP permissions to specify the port. ToPort *int64 `locationName:"toPort" type:"integer"` } @@ -51918,15 +53194,17 @@ type RevokeSecurityGroupIngressInput struct { // For the ICMP type number, use -1 to specify all ICMP types. FromPort *int64 `type:"integer"` - // The ID of the security group. Required for a security group in a nondefault - // VPC. + // The ID of the security group. You must specify either the security group + // ID or the security group name in the request. For security groups in a nondefault + // VPC, you must specify the security group ID. GroupId *string `type:"string"` - // [EC2-Classic, default VPC] The name of the security group. + // [EC2-Classic, default VPC] The name of the security group. You must specify + // either the security group ID or the security group name in the request. GroupName *string `type:"string"` - // A set of IP permissions. You can't specify a source security group and a - // CIDR IP address range. + // One or more sets of IP permissions. 
You can't specify a source security group + // and a CIDR IP address range in the same set of permissions. IpPermissions []*IpPermission `locationNameList:"item" type:"list"` // The IP protocol name (tcp, udp, icmp) or number (see Protocol Numbers (http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml)). @@ -52332,11 +53610,11 @@ type RunInstancesInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` - // Indicates whether the instance is optimized for EBS I/O. This optimization + // Indicates whether the instance is optimized for Amazon EBS I/O. This optimization // provides dedicated throughput to Amazon EBS and an optimized configuration - // stack to provide optimal EBS I/O performance. This optimization isn't available - // with all instance types. Additional usage charges apply when using an EBS-optimized - // instance. + // stack to provide optimal Amazon EBS I/O performance. This optimization isn't + // available with all instance types. Additional usage charges apply when using + // an EBS-optimized instance. // // Default: false EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"` @@ -52463,9 +53741,9 @@ type RunInstancesInput struct { // The user data to make available to the instance. For more information, see // Running Commands on Your Linux Instance at Launch (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) // (Linux) and Adding User Data (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2-instance-metadata.html#instancedata-add-user-data) - // (Windows). If you are using an AWS SDK or command line tool, Base64-encoding - // is performed for you, and you can load the text from a file. Otherwise, you - // must provide Base64-encoded text. + // (Windows). If you are using a command line tool, base64-encoding is performed + // for you, and you can load the text from a file. Otherwise, you must provide + // base64-encoded text. UserData *string `type:"string"` } @@ -54984,6 +56262,9 @@ type SpotFleetRequestConfigData struct { // IamFleetRole is a required field IamFleetRole *string `locationName:"iamFleetRole" type:"string" required:"true"` + // Indicates whether a Spot instance stops or terminates when it is interrupted. + InstanceInterruptionBehavior *string `locationName:"instanceInterruptionBehavior" type:"string" enum:"InstanceInterruptionBehavior"` + // Information about the launch specifications for the Spot fleet request. // // LaunchSpecifications is a required field @@ -55103,6 +56384,12 @@ func (s *SpotFleetRequestConfigData) SetIamFleetRole(v string) *SpotFleetRequest return s } +// SetInstanceInterruptionBehavior sets the InstanceInterruptionBehavior field's value. +func (s *SpotFleetRequestConfigData) SetInstanceInterruptionBehavior(v string) *SpotFleetRequestConfigData { + s.InstanceInterruptionBehavior = &v + return s +} + // SetLaunchSpecifications sets the LaunchSpecifications field's value. func (s *SpotFleetRequestConfigData) SetLaunchSpecifications(v []*SpotFleetLaunchSpecification) *SpotFleetRequestConfigData { s.LaunchSpecifications = v @@ -55214,6 +56501,9 @@ type SpotInstanceRequest struct { // request. InstanceId *string `locationName:"instanceId" type:"string"` + // Indicates whether a Spot instance stops or terminates when it is interrupted. + InstanceInterruptionBehavior *string `locationName:"instanceInterruptionBehavior" type:"string" enum:"InstanceInterruptionBehavior"` + // The instance launch group. 
Launch groups are Spot instances that launch together // and terminate together. LaunchGroup *string `locationName:"launchGroup" type:"string"` @@ -55306,6 +56596,12 @@ func (s *SpotInstanceRequest) SetInstanceId(v string) *SpotInstanceRequest { return s } +// SetInstanceInterruptionBehavior sets the InstanceInterruptionBehavior field's value. +func (s *SpotInstanceRequest) SetInstanceInterruptionBehavior(v string) *SpotInstanceRequest { + s.InstanceInterruptionBehavior = &v + return s +} + // SetLaunchGroup sets the LaunchGroup field's value. func (s *SpotInstanceRequest) SetLaunchGroup(v string) *SpotInstanceRequest { s.LaunchGroup = &v @@ -55818,7 +57114,7 @@ type StateReason struct { // // * Server.ScheduledStop: The instance was stopped due to a scheduled retirement. // - // * Server.SpotInstanceTermination: A Spot instance was terminated due to + // * Server.SpotInstanceTermination: A Spot Instance was terminated due to // an increase in the market price. // // * Client.InternalError: A client error caused the instance to terminate @@ -58507,6 +59803,12 @@ func (s *VpcPeeringConnectionVpcInfo) SetVpcId(v string) *VpcPeeringConnectionVp type VpnConnection struct { _ struct{} `type:"structure"` + // The category of the VPN connection. A value of VPN indicates an AWS VPN connection. + // A value of VPN-Classic indicates an AWS Classic VPN connection. For more + // information, see AWS Managed VPN Categories (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html#vpn-categories) + // in the Amazon Virtual Private Cloud User Guide. + Category *string `locationName:"category" type:"string"` + // The configuration information for the VPN connection's customer gateway (in // the native XML format). This element is always present in the CreateVpnConnection // response; however, it's present in the DescribeVpnConnections response only @@ -58551,6 +59853,12 @@ func (s VpnConnection) GoString() string { return s.String() } +// SetCategory sets the Category field's value. +func (s *VpnConnection) SetCategory(v string) *VpnConnection { + s.Category = &v + return s +} + // SetCustomerGatewayConfiguration sets the CustomerGatewayConfiguration field's value. func (s *VpnConnection) SetCustomerGatewayConfiguration(v string) *VpnConnection { s.CustomerGatewayConfiguration = &v @@ -58642,9 +59950,15 @@ func (s *VpnConnectionOptions) SetStaticRoutesOnly(v bool) *VpnConnectionOptions type VpnConnectionOptionsSpecification struct { _ struct{} `type:"structure"` - // Indicates whether the VPN connection uses static routes only. Static routes - // must be used for devices that don't support BGP. + // Indicate whether the VPN connection uses static routes only. If you are creating + // a VPN connection for a device that does not support BGP, you must specify + // true. + // + // Default: false StaticRoutesOnly *bool `locationName:"staticRoutesOnly" type:"boolean"` + + // The tunnel options for the VPN connection. + TunnelOptions []*VpnTunnelOptionsSpecification `locationNameList:"item" type:"list"` } // String returns the string representation @@ -58663,11 +59977,20 @@ func (s *VpnConnectionOptionsSpecification) SetStaticRoutesOnly(v bool) *VpnConn return s } +// SetTunnelOptions sets the TunnelOptions field's value. +func (s *VpnConnectionOptionsSpecification) SetTunnelOptions(v []*VpnTunnelOptionsSpecification) *VpnConnectionOptionsSpecification { + s.TunnelOptions = v + return s +} + // Describes a virtual private gateway. 
// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/VpnGateway type VpnGateway struct { _ struct{} `type:"structure"` + // The private Autonomous System Number (ASN) for the Amazon side of a BGP session. + AmazonSideAsn *int64 `locationName:"amazonSideAsn" type:"long"` + // The Availability Zone where the virtual private gateway was created, if applicable. // This field may be empty or not returned. AvailabilityZone *string `locationName:"availabilityZone" type:"string"` @@ -58698,6 +60021,12 @@ func (s VpnGateway) GoString() string { return s.String() } +// SetAmazonSideAsn sets the AmazonSideAsn field's value. +func (s *VpnGateway) SetAmazonSideAsn(v int64) *VpnGateway { + s.AmazonSideAsn = &v + return s +} + // SetAvailabilityZone sets the AvailabilityZone field's value. func (s *VpnGateway) SetAvailabilityZone(v string) *VpnGateway { s.AvailabilityZone = &v @@ -58777,6 +60106,63 @@ func (s *VpnStaticRoute) SetState(v string) *VpnStaticRoute { return s } +// The tunnel options for a VPN connection. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/VpnTunnelOptionsSpecification +type VpnTunnelOptionsSpecification struct { + _ struct{} `type:"structure"` + + // The pre-shared key (PSK) to establish initial authentication between the + // virtual private gateway and customer gateway. + // + // Constraints: Allowed characters are alphanumeric characters and ._. Must + // be between 8 and 64 characters in length and cannot start with zero (0). + PreSharedKey *string `type:"string"` + + // The range of inside IP addresses for the tunnel. Any specified CIDR blocks + // must be unique across all VPN connections that use the same virtual private + // gateway. + // + // Constraints: A size /30 CIDR block from the 169.254.0.0/16 range. The following + // CIDR blocks are reserved and cannot be used: + // + // * 169.254.0.0/30 + // + // * 169.254.1.0/30 + // + // * 169.254.2.0/30 + // + // * 169.254.3.0/30 + // + // * 169.254.4.0/30 + // + // * 169.254.5.0/30 + // + // * 169.254.169.252/30 + TunnelInsideCidr *string `type:"string"` +} + +// String returns the string representation +func (s VpnTunnelOptionsSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpnTunnelOptionsSpecification) GoString() string { + return s.String() +} + +// SetPreSharedKey sets the PreSharedKey field's value. +func (s *VpnTunnelOptionsSpecification) SetPreSharedKey(v string) *VpnTunnelOptionsSpecification { + s.PreSharedKey = &v + return s +} + +// SetTunnelInsideCidr sets the TunnelInsideCidr field's value. 
+func (s *VpnTunnelOptionsSpecification) SetTunnelInsideCidr(v string) *VpnTunnelOptionsSpecification { + s.TunnelInsideCidr = &v + return s +} + const ( // AccountAttributeNameSupportedPlatforms is a AccountAttributeName enum value AccountAttributeNameSupportedPlatforms = "supported-platforms" @@ -59105,6 +60491,20 @@ const ( FlowLogsResourceTypeNetworkInterface = "NetworkInterface" ) +const ( + // FpgaImageAttributeNameDescription is a FpgaImageAttributeName enum value + FpgaImageAttributeNameDescription = "description" + + // FpgaImageAttributeNameName is a FpgaImageAttributeName enum value + FpgaImageAttributeNameName = "name" + + // FpgaImageAttributeNameLoadPermission is a FpgaImageAttributeName enum value + FpgaImageAttributeNameLoadPermission = "loadPermission" + + // FpgaImageAttributeNameProductCodes is a FpgaImageAttributeName enum value + FpgaImageAttributeNameProductCodes = "productCodes" +) + const ( // FpgaImageStateCodePending is a FpgaImageStateCode enum value FpgaImageStateCodePending = "pending" @@ -59263,6 +60663,14 @@ const ( InstanceHealthStatusUnhealthy = "unhealthy" ) +const ( + // InstanceInterruptionBehaviorStop is a InstanceInterruptionBehavior enum value + InstanceInterruptionBehaviorStop = "stop" + + // InstanceInterruptionBehaviorTerminate is a InstanceInterruptionBehavior enum value + InstanceInterruptionBehaviorTerminate = "terminate" +) + const ( // InstanceLifecycleTypeSpot is a InstanceLifecycleType enum value InstanceLifecycleTypeSpot = "spot" @@ -59517,6 +60925,15 @@ const ( // InstanceTypeP216xlarge is a InstanceType enum value InstanceTypeP216xlarge = "p2.16xlarge" + // InstanceTypeP32xlarge is a InstanceType enum value + InstanceTypeP32xlarge = "p3.2xlarge" + + // InstanceTypeP38xlarge is a InstanceType enum value + InstanceTypeP38xlarge = "p3.8xlarge" + + // InstanceTypeP316xlarge is a InstanceType enum value + InstanceTypeP316xlarge = "p3.16xlarge" + // InstanceTypeD2Xlarge is a InstanceType enum value InstanceTypeD2Xlarge = "d2.xlarge" @@ -59829,6 +61246,11 @@ const ( ReservedInstanceStateRetired = "retired" ) +const ( + // ResetFpgaImageAttributeNameLoadPermission is a ResetFpgaImageAttributeName enum value + ResetFpgaImageAttributeNameLoadPermission = "loadPermission" +) + const ( // ResetImageAttributeNameLaunchPermission is a ResetImageAttributeName enum value ResetImageAttributeNameLaunchPermission = "launchPermission" @@ -60250,6 +61672,11 @@ const ( VpcStateAvailable = "available" ) +const ( + // VpcTenancyDefault is a VpcTenancy enum value + VpcTenancyDefault = "default" +) + const ( // VpnStatePending is a VpnState enum value VpnStatePending = "pending" diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go index 6914a666bb..0469f0f01a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go @@ -1025,6 +1025,11 @@ func (c *EC2) WaitUntilSpotInstanceRequestFulfilledWithContext(ctx aws.Context, Matcher: request.PathAllWaiterMatch, Argument: "SpotInstanceRequests[].Status.Code", Expected: "fulfilled", }, + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "SpotInstanceRequests[].Status.Code", + Expected: "request-canceled-and-instance-running", + }, { State: request.FailureWaiterState, Matcher: request.PathAnyWaiterMatch, Argument: "SpotInstanceRequests[].Status.Code", diff --git a/vendor/github.com/aws/aws-sdk-go/service/iam/api.go 
b/vendor/github.com/aws/aws-sdk-go/service/iam/api.go index db2eb744f2..e667a099d9 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/iam/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/iam/api.go @@ -384,6 +384,10 @@ func (c *IAM) AttachGroupPolicyRequest(input *AttachGroupPolicyInput) (req *requ // The request was rejected because an invalid or out-of-range value was supplied // for an input parameter. // +// * ErrCodePolicyNotAttachableException "PolicyNotAttachable" +// The request failed because AWS service role policies can only be attached +// to the service-linked role for that service. +// // * ErrCodeServiceFailureException "ServiceFailure" // The request processing has failed because of an unknown error, exception // or failure. @@ -495,6 +499,10 @@ func (c *IAM) AttachRolePolicyRequest(input *AttachRolePolicyInput) (req *reques // the name of the service that depends on this service-linked role. You must // request the change through that service. // +// * ErrCodePolicyNotAttachableException "PolicyNotAttachable" +// The request failed because AWS service role policies can only be attached +// to the service-linked role for that service. +// // * ErrCodeServiceFailureException "ServiceFailure" // The request processing has failed because of an unknown error, exception // or failure. @@ -596,6 +604,10 @@ func (c *IAM) AttachUserPolicyRequest(input *AttachUserPolicyInput) (req *reques // The request was rejected because an invalid or out-of-range value was supplied // for an input parameter. // +// * ErrCodePolicyNotAttachableException "PolicyNotAttachable" +// The request failed because AWS service role policies can only be attached +// to the service-linked role for that service. +// // * ErrCodeServiceFailureException "ServiceFailure" // The request processing has failed because of an unknown error, exception // or failure. @@ -3752,6 +3764,113 @@ func (c *IAM) DeleteServerCertificateWithContext(ctx aws.Context, input *DeleteS return out, req.Send() } +const opDeleteServiceLinkedRole = "DeleteServiceLinkedRole" + +// DeleteServiceLinkedRoleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteServiceLinkedRole operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteServiceLinkedRole for more information on using the DeleteServiceLinkedRole +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteServiceLinkedRoleRequest method. 
+// req, resp := client.DeleteServiceLinkedRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteServiceLinkedRole +func (c *IAM) DeleteServiceLinkedRoleRequest(input *DeleteServiceLinkedRoleInput) (req *request.Request, output *DeleteServiceLinkedRoleOutput) { + op := &request.Operation{ + Name: opDeleteServiceLinkedRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteServiceLinkedRoleInput{} + } + + output = &DeleteServiceLinkedRoleOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteServiceLinkedRole API operation for AWS Identity and Access Management. +// +// Submits a service-linked role deletion request and returns a DeletionTaskId, +// which you can use to check the status of the deletion. Before you call this +// operation, confirm that the role has no active sessions and that any resources +// used by the role in the linked service are deleted. If you call this operation +// more than once for the same service-linked role and an earlier deletion task +// is not complete, then the DeletionTaskId of the earlier request is returned. +// +// If you submit a deletion request for a service-linked role whose linked service +// is still accessing a resource, then the deletion task fails. If it fails, +// the GetServiceLinkedRoleDeletionStatus API operation returns the reason for +// the failure, including the resources that must be deleted. To delete the +// service-linked role, you must first remove those resources from the linked +// service and then submit the deletion request again. Resources are specific +// to the service that is linked to the role. For more information about removing +// resources from a service, see the AWS documentation (http://docs.aws.amazon.com/) +// for your service. +// +// For more information about service-linked roles, see Roles Terms and Concepts: +// AWS Service-Linked Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-service-linked-role) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation DeleteServiceLinkedRole for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced an entity that does not exist. +// The error message describes the entity. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. 
+// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteServiceLinkedRole +func (c *IAM) DeleteServiceLinkedRole(input *DeleteServiceLinkedRoleInput) (*DeleteServiceLinkedRoleOutput, error) { + req, out := c.DeleteServiceLinkedRoleRequest(input) + return out, req.Send() +} + +// DeleteServiceLinkedRoleWithContext is the same as DeleteServiceLinkedRole with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteServiceLinkedRole for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) DeleteServiceLinkedRoleWithContext(ctx aws.Context, input *DeleteServiceLinkedRoleInput, opts ...request.Option) (*DeleteServiceLinkedRoleOutput, error) { + req, out := c.DeleteServiceLinkedRoleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteServiceSpecificCredential = "DeleteServiceSpecificCredential" // DeleteServiceSpecificCredentialRequest generates a "aws/request.Request" representing the @@ -6569,6 +6688,98 @@ func (c *IAM) GetServerCertificateWithContext(ctx aws.Context, input *GetServerC return out, req.Send() } +const opGetServiceLinkedRoleDeletionStatus = "GetServiceLinkedRoleDeletionStatus" + +// GetServiceLinkedRoleDeletionStatusRequest generates a "aws/request.Request" representing the +// client's request for the GetServiceLinkedRoleDeletionStatus operation. The "output" return +// value will be populated with the request's response once the request complets +// successfuly. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetServiceLinkedRoleDeletionStatus for more information on using the GetServiceLinkedRoleDeletionStatus +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetServiceLinkedRoleDeletionStatusRequest method. +// req, resp := client.GetServiceLinkedRoleDeletionStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetServiceLinkedRoleDeletionStatus +func (c *IAM) GetServiceLinkedRoleDeletionStatusRequest(input *GetServiceLinkedRoleDeletionStatusInput) (req *request.Request, output *GetServiceLinkedRoleDeletionStatusOutput) { + op := &request.Operation{ + Name: opGetServiceLinkedRoleDeletionStatus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetServiceLinkedRoleDeletionStatusInput{} + } + + output = &GetServiceLinkedRoleDeletionStatusOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetServiceLinkedRoleDeletionStatus API operation for AWS Identity and Access Management. +// +// Retrieves the status of your service-linked role deletion. After you use +// the DeleteServiceLinkedRole API operation to submit a service-linked role +// for deletion, you can use the DeletionTaskId parameter in GetServiceLinkedRoleDeletionStatus +// to check the status of the deletion. 
If the deletion fails, this operation +// returns the reason that it failed. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation GetServiceLinkedRoleDeletionStatus for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced an entity that does not exist. +// The error message describes the entity. +// +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetServiceLinkedRoleDeletionStatus +func (c *IAM) GetServiceLinkedRoleDeletionStatus(input *GetServiceLinkedRoleDeletionStatusInput) (*GetServiceLinkedRoleDeletionStatusOutput, error) { + req, out := c.GetServiceLinkedRoleDeletionStatusRequest(input) + return out, req.Send() +} + +// GetServiceLinkedRoleDeletionStatusWithContext is the same as GetServiceLinkedRoleDeletionStatus with the addition of +// the ability to pass a context and additional request options. +// +// See GetServiceLinkedRoleDeletionStatus for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) GetServiceLinkedRoleDeletionStatusWithContext(ctx aws.Context, input *GetServiceLinkedRoleDeletionStatusInput, opts ...request.Option) (*GetServiceLinkedRoleDeletionStatusOutput, error) { + req, out := c.GetServiceLinkedRoleDeletionStatusRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetUser = "GetUser" // GetUserRequest generates a "aws/request.Request" representing the @@ -11227,7 +11438,7 @@ func (c *IAM) SimulateCustomPolicyRequest(input *SimulateCustomPolicyInput) (req // // * ErrCodePolicyEvaluationException "PolicyEvaluation" // The request failed because a provided policy could not be successfully evaluated. -// An additional detail message indicates the source of the failure. +// An additional detailed message indicates the source of the failure. // // Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/SimulateCustomPolicy func (c *IAM) SimulateCustomPolicy(input *SimulateCustomPolicyInput) (*SimulatePolicyResponse, error) { @@ -11397,7 +11608,7 @@ func (c *IAM) SimulatePrincipalPolicyRequest(input *SimulatePrincipalPolicyInput // // * ErrCodePolicyEvaluationException "PolicyEvaluation" // The request failed because a provided policy could not be successfully evaluated. -// An additional detail message indicates the source of the failure. +// An additional detailed message indicates the source of the failure. 
// // Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/SimulatePrincipalPolicy func (c *IAM) SimulatePrincipalPolicy(input *SimulatePrincipalPolicyInput) (*SimulatePolicyResponse, error) { @@ -14612,7 +14823,7 @@ type CreatePolicyInput struct { // // This parameter allows (per its regex pattern (http://wikipedia.org/wiki/regex)) // a string of characters consisting of upper and lowercase alphanumeric characters - // with no spaces. You can also include any of the following characters: =,.@- + // with no spaces. You can also include any of the following characters: =,.@-+ // // PolicyName is a required field PolicyName *string `min:"1" type:"string" required:"true"` @@ -15751,7 +15962,7 @@ type DeleteGroupPolicyInput struct { // // This parameter allows (per its regex pattern (http://wikipedia.org/wiki/regex)) // a string of characters consisting of upper and lowercase alphanumeric characters - // with no spaces. You can also include any of the following characters: =,.@- + // with no spaces. You can also include any of the following characters: =,.@-+ // // PolicyName is a required field PolicyName *string `min:"1" type:"string" required:"true"` @@ -16212,7 +16423,7 @@ type DeleteRolePolicyInput struct { // // This parameter allows (per its regex pattern (http://wikipedia.org/wiki/regex)) // a string of characters consisting of upper and lowercase alphanumeric characters - // with no spaces. You can also include any of the following characters: =,.@- + // with no spaces. You can also include any of the following characters: =,.@-+ // // PolicyName is a required field PolicyName *string `min:"1" type:"string" required:"true"` @@ -16487,6 +16698,75 @@ func (s DeleteServerCertificateOutput) GoString() string { return s.String() } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteServiceLinkedRoleRequest +type DeleteServiceLinkedRoleInput struct { + _ struct{} `type:"structure"` + + // The name of the service-linked role to be deleted. + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteServiceLinkedRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteServiceLinkedRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteServiceLinkedRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteServiceLinkedRoleInput"} + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRoleName sets the RoleName field's value. +func (s *DeleteServiceLinkedRoleInput) SetRoleName(v string) *DeleteServiceLinkedRoleInput { + s.RoleName = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteServiceLinkedRoleResponse +type DeleteServiceLinkedRoleOutput struct { + _ struct{} `type:"structure"` + + // The deletion task identifier that you can use to check the status of the + // deletion. This identifier is returned in the format task/aws-service-role///. 
+ // + // DeletionTaskId is a required field + DeletionTaskId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteServiceLinkedRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteServiceLinkedRoleOutput) GoString() string { + return s.String() +} + +// SetDeletionTaskId sets the DeletionTaskId field's value. +func (s *DeleteServiceLinkedRoleOutput) SetDeletionTaskId(v string) *DeleteServiceLinkedRoleOutput { + s.DeletionTaskId = &v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteServiceSpecificCredentialRequest type DeleteServiceSpecificCredentialInput struct { _ struct{} `type:"structure"` @@ -16713,7 +16993,7 @@ type DeleteUserPolicyInput struct { // // This parameter allows (per its regex pattern (http://wikipedia.org/wiki/regex)) // a string of characters consisting of upper and lowercase alphanumeric characters - // with no spaces. You can also include any of the following characters: =,.@- + // with no spaces. You can also include any of the following characters: =,.@-+ // // PolicyName is a required field PolicyName *string `min:"1" type:"string" required:"true"` @@ -16850,6 +17130,48 @@ func (s DeleteVirtualMFADeviceOutput) GoString() string { return s.String() } +// The reason that the service-linked role deletion failed. +// +// This data type is used as a response element in the GetServiceLinkedRoleDeletionStatus +// operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeletionTaskFailureReasonType +type DeletionTaskFailureReasonType struct { + _ struct{} `type:"structure"` + + // A short description of the reason that the service-linked role deletion failed. + Reason *string `type:"string"` + + // A list of objects that contains details about the service-linked role deletion + // failure. If the service-linked role has active sessions or if any resources + // that were used by the role have not been deleted from the linked service, + // the role can't be deleted. This parameter includes a list of the resources + // that are associated with the role and the region in which the resources are + // being used. + RoleUsageList []*RoleUsageType `type:"list"` +} + +// String returns the string representation +func (s DeletionTaskFailureReasonType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletionTaskFailureReasonType) GoString() string { + return s.String() +} + +// SetReason sets the Reason field's value. +func (s *DeletionTaskFailureReasonType) SetReason(v string) *DeletionTaskFailureReasonType { + s.Reason = &v + return s +} + +// SetRoleUsageList sets the RoleUsageList field's value. +func (s *DeletionTaskFailureReasonType) SetRoleUsageList(v []*RoleUsageType) *DeletionTaskFailureReasonType { + s.RoleUsageList = v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DetachGroupPolicyRequest type DetachGroupPolicyInput struct { _ struct{} `type:"structure"` @@ -18073,7 +18395,7 @@ type GetGroupPolicyInput struct { // // This parameter allows (per its regex pattern (http://wikipedia.org/wiki/regex)) // a string of characters consisting of upper and lowercase alphanumeric characters - // with no spaces. You can also include any of the following characters: =,.@- + // with no spaces. 
You can also include any of the following characters: =,.@-+ // // PolicyName is a required field PolicyName *string `min:"1" type:"string" required:"true"` @@ -18666,7 +18988,7 @@ type GetRolePolicyInput struct { // // This parameter allows (per its regex pattern (http://wikipedia.org/wiki/regex)) // a string of characters consisting of upper and lowercase alphanumeric characters - // with no spaces. You can also include any of the following characters: =,.@- + // with no spaces. You can also include any of the following characters: =,.@-+ // // PolicyName is a required field PolicyName *string `min:"1" type:"string" required:"true"` @@ -19045,6 +19367,84 @@ func (s *GetServerCertificateOutput) SetServerCertificate(v *ServerCertificate) return s } +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetServiceLinkedRoleDeletionStatusRequest +type GetServiceLinkedRoleDeletionStatusInput struct { + _ struct{} `type:"structure"` + + // The deletion task identifier. This identifier is returned by the DeleteServiceLinkedRole + // operation in the format task/aws-service-role///. + // + // DeletionTaskId is a required field + DeletionTaskId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetServiceLinkedRoleDeletionStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetServiceLinkedRoleDeletionStatusInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetServiceLinkedRoleDeletionStatusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetServiceLinkedRoleDeletionStatusInput"} + if s.DeletionTaskId == nil { + invalidParams.Add(request.NewErrParamRequired("DeletionTaskId")) + } + if s.DeletionTaskId != nil && len(*s.DeletionTaskId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DeletionTaskId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDeletionTaskId sets the DeletionTaskId field's value. +func (s *GetServiceLinkedRoleDeletionStatusInput) SetDeletionTaskId(v string) *GetServiceLinkedRoleDeletionStatusInput { + s.DeletionTaskId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetServiceLinkedRoleDeletionStatusResponse +type GetServiceLinkedRoleDeletionStatusOutput struct { + _ struct{} `type:"structure"` + + // An object that contains details about the reason the deletion failed. + Reason *DeletionTaskFailureReasonType `type:"structure"` + + // The status of the deletion. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"DeletionTaskStatusType"` +} + +// String returns the string representation +func (s GetServiceLinkedRoleDeletionStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetServiceLinkedRoleDeletionStatusOutput) GoString() string { + return s.String() +} + +// SetReason sets the Reason field's value. +func (s *GetServiceLinkedRoleDeletionStatusOutput) SetReason(v *DeletionTaskFailureReasonType) *GetServiceLinkedRoleDeletionStatusOutput { + s.Reason = v + return s +} + +// SetStatus sets the Status field's value. 
+func (s *GetServiceLinkedRoleDeletionStatusOutput) SetStatus(v string) *GetServiceLinkedRoleDeletionStatusOutput { + s.Status = &v + return s +} + // Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetUserRequest type GetUserInput struct { _ struct{} `type:"structure"` @@ -19122,7 +19522,7 @@ type GetUserPolicyInput struct { // // This parameter allows (per its regex pattern (http://wikipedia.org/wiki/regex)) // a string of characters consisting of upper and lowercase alphanumeric characters - // with no spaces. You can also include any of the following characters: =,.@- + // with no spaces. You can also include any of the following characters: =,.@-+ // // PolicyName is a required field PolicyName *string `min:"1" type:"string" required:"true"` @@ -20465,6 +20865,10 @@ type ListGroupPoliciesOutput struct { // A list of policy names. // + // This parameter allows (per its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@-+ + // // PolicyNames is a required field PolicyNames []*string `type:"list" required:"true"` } @@ -23482,7 +23886,7 @@ type PutGroupPolicyInput struct { // // This parameter allows (per its regex pattern (http://wikipedia.org/wiki/regex)) // a string of characters consisting of upper and lowercase alphanumeric characters - // with no spaces. You can also include any of the following characters: =,.@- + // with no spaces. You can also include any of the following characters: =,.@-+ // // PolicyName is a required field PolicyName *string `min:"1" type:"string" required:"true"` @@ -23579,7 +23983,7 @@ type PutRolePolicyInput struct { // // This parameter allows (per its regex pattern (http://wikipedia.org/wiki/regex)) // a string of characters consisting of upper and lowercase alphanumeric characters - // with no spaces. You can also include any of the following characters: =,.@- + // with no spaces. You can also include any of the following characters: =,.@-+ // // PolicyName is a required field PolicyName *string `min:"1" type:"string" required:"true"` @@ -23685,7 +24089,7 @@ type PutUserPolicyInput struct { // // This parameter allows (per its regex pattern (http://wikipedia.org/wiki/regex)) // a string of characters consisting of upper and lowercase alphanumeric characters - // with no spaces. You can also include any of the following characters: =,.@- + // with no spaces. You can also include any of the following characters: =,.@-+ // // PolicyName is a required field PolicyName *string `min:"1" type:"string" required:"true"` @@ -24520,6 +24924,43 @@ func (s *RoleDetail) SetRolePolicyList(v []*PolicyDetail) *RoleDetail { return s } +// An object that contains details about how a service-linked role is used. +// +// This data type is used as a response element in the GetServiceLinkedRoleDeletionStatus +// operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/RoleUsageType +type RoleUsageType struct { + _ struct{} `type:"structure"` + + // The name of the region where the service-linked role is being used. + Region *string `min:"1" type:"string"` + + // The name of the resource that is using the service-linked role. 
+ Resources []*string `type:"list"` +} + +// String returns the string representation +func (s RoleUsageType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RoleUsageType) GoString() string { + return s.String() +} + +// SetRegion sets the Region field's value. +func (s *RoleUsageType) SetRegion(v string) *RoleUsageType { + s.Region = &v + return s +} + +// SetResources sets the Resources field's value. +func (s *RoleUsageType) SetResources(v []*string) *RoleUsageType { + s.Resources = v + return s +} + // Contains the list of SAML providers for this account. // Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/SAMLProviderListEntry type SAMLProviderListEntry struct { @@ -27472,15 +27913,18 @@ type User struct { // a list of AWS websites that capture a user's last sign-in time, see the Credential // Reports (http://docs.aws.amazon.com/IAM/latest/UserGuide/credential-reports.html) // topic in the Using IAM guide. If a password is used more than once in a five-minute - // span, only the first use is returned in this field. This field is null (not - // present) when: + // span, only the first use is returned in this field. If the field is null + // (no value) then it indicates that they never signed in with a password. This + // can be because: // - // * The user does not have a password + // * The user never had a password. // - // * The password exists but has never been used (at least not since IAM - // started tracking this information on October 20th, 2014 + // * A password exists but has not been used since IAM started tracking this + // information on October 20th, 2014. // - // * there is no sign-in data associated with the user + // A null does not mean that the user never had a password. Also, if the user + // does not currently have a password, but had one in the past, then this field + // contains the date and time the most recent password was used. // // This value is returned only in the GetUser and ListUsers actions. PasswordLastUsed *time.Time `type:"timestamp" timestampFormat:"iso8601"` @@ -27761,6 +28205,20 @@ const ( ContextKeyTypeEnumDateList = "dateList" ) +const ( + // DeletionTaskStatusTypeSucceeded is a DeletionTaskStatusType enum value + DeletionTaskStatusTypeSucceeded = "SUCCEEDED" + + // DeletionTaskStatusTypeInProgress is a DeletionTaskStatusType enum value + DeletionTaskStatusTypeInProgress = "IN_PROGRESS" + + // DeletionTaskStatusTypeFailed is a DeletionTaskStatusType enum value + DeletionTaskStatusTypeFailed = "FAILED" + + // DeletionTaskStatusTypeNotStarted is a DeletionTaskStatusType enum value + DeletionTaskStatusTypeNotStarted = "NOT_STARTED" +) + const ( // EntityTypeUser is a EntityType enum value EntityTypeUser = "User" diff --git a/vendor/github.com/aws/aws-sdk-go/service/iam/errors.go b/vendor/github.com/aws/aws-sdk-go/service/iam/errors.go index 97ad363c8a..470e19b37f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/iam/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/iam/errors.go @@ -144,9 +144,16 @@ const ( // "PolicyEvaluation". // // The request failed because a provided policy could not be successfully evaluated. - // An additional detail message indicates the source of the failure. + // An additional detailed message indicates the source of the failure. ErrCodePolicyEvaluationException = "PolicyEvaluation" + // ErrCodePolicyNotAttachableException for service response error code + // "PolicyNotAttachable". 
+ // + // The request failed because AWS service role policies can only be attached + // to the service-linked role for that service. + ErrCodePolicyNotAttachableException = "PolicyNotAttachable" + // ErrCodeServiceFailureException for service response error code // "ServiceFailure". // diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go index e65ef266f7..1d4fa38354 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -17085,6 +17085,9 @@ type PutObjectInput struct { // body cannot be determined automatically. ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + // The base64-encoded 128-bit MD5 digest of the part data. + ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` + // A standard MIME type describing the format of the object data. ContentType *string `location:"header" locationName:"Content-Type" type:"string"` @@ -17238,6 +17241,12 @@ func (s *PutObjectInput) SetContentLength(v int64) *PutObjectInput { return s } +// SetContentMD5 sets the ContentMD5 field's value. +func (s *PutObjectInput) SetContentMD5(v string) *PutObjectInput { + s.ContentMD5 = &v + return s +} + // SetContentType sets the ContentType field's value. func (s *PutObjectInput) SetContentType(v string) *PutObjectInput { s.ContentType = &v @@ -19079,6 +19088,9 @@ type UploadPartInput struct { // body cannot be determined automatically. ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + // The base64-encoded 128-bit MD5 digest of the part data. + ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` + // Object key for which the multipart upload was initiated. // // Key is a required field @@ -19178,6 +19190,12 @@ func (s *UploadPartInput) SetContentLength(v int64) *UploadPartInput { return s } +// SetContentMD5 sets the ContentMD5 field's value. +func (s *UploadPartInput) SetContentMD5(v string) *UploadPartInput { + s.ContentMD5 = &v + return s +} + // SetKey sets the Key field's value. func (s *UploadPartInput) SetKey(v string) *UploadPartInput { s.Key = &v diff --git a/vendor/github.com/boombuler/barcode/README.md b/vendor/github.com/boombuler/barcode/README.md index e11f908525..2a988db399 100644 --- a/vendor/github.com/boombuler/barcode/README.md +++ b/vendor/github.com/boombuler/barcode/README.md @@ -1,4 +1,7 @@ +[![Join the chat at https://gitter.im/golang-barcode/Lobby](https://badges.gitter.im/golang-barcode/Lobby.svg)](https://gitter.im/golang-barcode/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) + ## Introduction ## + This is a package for GO which can be used to create different types of barcodes. ## Supported Barcode Types ## diff --git a/vendor/github.com/cenk/backoff/backoff.go b/vendor/github.com/cenk/backoff/backoff.go index 2102c5f2de..3676ee405d 100644 --- a/vendor/github.com/cenk/backoff/backoff.go +++ b/vendor/github.com/cenk/backoff/backoff.go @@ -15,7 +15,7 @@ import "time" // BackOff is a backoff policy for retrying an operation. type BackOff interface { // NextBackOff returns the duration to wait before retrying the operation, - // or backoff.Stop to indicate that no more retries should be made. + // or backoff. Stop to indicate that no more retries should be made. 
// // Example usage: // diff --git a/vendor/github.com/cenk/backoff/exponential.go b/vendor/github.com/cenk/backoff/exponential.go index 9a6addf075..d9de15a177 100644 --- a/vendor/github.com/cenk/backoff/exponential.go +++ b/vendor/github.com/cenk/backoff/exponential.go @@ -127,7 +127,9 @@ func (b *ExponentialBackOff) NextBackOff() time.Duration { // GetElapsedTime returns the elapsed time since an ExponentialBackOff instance // is created and is reset when Reset() is called. // -// The elapsed time is computed using time.Now().UnixNano(). +// The elapsed time is computed using time.Now().UnixNano(). It is +// safe to call even while the backoff policy is used by a running +// ticker. func (b *ExponentialBackOff) GetElapsedTime() time.Duration { return b.Clock.Now().Sub(b.startTime) } diff --git a/vendor/github.com/cenk/backoff/ticker.go b/vendor/github.com/cenk/backoff/ticker.go index 49a99718d7..e742512fd3 100644 --- a/vendor/github.com/cenk/backoff/ticker.go +++ b/vendor/github.com/cenk/backoff/ticker.go @@ -18,9 +18,12 @@ type Ticker struct { stopOnce sync.Once } -// NewTicker returns a new Ticker containing a channel that will send the time at times -// specified by the BackOff argument. Ticker is guaranteed to tick at least once. -// The channel is closed when Stop method is called or BackOff stops. +// NewTicker returns a new Ticker containing a channel that will send +// the time at times specified by the BackOff argument. Ticker is +// guaranteed to tick at least once. The channel is closed when Stop +// method is called or BackOff stops. It is not safe to manipulate the +// provided backoff policy (notably calling NextBackOff or Reset) +// while the ticker is running. func NewTicker(b BackOff) *Ticker { c := make(chan time.Time) t := &Ticker{ @@ -29,6 +32,7 @@ func NewTicker(b BackOff) *Ticker { b: ensureContext(b), stop: make(chan struct{}), } + t.b.Reset() go t.run() runtime.SetFinalizer(t, (*Ticker).Stop) return t @@ -42,7 +46,6 @@ func (t *Ticker) Stop() { func (t *Ticker) run() { c := t.c defer close(c) - t.b.Reset() // Ticker is guaranteed to tick at least once. afterC := t.send(time.Now()) diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/CHANGELOG.md b/vendor/github.com/circonus-labs/circonus-gometrics/CHANGELOG.md new file mode 100644 index 0000000000..9a54a31a93 --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/CHANGELOG.md @@ -0,0 +1,5 @@ +# v2.0.0 + +* Add unix socket capability for SubmissionURL `http+unix://...` +* Add `RecordCountForValue` function to histograms +* Add `CHANGELOG.md` to repository diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/OPTIONS.md b/vendor/github.com/circonus-labs/circonus-gometrics/OPTIONS.md index 4ee10a5513..4ba18f2229 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/OPTIONS.md +++ b/vendor/github.com/circonus-labs/circonus-gometrics/OPTIONS.md @@ -33,6 +33,7 @@ func main() { cfg.CheckManager.API.TokenApp = "circonus-gometrics" cfg.CheckManager.API.TokenURL = "https://api.circonus.com/v2" cfg.CheckManager.API.CACert = nil + cfg.CheckManager.API.TLSConfig = nil // Check _, an := path.Split(os.Args[0]) @@ -52,6 +53,7 @@ func main() { cfg.CheckManager.Broker.ID = "" cfg.CheckManager.Broker.SelectTag = "" cfg.CheckManager.Broker.MaxResponseTime = "500ms" + cfg.CheckManager.Broker.TLSConfig = nil // create a new cgm instance and start sending metrics... // see the complete example in the main README. 
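
Editor's note: the circonus-gometrics options above introduce `CheckManager.API.TLSConfig` and `CheckManager.Broker.TLSConfig`, which supersede the deprecated `CACert` pool. The following is a minimal, hedged sketch (not part of this patch) of how a caller might wire in a custom `tls.Config`; the CA file path is hypothetical and the token is read from the environment as in the library's README.

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
	"log"
	"os"

	cgm "github.com/circonus-labs/circonus-gometrics"
)

func main() {
	// Load an internal/self-signed CA certificate (path is hypothetical).
	pem, err := ioutil.ReadFile("/path/to/internal-ca.pem")
	if err != nil {
		log.Fatal(err)
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(pem) {
		log.Fatal("unable to parse CA certificate")
	}

	cfg := &cgm.Config{}
	cfg.CheckManager.API.TokenKey = os.Getenv("CIRCONUS_API_TOKEN")

	// Full tls.Config for API and broker traffic, instead of the
	// deprecated CACert *x509.CertPool field.
	cfg.CheckManager.API.TLSConfig = &tls.Config{RootCAs: pool}
	cfg.CheckManager.Broker.TLSConfig = &tls.Config{RootCAs: pool}

	metrics, err := cgm.New(cfg)
	if err != nil {
		log.Fatal(err)
	}

	// Record a sample metric and push it.
	metrics.Gauge("example`gauge", 42)
	metrics.Flush()
}
```
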
@@ -73,10 +75,11 @@ func main() { | `cfg.CheckManager.API.TokenKey` | "" | [Circonus API Token key](https://login.circonus.com/user/tokens) | | `cfg.CheckManager.API.TokenApp` | "circonus-gometrics" | App associated with API token | | `cfg.CheckManager.API.URL` | "https://api.circonus.com/v2" | Circonus API URL | -| `cfg.CheckManager.API.CACert` | nil | [*x509.CertPool](https://golang.org/pkg/crypto/x509/#CertPool) with CA Cert to validate API endpoint using internal CA or self-signed certificates | +| `cfg.CheckManager.API.TLSConfig` | nil | Custom tls.Config to use when communicating with Circonus API | +| `cfg.CheckManager.API.CACert` | nil | DEPRECATED - use TLSConfig ~~[*x509.CertPool](https://golang.org/pkg/crypto/x509/#CertPool) with CA Cert to validate API endpoint using internal CA or self-signed certificates~~ | |Check|| | `cfg.CheckManager.Check.ID` | "" | Check ID of previously created check. (*Note: **check id** not **check bundle id**.*) | -| `cfg.CheckManager.Check.SubmissionURL` | "" | Submission URL of previously created check. | +| `cfg.CheckManager.Check.SubmissionURL` | "" | Submission URL of previously created check. Metrics can also be sent to a local [circonus-agent](https://github.com/circonus-labs/circonus-agent) by using the agent's URL (e.g. `http://127.0.0.1:2609/write/appid` where `appid` is a unique identifier for the application which will prefix all metrics. Additionally, the circonus-agent can optionally listen for requests to `/write` on a unix socket - to leverage this feature, use a URL such as `http+unix:///path/to/socket_file/write/appid`). | | `cfg.CheckManager.Check.InstanceID` | hostname:program name | An identifier for the 'group of metrics emitted by this process or service'. | | `cfg.CheckManager.Check.TargetHost` | InstanceID | Explicit setting of `check.target`. | | `cfg.CheckManager.Check.DisplayName` | InstanceID | Custom `check.display_name`. Shows in UI check list. | @@ -89,6 +92,7 @@ func main() { | `cfg.CheckManager.Broker.ID` | "" | ID of a specific broker to use when creating a check. Default is to use a random enterprise broker or the public Circonus default broker. | | `cfg.CheckManager.Broker.SelectTag` | "" | Used to select a broker with the same tag(s). If more than one broker has the tag(s), one will be selected randomly from the resulting list. (e.g. could be used to select one from a list of brokers serving a specific colo/region. "dc:sfo", "loc:nyc,dc:nyc01", "zone:us-west") | | `cfg.CheckManager.Broker.MaxResponseTime` | "500ms" | Maximum amount time to wait for a broker connection test to be considered valid. (if latency is > the broker will be considered invalid and not available for selection.) 
| +| `cfg.CheckManager.Broker.TLSConfig` | nil | Custom tls.Config to use when communicating with Circonus Broker | ## Notes: diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/README.md b/vendor/github.com/circonus-labs/circonus-gometrics/README.md index ca6cabb23f..323f97c02a 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/README.md +++ b/vendor/github.com/circonus-labs/circonus-gometrics/README.md @@ -122,6 +122,7 @@ func main() { cmc.CheckManager.API.TokenKey = os.Getenv("CIRCONUS_API_TOKEN") cmc.CheckManager.API.TokenApp = os.Getenv("CIRCONUS_API_APP") cmc.CheckManager.API.URL = os.Getenv("CIRCONUS_API_URL") + cmc.CheckManager.API.TLSConfig = nil // Check configuration options cmc.CheckManager.Check.SubmissionURL = os.Getenv("CIRCONUS_SUBMISSION_URL") @@ -142,6 +143,7 @@ func main() { cmc.CheckManager.Broker.ID = "" cmc.CheckManager.Broker.SelectTag = "" cmc.CheckManager.Broker.MaxResponseTime = "500ms" + cmc.CheckManager.Broker.TLSConfig = nil logger.Println("Creating new cgm instance") @@ -230,3 +232,5 @@ func main() { ``` Unless otherwise noted, the source files are distributed under the BSD-style license found in the LICENSE file. + +[![codecov](https://codecov.io/gh/maier/circonus-gometrics/branch/master/graph/badge.svg)](https://codecov.io/gh/maier/circonus-gometrics) diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/api.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/api.go index 598e20f2db..f3cd8cd8ad 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/api/api.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/api.go @@ -75,13 +75,25 @@ type TagType []string // Config options for Circonus API type Config struct { - URL string - TokenKey string - TokenApp string + // URL defines the API URL - default https://api.circonus.com/v2/ + URL string + + // TokenKey defines the key to use when communicating with the API + TokenKey string + + // TokenApp defines the app to use when communicating with the API + TokenApp string + TokenAccountID string - CACert *x509.CertPool - Log *log.Logger - Debug bool + + // CACert deprecating, use TLSConfig instead + CACert *x509.CertPool + + // TLSConfig defines a custom tls configuration to use when communicating with the API + TLSConfig *tls.Config + + Log *log.Logger + Debug bool } // API Circonus API @@ -91,6 +103,7 @@ type API struct { app TokenAppType accountID TokenAccountIDType caCert *x509.CertPool + tlsConfig *tls.Config Debug bool Log *log.Logger useExponentialBackoff bool @@ -149,6 +162,7 @@ func New(ac *Config) (*API, error) { app: app, accountID: acctID, caCert: ac.CACert, + tlsConfig: ac.TLSConfig, Debug: ac.Debug, Log: ac.Log, useExponentialBackoff: false, @@ -304,7 +318,13 @@ func (a *API) apiCall(reqMethod string, reqPath string, data []byte) ([]byte, er } client := retryablehttp.NewClient() - if a.apiURL.Scheme == "https" && a.caCert != nil { + if a.apiURL.Scheme == "https" { + var tlscfg *tls.Config + if a.tlsConfig != nil { // preference full custom tls config + tlscfg = a.tlsConfig + } else if a.caCert != nil { + tlscfg = &tls.Config{RootCAs: a.caCert} + } client.HTTPClient.Transport = &http.Transport{ Proxy: http.ProxyFromEnvironment, Dial: (&net.Dialer{ @@ -312,7 +332,7 @@ func (a *API) apiCall(reqMethod string, reqPath string, data []byte) ([]byte, er KeepAlive: 30 * time.Second, }).Dial, TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: &tls.Config{RootCAs: a.caCert}, + TLSClientConfig: tlscfg, DisableKeepAlives: true, 
MaxIdleConnsPerHost: -1, DisableCompression: true, diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/broker.go b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/broker.go index a8413fb53a..6f24d7bfcd 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/broker.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/broker.go @@ -104,6 +104,7 @@ func (cm *CheckManager) selectBroker() (*api.Broker, error) { haveEnterprise := false for _, broker := range *brokerList { + broker := broker if cm.isValidBroker(&broker) { validBrokers[broker.CID] = broker if broker.Type == "enterprise" { @@ -166,6 +167,7 @@ func (cm *CheckManager) isValidBroker(broker *api.Broker) bool { var brokerPort string valid := false for _, detail := range broker.Details { + detail := detail // broker must be active if detail.Status != statusActive { diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/checkmgr.go b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/checkmgr.go index f2ef7d6df7..665f732fc2 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/checkmgr.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/checkmgr.go @@ -8,19 +8,21 @@ package checkmgr import ( "crypto/tls" "crypto/x509" - "errors" "fmt" "io/ioutil" "log" "net/url" "os" "path" + "regexp" "strconv" "strings" "sync" "time" "github.com/circonus-labs/circonus-gometrics/api" + "github.com/pkg/errors" + "github.com/tv42/httpunix" ) // Check management offers: @@ -101,6 +103,8 @@ type BrokerConfig struct { // for a broker to be considered viable it must respond to a // connection attempt within this amount of time e.g. 200ms, 2s, 1m MaxResponseTime string + // TLS configuration to use when communicating wtih broker + TLSConfig *tls.Config } // Config options @@ -169,6 +173,7 @@ type CheckManager struct { brokerID api.IDType brokerSelectTag api.TagType brokerMaxResponseTime time.Duration + brokerTLS *tls.Config // state checkBundle *api.CheckBundle @@ -181,12 +186,15 @@ type CheckManager struct { trapMaxURLAge time.Duration trapmu sync.Mutex certPool *x509.CertPool + sockRx *regexp.Regexp } // Trap config type Trap struct { - URL *url.URL - TLS *tls.Config + URL *url.URL + TLS *tls.Config + IsSocket bool + SockTransport *httpunix.Transport } // NewCheckManager returns a new check manager @@ -213,6 +221,12 @@ func New(cfg *Config) (*CheckManager, error) { cm.Log = log.New(ioutil.Discard, "", log.LstdFlags) } + rx, err := regexp.Compile(`^http\+unix://(?P.+)/write/(?P.+)$`) + if err != nil { + return nil, errors.Wrap(err, "compiling socket regex") + } + cm.sockRx = rx + if cfg.Check.SubmissionURL != "" { cm.checkSubmissionURL = api.URLType(cfg.Check.SubmissionURL) } @@ -232,7 +246,7 @@ func New(cfg *Config) (*CheckManager, error) { cfg.API.Log = cm.Log apih, err := api.New(&cfg.API) if err != nil { - return nil, err + return nil, errors.Wrap(err, "initializing api client") } cm.apih = apih } @@ -250,7 +264,7 @@ func New(cfg *Config) (*CheckManager, error) { } id, err := strconv.Atoi(idSetting) if err != nil { - return nil, err + return nil, errors.Wrap(err, "converting check id") } cm.checkID = api.IDType(id) @@ -265,7 +279,7 @@ func New(cfg *Config) (*CheckManager, error) { } fm, err := strconv.ParseBool(fma) if err != nil { - return nil, err + return nil, errors.Wrap(err, "parsing force metric activation") } cm.forceMetricActivation = fm @@ -307,7 +321,7 @@ func New(cfg *Config) (*CheckManager, error) { } maxDur, err := 
time.ParseDuration(dur) if err != nil { - return nil, err + return nil, errors.Wrap(err, "parsing max url age") } cm.trapMaxURLAge = maxDur @@ -318,7 +332,7 @@ func New(cfg *Config) (*CheckManager, error) { } id, err = strconv.Atoi(idSetting) if err != nil { - return nil, err + return nil, errors.Wrap(err, "parsing broker id") } cm.brokerID = api.IDType(id) @@ -332,10 +346,13 @@ func New(cfg *Config) (*CheckManager, error) { } maxDur, err = time.ParseDuration(dur) if err != nil { - return nil, err + return nil, errors.Wrap(err, "parsing broker max response time") } cm.brokerMaxResponseTime = maxDur + // add user specified tls config for broker if provided + cm.brokerTLS = cfg.Broker.TLSConfig + // metrics cm.availableMetrics = make(map[string]bool) cm.metricTags = make(map[string][]string) @@ -384,25 +401,64 @@ func (cm *CheckManager) IsReady() bool { // GetSubmissionURL returns submission url for circonus func (cm *CheckManager) GetSubmissionURL() (*Trap, error) { if cm.trapURL == "" { - return nil, fmt.Errorf("[ERROR] no submission url currently available") - // if err := cm.initializeTrapURL(); err != nil { - // return nil, err - // } + return nil, errors.Errorf("get submission url - submission url unavailable") } trap := &Trap{} u, err := url.Parse(string(cm.trapURL)) if err != nil { - return nil, err + return nil, errors.Wrap(err, "get submission url") } trap.URL = u + if u.Scheme == "http+unix" { + service := "circonus-agent" + sockPath := "" + metricID := "" + + subNames := cm.sockRx.SubexpNames() + matches := cm.sockRx.FindAllStringSubmatch(string(cm.trapURL), -1) + for _, match := range matches { + for idx, val := range match { + switch subNames[idx] { + case "sockfile": + sockPath = val + case "id": + metricID = val + } + } + } + + if sockPath == "" || metricID == "" { + return nil, errors.Errorf("get submission url - invalid socket url (%s)", cm.trapURL) + } + + u, err := url.Parse(fmt.Sprintf("http+unix://%s/write/%s", service, metricID)) + if err != nil { + return nil, errors.Wrap(err, "get submission url") + } + trap.URL = u + trap.SockTransport = &httpunix.Transport{ + DialTimeout: 100 * time.Millisecond, + RequestTimeout: 1 * time.Second, + ResponseHeaderTimeout: 1 * time.Second, + } + trap.SockTransport.RegisterLocation(service, sockPath) + trap.IsSocket = true + } + if u.Scheme == "https" { + // preference user-supplied TLS configuration + if cm.brokerTLS != nil { + trap.TLS = cm.brokerTLS + return trap, nil + } + if cm.certPool == nil { if err := cm.loadCACert(); err != nil { - return nil, err + return nil, errors.Wrap(err, "get submission url") } } t := &tls.Config{ @@ -424,18 +480,19 @@ func (cm *CheckManager) ResetTrap() error { } cm.trapURL = "" - cm.certPool = nil - err := cm.initializeTrapURL() - return err + cm.certPool = nil // force re-fetching CA cert (if custom TLS config not supplied) + return cm.initializeTrapURL() } // RefreshTrap check when the last time the URL was reset, reset if needed -func (cm *CheckManager) RefreshTrap() { +func (cm *CheckManager) RefreshTrap() error { if cm.trapURL == "" { - return + return nil } if time.Since(cm.trapLastUpdate) >= cm.trapMaxURLAge { - cm.ResetTrap() + return cm.ResetTrap() } + + return nil } diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/circonus-gometrics.go b/vendor/github.com/circonus-labs/circonus-gometrics/circonus-gometrics.go index ba73ae625e..e335aea960 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/circonus-gometrics.go +++ 
b/vendor/github.com/circonus-labs/circonus-gometrics/circonus-gometrics.go @@ -30,22 +30,35 @@ package circonusgometrics import ( - "errors" + "bufio" + "bytes" + "fmt" "io/ioutil" "log" "os" "strconv" + "strings" "sync" "time" "github.com/circonus-labs/circonus-gometrics/api" "github.com/circonus-labs/circonus-gometrics/checkmgr" + "github.com/pkg/errors" ) const ( defaultFlushInterval = "10s" // 10 * time.Second ) +// Metric defines an individual metric +type Metric struct { + Type string `json:"_type"` + Value interface{} `json:"_value"` +} + +// Metrics holds host metrics +type Metrics map[string]Metric + // Config options for circonus-gometrics type Config struct { Log *log.Logger @@ -63,6 +76,12 @@ type Config struct { Interval string } +type prevMetrics struct { + metrics *Metrics + metricsmu sync.Mutex + ts time.Time +} + // CirconusMetrics state type CirconusMetrics struct { Log *log.Logger @@ -75,7 +94,9 @@ type CirconusMetrics struct { flushInterval time.Duration flushing bool flushmu sync.Mutex + packagingmu sync.Mutex check *checkmgr.CheckManager + lastMetrics *prevMetrics counters map[string]uint64 cm sync.Mutex @@ -83,7 +104,7 @@ type CirconusMetrics struct { counterFuncs map[string]func() uint64 cfm sync.Mutex - gauges map[string]string + gauges map[string]interface{} gm sync.Mutex gaugeFuncs map[string]func() int64 @@ -114,11 +135,12 @@ func New(cfg *Config) (*CirconusMetrics, error) { cm := &CirconusMetrics{ counters: make(map[string]uint64), counterFuncs: make(map[string]func() uint64), - gauges: make(map[string]string), + gauges: make(map[string]interface{}), gaugeFuncs: make(map[string]func() int64), histograms: make(map[string]*Histogram), text: make(map[string]string), textFuncs: make(map[string]func() string), + lastMetrics: &prevMetrics{}, } // Logging @@ -143,7 +165,7 @@ func New(cfg *Config) (*CirconusMetrics, error) { dur, err := time.ParseDuration(fi) if err != nil { - return nil, err + return nil, errors.Wrap(err, "parsing flush interval") } cm.flushInterval = dur } @@ -154,7 +176,7 @@ func New(cfg *Config) (*CirconusMetrics, error) { if cfg.ResetCounters != "" { setting, err := strconv.ParseBool(cfg.ResetCounters) if err != nil { - return nil, err + return nil, errors.Wrap(err, "parsing reset counters") } cm.resetCounters = setting } @@ -163,7 +185,7 @@ func New(cfg *Config) (*CirconusMetrics, error) { if cfg.ResetGauges != "" { setting, err := strconv.ParseBool(cfg.ResetGauges) if err != nil { - return nil, err + return nil, errors.Wrap(err, "parsing reset gauges") } cm.resetGauges = setting } @@ -172,7 +194,7 @@ func New(cfg *Config) (*CirconusMetrics, error) { if cfg.ResetHistograms != "" { setting, err := strconv.ParseBool(cfg.ResetHistograms) if err != nil { - return nil, err + return nil, errors.Wrap(err, "parsing reset histograms") } cm.resetHistograms = setting } @@ -181,7 +203,7 @@ func New(cfg *Config) (*CirconusMetrics, error) { if cfg.ResetText != "" { setting, err := strconv.ParseBool(cfg.ResetText) if err != nil { - return nil, err + return nil, errors.Wrap(err, "parsing reset text") } cm.resetText = setting } @@ -193,7 +215,7 @@ func New(cfg *Config) (*CirconusMetrics, error) { check, err := checkmgr.New(&cfg.CheckManager) if err != nil { - return nil, err + return nil, errors.Wrap(err, "creating new check manager") } cm.check = check } @@ -202,7 +224,7 @@ func New(cfg *Config) (*CirconusMetrics, error) { cm.check.Initialize() // if automatic flush is enabled, start it. - // note: submit will jettison metrics until initialization has completed. 
+ // NOTE: submit will jettison metrics until initialization has completed. if cm.flushInterval > time.Duration(0) { go func() { for range time.NewTicker(cm.flushInterval).C { @@ -224,24 +246,18 @@ func (m *CirconusMetrics) Ready() bool { return m.check.IsReady() } -// Flush metrics kicks off the process of sending metrics to Circonus -func (m *CirconusMetrics) Flush() { - if m.flushing { - return - } - m.flushmu.Lock() - m.flushing = true - m.flushmu.Unlock() +func (m *CirconusMetrics) packageMetrics() (map[string]*api.CheckBundleMetric, Metrics) { + + m.packagingmu.Lock() + defer m.packagingmu.Unlock() if m.Debug { - m.Log.Println("[DEBUG] Flushing metrics") + m.Log.Println("[DEBUG] Packaging metrics") } - // check for new metrics and enable them automatically - newMetrics := make(map[string]*api.CheckBundleMetric) - counters, gauges, histograms, text := m.snapshot() - output := make(map[string]interface{}) + newMetrics := make(map[string]*api.CheckBundleMetric) + output := make(Metrics, len(counters)+len(gauges)+len(histograms)+len(text)) for name, value := range counters { send := m.check.IsMetricActive(name) if !send && m.check.ActivateMetric(name) { @@ -253,10 +269,7 @@ func (m *CirconusMetrics) Flush() { } } if send { - output[name] = map[string]interface{}{ - "_type": "L", - "_value": value, - } + output[name] = Metric{Type: "L", Value: value} } } @@ -271,10 +284,7 @@ func (m *CirconusMetrics) Flush() { } } if send { - output[name] = map[string]interface{}{ - "_type": "n", - "_value": value, - } + output[name] = Metric{Type: m.getGaugeType(value), Value: value} } } @@ -289,10 +299,7 @@ func (m *CirconusMetrics) Flush() { } } if send { - output[name] = map[string]interface{}{ - "_type": "n", - "_value": value.DecStrings(), - } + output[name] = Metric{Type: "n", Value: value.DecStrings()} } } @@ -307,13 +314,85 @@ func (m *CirconusMetrics) Flush() { } } if send { - output[name] = map[string]interface{}{ - "_type": "s", - "_value": value, - } + output[name] = Metric{Type: "s", Value: value} } } + m.lastMetrics.metricsmu.Lock() + defer m.lastMetrics.metricsmu.Unlock() + m.lastMetrics.metrics = &output + m.lastMetrics.ts = time.Now() + + return newMetrics, output +} + +// PromOutput returns lines of metrics in prom format +func (m *CirconusMetrics) PromOutput() (*bytes.Buffer, error) { + m.lastMetrics.metricsmu.Lock() + defer m.lastMetrics.metricsmu.Unlock() + + if m.lastMetrics.metrics == nil { + return nil, errors.New("no metrics available") + } + + var b bytes.Buffer + w := bufio.NewWriter(&b) + + ts := m.lastMetrics.ts.UnixNano() / int64(time.Millisecond) + + for name, metric := range *m.lastMetrics.metrics { + switch metric.Type { + case "n": + if strings.HasPrefix(fmt.Sprintf("%v", metric.Value), "[H[") { + continue // circonus histogram != prom "histogram" (aka percentile) + } + case "s": + continue // text metrics unsupported + } + fmt.Fprintf(w, "%s %v %d\n", name, metric.Value, ts) + } + + err := w.Flush() + if err != nil { + return nil, errors.Wrap(err, "flushing metric buffer") + } + + return &b, err +} + +// FlushMetrics flushes current metrics to a structure and returns it (does NOT send to Circonus) +func (m *CirconusMetrics) FlushMetrics() *Metrics { + m.flushmu.Lock() + if m.flushing { + m.flushmu.Unlock() + return &Metrics{} + } + + m.flushing = true + m.flushmu.Unlock() + + _, output := m.packageMetrics() + + m.flushmu.Lock() + m.flushing = false + m.flushmu.Unlock() + + return &output +} + +// Flush metrics kicks off the process of sending metrics to Circonus +func 
(m *CirconusMetrics) Flush() { + m.flushmu.Lock() + if m.flushing { + m.flushmu.Unlock() + return + } + + m.flushing = true + m.flushmu.Unlock() + + newMetrics, output := m.packageMetrics() + if len(output) > 0 { m.submit(output, newMetrics) } else { diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/gauge.go b/vendor/github.com/circonus-labs/circonus-gometrics/gauge.go index 9dce5cdf52..4e05484ece 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/gauge.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/gauge.go @@ -22,7 +22,48 @@ func (m *CirconusMetrics) Gauge(metric string, val interface{}) { func (m *CirconusMetrics) SetGauge(metric string, val interface{}) { m.gm.Lock() defer m.gm.Unlock() - m.gauges[metric] = m.gaugeValString(val) + m.gauges[metric] = val +} + +// AddGauge adds value to existing gauge +func (m *CirconusMetrics) AddGauge(metric string, val interface{}) { + m.gm.Lock() + defer m.gm.Unlock() + + v, ok := m.gauges[metric] + if !ok { + m.gauges[metric] = val + return + } + + switch val.(type) { + default: + // ignore it, unsupported type + case int: + m.gauges[metric] = v.(int) + val.(int) + case int8: + m.gauges[metric] = v.(int8) + val.(int8) + case int16: + m.gauges[metric] = v.(int16) + val.(int16) + case int32: + m.gauges[metric] = v.(int32) + val.(int32) + case int64: + m.gauges[metric] = v.(int64) + val.(int64) + case uint: + m.gauges[metric] = v.(uint) + val.(uint) + case uint8: + m.gauges[metric] = v.(uint8) + val.(uint8) + case uint16: + m.gauges[metric] = v.(uint16) + val.(uint16) + case uint32: + m.gauges[metric] = v.(uint32) + val.(uint32) + case uint64: + m.gauges[metric] = v.(uint64) + val.(uint64) + case float32: + m.gauges[metric] = v.(float32) + val.(float32) + case float64: + m.gauges[metric] = v.(float64) + val.(float64) + } } // RemoveGauge removes a gauge @@ -33,7 +74,7 @@ func (m *CirconusMetrics) RemoveGauge(metric string) { } // GetGaugeTest returns the current value for a gauge. (note: it is a function specifically for "testing", disable automatic submission during testing.) 
-func (m *CirconusMetrics) GetGaugeTest(metric string) (string, error) { +func (m *CirconusMetrics) GetGaugeTest(metric string) (interface{}, error) { m.gm.Lock() defer m.gm.Unlock() @@ -41,7 +82,7 @@ func (m *CirconusMetrics) GetGaugeTest(metric string) (string, error) { return val, nil } - return "", fmt.Errorf("Gauge metric '%s' not found", metric) + return nil, fmt.Errorf("Gauge metric '%s' not found", metric) } // SetGaugeFunc sets a gauge to a function [called at flush interval] @@ -58,36 +99,31 @@ func (m *CirconusMetrics) RemoveGaugeFunc(metric string) { delete(m.gaugeFuncs, metric) } -// gaugeValString converts an interface value (of a supported type) to a string -func (m *CirconusMetrics) gaugeValString(val interface{}) string { - vs := "" - switch v := val.(type) { - default: - // ignore it, unsupported type +// getGaugeType returns accurate resmon type for underlying type of gauge value +func (m *CirconusMetrics) getGaugeType(v interface{}) string { + mt := "n" + switch v.(type) { case int: - vs = fmt.Sprintf("%d", v) + mt = "i" case int8: - vs = fmt.Sprintf("%d", v) + mt = "i" case int16: - vs = fmt.Sprintf("%d", v) + mt = "i" case int32: - vs = fmt.Sprintf("%d", v) - case int64: - vs = fmt.Sprintf("%d", v) + mt = "i" case uint: - vs = fmt.Sprintf("%d", v) + mt = "I" case uint8: - vs = fmt.Sprintf("%d", v) + mt = "I" case uint16: - vs = fmt.Sprintf("%d", v) + mt = "I" case uint32: - vs = fmt.Sprintf("%d", v) + mt = "I" + case int64: + mt = "l" case uint64: - vs = fmt.Sprintf("%d", v) - case float32: - vs = fmt.Sprintf("%f", v) - case float64: - vs = fmt.Sprintf("%f", v) + mt = "L" } - return vs + + return mt } diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/histogram.go b/vendor/github.com/circonus-labs/circonus-gometrics/histogram.go index 71a05054ed..d39f008de3 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/histogram.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/histogram.go @@ -28,6 +28,17 @@ func (m *CirconusMetrics) RecordValue(metric string, val float64) { m.SetHistogramValue(metric, val) } +// RecordCountForValue adds count n for value to a histogram +func (m *CirconusMetrics) RecordCountForValue(metric string, val float64, n int64) { + hist := m.NewHistogram(metric) + + m.hm.Lock() + hist.rw.Lock() + hist.hist.RecordValues(val, n) + hist.rw.Unlock() + m.hm.Unlock() +} + // SetHistogramValue adds a value to a histogram func (m *CirconusMetrics) SetHistogramValue(metric string, val float64) { hist := m.NewHistogram(metric) diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/submit.go b/vendor/github.com/circonus-labs/circonus-gometrics/submit.go index 3b0c0e0df5..0ffb450703 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/submit.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/submit.go @@ -7,7 +7,6 @@ package circonusgometrics import ( "bytes" "encoding/json" - "errors" "fmt" "io/ioutil" "log" @@ -18,9 +17,10 @@ import ( "github.com/circonus-labs/circonus-gometrics/api" "github.com/hashicorp/go-retryablehttp" + "github.com/pkg/errors" ) -func (m *CirconusMetrics) submit(output map[string]interface{}, newMetrics map[string]*api.CheckBundleMetric) { +func (m *CirconusMetrics) submit(output Metrics, newMetrics map[string]*api.CheckBundleMetric) { // if there is nowhere to send metrics to, just return. 
if !m.check.IsReady() { @@ -43,6 +43,12 @@ func (m *CirconusMetrics) submit(output map[string]interface{}, newMetrics map[s return } + // OK response from circonus-agent does not + // indicate how many metrics were received + if numStats == -1 { + numStats = len(output) + } + if m.Debug { m.Log.Printf("[DEBUG] %d stats sent\n", numStats) } @@ -51,7 +57,7 @@ func (m *CirconusMetrics) submit(output map[string]interface{}, newMetrics map[s func (m *CirconusMetrics) trapCall(payload []byte) (int, error) { trap, err := m.check.GetSubmissionURL() if err != nil { - return 0, err + return 0, errors.Wrap(err, "trap call") } dataReader := bytes.NewReader(payload) @@ -68,7 +74,7 @@ func (m *CirconusMetrics) trapCall(payload []byte) (int, error) { retryPolicy := func(resp *http.Response, err error) (bool, error) { if err != nil { lastHTTPError = err - return true, err + return true, errors.Wrap(err, "retry policy") } // Check the response code. We retry on 500-range responses to allow // the server time to recover, as 500's are typically not permanent @@ -98,20 +104,24 @@ func (m *CirconusMetrics) trapCall(payload []byte) (int, error) { TLSClientConfig: trap.TLS, DisableKeepAlives: true, MaxIdleConnsPerHost: -1, - DisableCompression: true, + DisableCompression: false, } - } else { + } else if trap.URL.Scheme == "http" { client.HTTPClient.Transport = &http.Transport{ Proxy: http.ProxyFromEnvironment, Dial: (&net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, }).Dial, - TLSHandshakeTimeout: 10 * time.Second, DisableKeepAlives: true, MaxIdleConnsPerHost: -1, - DisableCompression: true, + DisableCompression: false, } + } else if trap.IsSocket { + m.Log.Println("using socket transport") + client.HTTPClient.Transport = trap.SockTransport + } else { + return 0, errors.Errorf("unknown scheme (%s), skipping submission", trap.URL.Scheme) } client.RetryWaitMin = 1 * time.Second client.RetryWaitMax = 5 * time.Second @@ -138,10 +148,17 @@ func (m *CirconusMetrics) trapCall(payload []byte) (int, error) { if attempts == client.RetryMax { m.check.RefreshTrap() } - return 0, err + return 0, errors.Wrap(err, "trap call") } defer resp.Body.Close() + + // no content - expected result from + // circonus-agent when metrics accepted + if resp.StatusCode == http.StatusNoContent { + return -1, nil + } + body, err := ioutil.ReadAll(resp.Body) if err != nil { m.Log.Printf("[ERROR] reading body, proceeding. %s\n", err) @@ -152,7 +169,7 @@ func (m *CirconusMetrics) trapCall(payload []byte) (int, error) { m.Log.Printf("[ERROR] parsing body, proceeding. 
%v (%s)\n", err, body) } - if resp.StatusCode != 200 { + if resp.StatusCode != http.StatusOK { return 0, errors.New("[ERROR] bad response code: " + strconv.Itoa(resp.StatusCode)) } switch v := response["stats"].(type) { diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/tools.go b/vendor/github.com/circonus-labs/circonus-gometrics/tools.go index 73259a7b15..87c80516ba 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/tools.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/tools.go @@ -17,7 +17,6 @@ func (m *CirconusMetrics) TrackHTTPLatency(name string, handler func(http.Respon start := time.Now().UnixNano() handler(rw, req) elapsed := time.Now().UnixNano() - start - //hist := m.NewHistogram("go`HTTP`" + req.Method + "`" + name + "`latency") m.RecordValue("go`HTTP`"+req.Method+"`"+name+"`latency", float64(elapsed)/float64(time.Second)) } } diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/util.go b/vendor/github.com/circonus-labs/circonus-gometrics/util.go index 4428e89851..7c7944653f 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/util.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/util.go @@ -33,7 +33,7 @@ func (m *CirconusMetrics) Reset() { m.counters = make(map[string]uint64) m.counterFuncs = make(map[string]func() uint64) - m.gauges = make(map[string]string) + m.gauges = make(map[string]interface{}) m.gaugeFuncs = make(map[string]func() int64) m.histograms = make(map[string]*Histogram) m.text = make(map[string]string) @@ -41,7 +41,7 @@ func (m *CirconusMetrics) Reset() { } // snapshot returns a copy of the values of all registered counters and gauges. -func (m *CirconusMetrics) snapshot() (c map[string]uint64, g map[string]string, h map[string]*circonusllhist.Histogram, t map[string]string) { +func (m *CirconusMetrics) snapshot() (c map[string]uint64, g map[string]interface{}, h map[string]*circonusllhist.Histogram, t map[string]string) { c = m.snapCounters() g = m.snapGauges() h = m.snapHistograms() @@ -75,23 +75,23 @@ func (m *CirconusMetrics) snapCounters() map[string]uint64 { return c } -func (m *CirconusMetrics) snapGauges() map[string]string { +func (m *CirconusMetrics) snapGauges() map[string]interface{} { m.gm.Lock() defer m.gm.Unlock() m.gfm.Lock() defer m.gfm.Unlock() - g := make(map[string]string, len(m.gauges)+len(m.gaugeFuncs)) + g := make(map[string]interface{}, len(m.gauges)+len(m.gaugeFuncs)) for n, v := range m.gauges { g[n] = v } if m.resetGauges && len(g) > 0 { - m.gauges = make(map[string]string) + m.gauges = make(map[string]interface{}) } for n, f := range m.gaugeFuncs { - g[n] = m.gaugeValString(f()) + g[n] = f() } if m.resetGauges && len(g) > 0 { m.gaugeFuncs = make(map[string]func() int64) diff --git a/vendor/github.com/cockroachdb/cockroach-go/crdb/error.go b/vendor/github.com/cockroachdb/cockroach-go/crdb/error.go new file mode 100644 index 0000000000..fbd6984a2f --- /dev/null +++ b/vendor/github.com/cockroachdb/cockroach-go/crdb/error.go @@ -0,0 +1,51 @@ +package crdb + +import "fmt" + +type txError struct { + cause error +} + +// Error implements the error interface +func (e *txError) Error() string { return e.cause.Error() } + +// Cause returns the error encountered by the "ROLLBACK TO SAVEPOINT +// cockroach_restart" statement. This method also implements the internal +// pkg/errors.causer interface, so TxnRestartError works with pkg/errors.Cause(). 
+func (e *txError) Cause() error { return e.cause } + +// AmbiguousCommitError represents an error that left a transaction in an +// ambiguous state: unclear if it committed or not. +type AmbiguousCommitError struct { + txError +} + +func newAmbiguousCommitError(err error) *AmbiguousCommitError { + return &AmbiguousCommitError{txError{cause: err}} +} + +// TxnRestartError represents an error when restarting a transaction. `cause` is +// the error from restarting the txn and `retryCause` is the original error which +// triggered the restart. +type TxnRestartError struct { + txError + retryCause error + msg string +} + +func newTxnRestartError(err error, retryErr error) *TxnRestartError { + const msgPattern = "restarting txn failed. ROLLBACK TO SAVEPOINT " + + "encountered error: %s. Original error: %s." + return &TxnRestartError{ + txError: txError{cause: err}, + retryCause: retryErr, + msg: fmt.Sprintf(msgPattern, err, retryErr), + } +} + +// Error implements the error interface +func (e *TxnRestartError) Error() string { return e.msg } + +// RetryCause returns the error encountered by the transaction, which caused the +// transaction to be restarted. +func (e *TxnRestartError) RetryCause() error { return e.retryCause } diff --git a/vendor/github.com/cockroachdb/cockroach-go/crdb/tx.go b/vendor/github.com/cockroachdb/cockroach-go/crdb/tx.go index d622943d89..95be5f1530 100644 --- a/vendor/github.com/cockroachdb/cockroach-go/crdb/tx.go +++ b/vendor/github.com/cockroachdb/cockroach-go/crdb/tx.go @@ -19,17 +19,10 @@ package crdb import ( "context" "database/sql" - "fmt" "github.com/lib/pq" ) -// AmbiguousCommitError represents an error that left a transaction in an -// ambiguous state: unclear if it committed or not. -type AmbiguousCommitError struct { - error -} - // ExecuteTx runs fn inside a transaction and retries it as needed. // On non-retryable failures, the transaction is aborted and rolled // back; on success, the transaction is committed. @@ -37,11 +30,13 @@ type AmbiguousCommitError struct { // we err on RELEASE with a communication error it's unclear if the transaction // has been committed or not (similar to erroring on COMMIT in other databases). // In that case, we return AmbiguousCommitError. +// There are cases when restarting a transaction fails: we err on ROLLBACK +// to the SAVEPOINT. In that case, we return a TxnRestartError. // // For more information about CockroachDB's transaction model see // https://cockroachlabs.com/docs/stable/transactions.html. // -// NOTE: the supplied exec closure should not have external side +// NOTE: the supplied fn closure should not have external side // effects beyond changes to the database. func ExecuteTx(ctx context.Context, db *sql.DB, txopts *sql.TxOptions, fn func(*sql.Tx) error) error { // Start a transaction. @@ -99,24 +94,12 @@ func ExecuteInTx(ctx context.Context, tx Tx, fn func() error) (err error) { pqErr, ok := err.(*pq.Error) if retryable := ok && (pqErr.Code == "CR000" || pqErr.Code == "40001"); !retryable { if released { - err = &AmbiguousCommitError{err} + err = newAmbiguousCommitError(err) } return err } - if _, err = tx.ExecContext(ctx, "ROLLBACK TO SAVEPOINT cockroach_restart"); err != nil { - // ROLLBACK TO SAVEPOINT failed. If it failed with a lib/pq error, we want - // to pass this error to the client, but also include the original error - // message and code. So, we'll do some surgery on lib/pq errors in - // particular. - // If it failed with any other error (e.g. 
the "driver: bad connection" is - // untyped), we overwrite the error. - msgPattern := "restarting txn failed. ROLLBACK TO SAVEPOINT encountered error: %s. " + - "Original error (code: %s): %s." - if rollbackPQErr, ok := err.(*pq.Error); ok { - rollbackPQErr.Message = fmt.Sprintf(msgPattern, rollbackPQErr, pqErr.Code, pqErr) - return rollbackPQErr - } - return fmt.Errorf(msgPattern, err, pqErr.Code, pqErr) + if _, retryErr := tx.ExecContext(ctx, "ROLLBACK TO SAVEPOINT cockroach_restart"); retryErr != nil { + return newTxnRestartError(retryErr, err) } } } diff --git a/vendor/github.com/coreos/etcd/client/client.go b/vendor/github.com/coreos/etcd/client/client.go index 3c8948252f..e687450566 100644 --- a/vendor/github.com/coreos/etcd/client/client.go +++ b/vendor/github.com/coreos/etcd/client/client.go @@ -670,8 +670,15 @@ func (r *redirectedHTTPAction) HTTPRequest(ep url.URL) *http.Request { } func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL { - p := r.Perm(len(eps)) - neps := make([]url.URL, len(eps)) + // copied from Go 1.9<= rand.Rand.Perm + n := len(eps) + p := make([]int, n) + for i := 0; i < n; i++ { + j := r.Intn(i + 1) + p[i] = p[j] + p[j] = i + } + neps := make([]url.URL, n) for i, k := range p { neps[i] = eps[k] } diff --git a/vendor/github.com/coreos/etcd/client/keys.generated.go b/vendor/github.com/coreos/etcd/client/keys.generated.go index 216139c9cc..237fdbe8ff 100644 --- a/vendor/github.com/coreos/etcd/client/keys.generated.go +++ b/vendor/github.com/coreos/etcd/client/keys.generated.go @@ -8,49 +8,50 @@ package client import ( "errors" "fmt" - codec1978 "github.com/ugorji/go/codec" "reflect" "runtime" time "time" + + codec1978 "github.com/ugorji/go/codec" ) const ( // ----- content types ---- - codecSelferC_UTF81819 = 1 - codecSelferC_RAW1819 = 0 + codecSelferC_UTF87612 = 1 + codecSelferC_RAW7612 = 0 // ----- value types used ---- - codecSelferValueTypeArray1819 = 10 - codecSelferValueTypeMap1819 = 9 + codecSelferValueTypeArray7612 = 10 + codecSelferValueTypeMap7612 = 9 // ----- containerStateValues ---- - codecSelfer_containerMapKey1819 = 2 - codecSelfer_containerMapValue1819 = 3 - codecSelfer_containerMapEnd1819 = 4 - codecSelfer_containerArrayElem1819 = 6 - codecSelfer_containerArrayEnd1819 = 7 + codecSelfer_containerMapKey7612 = 2 + codecSelfer_containerMapValue7612 = 3 + codecSelfer_containerMapEnd7612 = 4 + codecSelfer_containerArrayElem7612 = 6 + codecSelfer_containerArrayEnd7612 = 7 ) var ( - codecSelferBitsize1819 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1819 = errors.New(`only encoded map or array can be decoded into a struct`) + codecSelferBitsize7612 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr7612 = errors.New(`only encoded map or array can be decoded into a struct`) ) -type codecSelfer1819 struct{} +type codecSelfer7612 struct{} func init() { - if codec1978.GenVersion != 5 { + if codec1978.GenVersion != 8 { _, file, _, _ := runtime.Caller(0) err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", - 5, codec1978.GenVersion, file) + 8, codec1978.GenVersion, file) panic(err) } if false { // reference the types, but skip this branch at build/run time - var v0 time.Time + var v0 time.Duration _ = v0 } } -func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1819 +func (x *Error) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { @@ -63,86 +64,100 @@ func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 + _, _ = yysep2, yy2arr2 const yyr2 bool = false - var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) + r.WriteArrayStart(4) } else { - yynn2 = 3 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 + r.WriteMapStart(4) } if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) + r.WriteArrayElem() yym4 := z.EncBinary() _ = yym4 if false { } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Action)) + r.EncodeInt(int64(x.Code)) } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("action")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("errorCode")) + r.WriteMapElemValue() yym5 := z.EncBinary() _ = yym5 if false { } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Action)) + r.EncodeInt(int64(x.Code)) } } if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if x.Node == nil { - r.EncodeNil() + r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { } else { - x.Node.CodecEncodeSelf(e) + r.EncodeString(codecSelferC_UTF87612, string(x.Message)) } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("node")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - if x.Node == nil { - r.EncodeNil() + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("message")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { } else { - x.Node.CodecEncodeSelf(e) + r.EncodeString(codecSelferC_UTF87612, string(x.Message)) } } if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if x.PrevNode == nil { - r.EncodeNil() + r.WriteArrayElem() + yym10 := z.EncBinary() + _ = yym10 + if false { } else { - x.PrevNode.CodecEncodeSelf(e) + r.EncodeString(codecSelferC_UTF87612, string(x.Cause)) } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("prevNode")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - if x.PrevNode == nil { - r.EncodeNil() + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("cause")) + r.WriteMapElemValue() + yym11 := z.EncBinary() + _ = yym11 + if false { } else { - x.PrevNode.CodecEncodeSelf(e) + r.EncodeString(codecSelferC_UTF87612, string(x.Cause)) } } if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1819) + r.WriteArrayElem() + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeUint(uint64(x.Index)) + } } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1819) + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("index")) + r.WriteMapElemValue() + yym14 := 
z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeUint(uint64(x.Index)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() } } } } -func (x *Response) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1819 +func (x *Error) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r yym1 := z.DecBinary() @@ -151,28 +166,28 @@ func (x *Response) CodecDecodeSelf(d *codec1978.Decoder) { } else if z.HasExtensions() && z.DecExt(x) { } else { yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1819 { + if yyct2 == codecSelferValueTypeMap7612 { yyl2 := r.ReadMapStart() if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1819) + r.ReadMapEnd() } else { x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2 == codecSelferValueTypeArray1819 { + } else if yyct2 == codecSelferValueTypeArray7612 { yyl2 := r.ReadArrayStart() if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + r.ReadArrayEnd() } else { x.codecDecodeSelfFromArray(yyl2, d) } } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1819) + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) } } } -func (x *Response) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1819 +func (x *Error) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r var yys3Slc = z.DecScratchBuffer() // default slice to decode into @@ -188,10 +203,1816 @@ func (x *Response) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { break } } - z.DecSendContainerState(codecSelfer_containerMapKey1819) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1819) + r.ReadMapElemValue() + switch yys3 { + case "errorCode": + if r.TryDecodeAsNil() { + x.Code = 0 + } else { + yyv4 := &x.Code + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int)(yyv4)) = int(r.DecodeInt(codecSelferBitsize7612)) + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv6 := &x.Message + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "cause": + if r.TryDecodeAsNil() { + x.Cause = "" + } else { + yyv8 := &x.Cause + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "index": + if r.TryDecodeAsNil() { + x.Index = 0 + } else { + yyv10 := &x.Index + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*uint64)(yyv10)) = uint64(r.DecodeUint(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *Error) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Code = 0 + } else { + yyv13 := &x.Code + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*int)(yyv13)) = int(r.DecodeInt(codecSelferBitsize7612)) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return 
+ } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv15 := &x.Message + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Cause = "" + } else { + yyv17 := &x.Cause + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Index = 0 + } else { + yyv19 := &x.Index + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*uint64)(yyv19)) = uint64(r.DecodeUint(64)) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj12-1, "") + } + r.ReadArrayEnd() +} + +func (x PrevExistType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x)) + } +} + +func (x *PrevExistType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *WatcherOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(2) + } else { + r.WriteMapStart(2) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeUint(uint64(x.AfterIndex)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("AfterIndex")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeUint(uint64(x.AfterIndex)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Recursive")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *WatcherOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 
== 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *WatcherOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "AfterIndex": + if r.TryDecodeAsNil() { + x.AfterIndex = 0 + } else { + yyv4 := &x.AfterIndex + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*uint64)(yyv4)) = uint64(r.DecodeUint(64)) + } + } + case "Recursive": + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv6 := &x.Recursive + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*bool)(yyv6)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *WatcherOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.AfterIndex = 0 + } else { + yyv9 := &x.AfterIndex + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*uint64)(yyv9)) = uint64(r.DecodeUint(64)) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv11 := &x.Recursive + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*bool)(yyv11)) = r.DecodeBool() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj8-1, "") + } + r.ReadArrayEnd() +} + +func (x *CreateInOrderOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(1) + } else { + r.WriteMapStart(1) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else { + r.EncodeInt(int64(x.TTL)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("TTL")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else { + r.EncodeInt(int64(x.TTL)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *CreateInOrderOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } 
else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *CreateInOrderOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "TTL": + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv4 := &x.TTL + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + *((*int64)(yyv4)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *CreateInOrderOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv7 := &x.TTL + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(yyv7) { + } else { + *((*int64)(yyv7)) = int64(r.DecodeInt(64)) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj6-1, "") + } + r.ReadArrayEnd() +} + +func (x *SetOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(7) + } else { + r.WriteMapStart(7) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevValue")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeUint(uint64(x.PrevIndex)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevIndex")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeUint(uint64(x.PrevIndex)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + x.PrevExist.CodecEncodeSelf(e) + } else { + r.WriteMapElemKey() + 
r.EncodeString(codecSelferC_UTF87612, string("PrevExist")) + r.WriteMapElemValue() + x.PrevExist.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else { + r.EncodeInt(int64(x.TTL)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("TTL")) + r.WriteMapElemValue() + yym14 := z.EncBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else { + r.EncodeInt(int64(x.TTL)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(x.Refresh)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Refresh")) + r.WriteMapElemValue() + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeBool(bool(x.Refresh)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Dir")) + r.WriteMapElemValue() + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeBool(bool(x.NoValueOnSuccess)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("NoValueOnSuccess")) + r.WriteMapElemValue() + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeBool(bool(x.NoValueOnSuccess)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *SetOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *SetOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "PrevValue": + if r.TryDecodeAsNil() { + x.PrevValue = "" + } else { + yyv4 := &x.PrevValue + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "PrevIndex": + if r.TryDecodeAsNil() { + x.PrevIndex = 0 + } else { + yyv6 := &x.PrevIndex + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*uint64)(yyv6)) = uint64(r.DecodeUint(64)) + } + } + case "PrevExist": + if r.TryDecodeAsNil() { + x.PrevExist = "" + } else { + yyv8 := &x.PrevExist + yyv8.CodecDecodeSelf(d) + } + case "TTL": + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { 
+ yyv9 := &x.TTL + yym10 := z.DecBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.DecExt(yyv9) { + } else { + *((*int64)(yyv9)) = int64(r.DecodeInt(64)) + } + } + case "Refresh": + if r.TryDecodeAsNil() { + x.Refresh = false + } else { + yyv11 := &x.Refresh + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*bool)(yyv11)) = r.DecodeBool() + } + } + case "Dir": + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv13 := &x.Dir + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*bool)(yyv13)) = r.DecodeBool() + } + } + case "NoValueOnSuccess": + if r.TryDecodeAsNil() { + x.NoValueOnSuccess = false + } else { + yyv15 := &x.NoValueOnSuccess + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(yyv15)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *SetOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj17 int + var yyb17 bool + var yyhl17 bool = l >= 0 + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevValue = "" + } else { + yyv18 := &x.PrevValue + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*string)(yyv18)) = r.DecodeString() + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevIndex = 0 + } else { + yyv20 := &x.PrevIndex + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*uint64)(yyv20)) = uint64(r.DecodeUint(64)) + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevExist = "" + } else { + yyv22 := &x.PrevExist + yyv22.CodecDecodeSelf(d) + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv23 := &x.TTL + yym24 := z.DecBinary() + _ = yym24 + if false { + } else if z.HasExtensions() && z.DecExt(yyv23) { + } else { + *((*int64)(yyv23)) = int64(r.DecodeInt(64)) + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Refresh = false + } else { + yyv25 := &x.Refresh + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*bool)(yyv25)) = r.DecodeBool() + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv27 := &x.Dir + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*bool)(yyv27)) = r.DecodeBool() + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.NoValueOnSuccess = false + } else { + yyv29 := &x.NoValueOnSuccess + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*bool)(yyv29)) = r.DecodeBool() + } + } + for { + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = 
r.CheckBreak() + } + if yyb17 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj17-1, "") + } + r.ReadArrayEnd() +} + +func (x *GetOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(3) + } else { + r.WriteMapStart(3) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Recursive")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeBool(bool(x.Sort)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Sort")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeBool(bool(x.Sort)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(x.Quorum)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Quorum")) + r.WriteMapElemValue() + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(x.Quorum)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *GetOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *GetOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "Recursive": + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv4 := &x.Recursive + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*bool)(yyv4)) = r.DecodeBool() + } + } + case "Sort": + if r.TryDecodeAsNil() { + x.Sort = false + } else { + yyv6 := &x.Sort + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*bool)(yyv6)) = r.DecodeBool() + } + } + case "Quorum": + if r.TryDecodeAsNil() { + x.Quorum = false + } else { + yyv8 := &x.Quorum + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = 
r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *GetOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv11 := &x.Recursive + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*bool)(yyv11)) = r.DecodeBool() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Sort = false + } else { + yyv13 := &x.Sort + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*bool)(yyv13)) = r.DecodeBool() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Quorum = false + } else { + yyv15 := &x.Quorum + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(yyv15)) = r.DecodeBool() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj10-1, "") + } + r.ReadArrayEnd() +} + +func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(4) + } else { + r.WriteMapStart(4) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevValue")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeUint(uint64(x.PrevIndex)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevIndex")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeUint(uint64(x.PrevIndex)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Recursive")) + r.WriteMapElemValue() + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Dir")) + r.WriteMapElemValue() + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } + 
if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *DeleteOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *DeleteOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "PrevValue": + if r.TryDecodeAsNil() { + x.PrevValue = "" + } else { + yyv4 := &x.PrevValue + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "PrevIndex": + if r.TryDecodeAsNil() { + x.PrevIndex = 0 + } else { + yyv6 := &x.PrevIndex + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*uint64)(yyv6)) = uint64(r.DecodeUint(64)) + } + } + case "Recursive": + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv8 := &x.Recursive + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + case "Dir": + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv10 := &x.Dir + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *DeleteOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevValue = "" + } else { + yyv13 := &x.PrevValue + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevIndex = 0 + } else { + yyv15 := &x.PrevIndex + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*uint64)(yyv15)) = uint64(r.DecodeUint(64)) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv17 := &x.Recursive + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*bool)(yyv17)) = r.DecodeBool() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + 
if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv19 := &x.Dir + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj12-1, "") + } + r.ReadArrayEnd() +} + +func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(3) + } else { + r.WriteMapStart(3) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Action)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("action")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Action)) + } + } + var yyn6 bool + if x.Node == nil { + yyn6 = true + goto LABEL6 + } + LABEL6: + if yyr2 || yy2arr2 { + if yyn6 { + r.WriteArrayElem() + r.EncodeNil() + } else { + r.WriteArrayElem() + if x.Node == nil { + r.EncodeNil() + } else { + x.Node.CodecEncodeSelf(e) + } + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("node")) + r.WriteMapElemValue() + if yyn6 { + r.EncodeNil() + } else { + if x.Node == nil { + r.EncodeNil() + } else { + x.Node.CodecEncodeSelf(e) + } + } + } + var yyn9 bool + if x.PrevNode == nil { + yyn9 = true + goto LABEL9 + } + LABEL9: + if yyr2 || yy2arr2 { + if yyn9 { + r.WriteArrayElem() + r.EncodeNil() + } else { + r.WriteArrayElem() + if x.PrevNode == nil { + r.EncodeNil() + } else { + x.PrevNode.CodecEncodeSelf(e) + } + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("prevNode")) + r.WriteMapElemValue() + if yyn9 { + r.EncodeNil() + } else { + if x.PrevNode == nil { + r.EncodeNil() + } else { + x.PrevNode.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *Response) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *Response) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + 
r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() switch yys3 { case "action": if r.TryDecodeAsNil() { @@ -206,6 +2027,9 @@ func (x *Response) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } case "node": + if x.Node == nil { + x.Node = new(Node) + } if r.TryDecodeAsNil() { if x.Node != nil { x.Node = nil @@ -217,6 +2041,9 @@ func (x *Response) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { x.Node.CodecDecodeSelf(d) } case "prevNode": + if x.PrevNode == nil { + x.PrevNode = new(Node) + } if r.TryDecodeAsNil() { if x.PrevNode != nil { x.PrevNode = nil @@ -231,11 +2058,11 @@ func (x *Response) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { z.DecStructFieldNotFound(-1, yys3) } // end switch yys3 } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1819) + r.ReadMapEnd() } func (x *Response) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1819 + var h codecSelfer7612 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r var yyj8 int @@ -248,10 +2075,10 @@ func (x *Response) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { yyb8 = r.CheckBreak() } if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + r.ReadArrayEnd() return } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) + r.ReadArrayElem() if r.TryDecodeAsNil() { x.Action = "" } else { @@ -263,6 +2090,9 @@ func (x *Response) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { *((*string)(yyv9)) = r.DecodeString() } } + if x.Node == nil { + x.Node = new(Node) + } yyj8++ if yyhl8 { yyb8 = yyj8 > l @@ -270,10 +2100,10 @@ func (x *Response) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { yyb8 = r.CheckBreak() } if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + r.ReadArrayEnd() return } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) + r.ReadArrayElem() if r.TryDecodeAsNil() { if x.Node != nil { x.Node = nil @@ -284,6 +2114,9 @@ func (x *Response) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { } x.Node.CodecDecodeSelf(d) } + if x.PrevNode == nil { + x.PrevNode = new(Node) + } yyj8++ if yyhl8 { yyb8 = yyj8 > l @@ -291,10 +2124,10 @@ func (x *Response) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { yyb8 = r.CheckBreak() } if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + r.ReadArrayEnd() return } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) + r.ReadArrayElem() if r.TryDecodeAsNil() { if x.PrevNode != nil { x.PrevNode = nil @@ -315,14 +2148,14 @@ func (x *Response) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if yyb8 { break } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) + r.ReadArrayElem() z.DecStructFieldNotFound(yyj8-1, "") } - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + r.ReadArrayEnd() } func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1819 + var h codecSelfer7612 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { @@ -336,45 +2169,44 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray var yyq2 [8]bool - _, _, _ = yysep2, yyq2, yy2arr2 + _ = yyq2 + _, _ = yysep2, yy2arr2 const yyr2 bool = false yyq2[1] = x.Dir != false yyq2[6] = x.Expiration != nil yyq2[7] = x.TTL != 0 - var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(8) + r.WriteArrayStart(8) } else { - yynn2 = 5 + var yynn2 = 5 for _, b := range yyq2 { if b { 
yynn2++ } } - r.EncodeMapStart(yynn2) - yynn2 = 0 + r.WriteMapStart(yynn2) } if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) + r.WriteArrayElem() yym4 := z.EncBinary() _ = yym4 if false { } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Key)) + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("key")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("key")) + r.WriteMapElemValue() yym5 := z.EncBinary() _ = yym5 if false { } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Key)) + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) } } if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) + r.WriteArrayElem() if yyq2[1] { yym7 := z.EncBinary() _ = yym7 @@ -387,9 +2219,9 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { } } else { if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("dir")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("dir")) + r.WriteMapElemValue() yym8 := z.EncBinary() _ = yym8 if false { @@ -399,35 +2231,35 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { } } if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) + r.WriteArrayElem() yym10 := z.EncBinary() _ = yym10 if false { } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Value)) + r.EncodeString(codecSelferC_UTF87612, string(x.Value)) } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("value")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("value")) + r.WriteMapElemValue() yym11 := z.EncBinary() _ = yym11 if false { } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Value)) + r.EncodeString(codecSelferC_UTF87612, string(x.Value)) } } if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) + r.WriteArrayElem() if x.Nodes == nil { r.EncodeNil() } else { x.Nodes.CodecEncodeSelf(e) } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("nodes")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("nodes")) + r.WriteMapElemValue() if x.Nodes == nil { r.EncodeNil() } else { @@ -435,7 +2267,7 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { } } if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) + r.WriteArrayElem() yym16 := z.EncBinary() _ = yym16 if false { @@ -443,9 +2275,9 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeUint(uint64(x.CreatedIndex)) } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("createdIndex")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("createdIndex")) + r.WriteMapElemValue() yym17 := z.EncBinary() _ = yym17 if false { @@ -454,7 +2286,7 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { } } if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) + 
r.WriteArrayElem() yym19 := z.EncBinary() _ = yym19 if false { @@ -462,9 +2294,9 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeUint(uint64(x.ModifiedIndex)) } } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("modifiedIndex")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("modifiedIndex")) + r.WriteMapElemValue() yym20 := z.EncBinary() _ = yym20 if false { @@ -472,55 +2304,70 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeUint(uint64(x.ModifiedIndex)) } } + var yyn21 bool + if x.Expiration == nil { + yyn21 = true + goto LABEL21 + } + LABEL21: if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if yyq2[6] { - if x.Expiration == nil { - r.EncodeNil() - } else { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else if yym23 := z.TimeRtidIfBinc(); yym23 != 0 { - r.EncodeBuiltin(yym23, x.Expiration) - } else if z.HasExtensions() && z.EncExt(x.Expiration) { - } else if yym22 { - z.EncBinaryMarshal(x.Expiration) - } else if !yym22 && z.IsJSONHandle() { - z.EncJSONMarshal(x.Expiration) - } else { - z.EncFallback(x.Expiration) - } - } - } else { + if yyn21 { + r.WriteArrayElem() r.EncodeNil() + } else { + r.WriteArrayElem() + if yyq2[6] { + if x.Expiration == nil { + r.EncodeNil() + } else { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else if yym23 := z.TimeRtidIfBinc(); yym23 != 0 { + r.EncodeBuiltin(yym23, x.Expiration) + } else if z.HasExtensions() && z.EncExt(x.Expiration) { + } else if yym22 { + z.EncBinaryMarshal(x.Expiration) + } else if !yym22 && z.IsJSONHandle() { + z.EncJSONMarshal(x.Expiration) + } else { + z.EncFallback(x.Expiration) + } + } + } else { + r.EncodeNil() + } } } else { if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("expiration")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - if x.Expiration == nil { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("expiration")) + r.WriteMapElemValue() + if yyn21 { r.EncodeNil() } else { - yym24 := z.EncBinary() - _ = yym24 - if false { - } else if yym25 := z.TimeRtidIfBinc(); yym25 != 0 { - r.EncodeBuiltin(yym25, x.Expiration) - } else if z.HasExtensions() && z.EncExt(x.Expiration) { - } else if yym24 { - z.EncBinaryMarshal(x.Expiration) - } else if !yym24 && z.IsJSONHandle() { - z.EncJSONMarshal(x.Expiration) + if x.Expiration == nil { + r.EncodeNil() } else { - z.EncFallback(x.Expiration) + yym24 := z.EncBinary() + _ = yym24 + if false { + } else if yym25 := z.TimeRtidIfBinc(); yym25 != 0 { + r.EncodeBuiltin(yym25, x.Expiration) + } else if z.HasExtensions() && z.EncExt(x.Expiration) { + } else if yym24 { + z.EncBinaryMarshal(x.Expiration) + } else if !yym24 && z.IsJSONHandle() { + z.EncJSONMarshal(x.Expiration) + } else { + z.EncFallback(x.Expiration) + } } } } } if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) + r.WriteArrayElem() if yyq2[7] { yym27 := z.EncBinary() _ = yym27 @@ -533,9 +2380,9 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { } } else { if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("ttl")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("ttl")) + r.WriteMapElemValue() yym28 := 
z.EncBinary() _ = yym28 if false { @@ -545,16 +2392,16 @@ func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { } } if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1819) + r.WriteArrayEnd() } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1819) + r.WriteMapEnd() } } } } func (x *Node) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1819 + var h codecSelfer7612 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r yym1 := z.DecBinary() @@ -563,28 +2410,28 @@ func (x *Node) CodecDecodeSelf(d *codec1978.Decoder) { } else if z.HasExtensions() && z.DecExt(x) { } else { yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1819 { + if yyct2 == codecSelferValueTypeMap7612 { yyl2 := r.ReadMapStart() if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1819) + r.ReadMapEnd() } else { x.codecDecodeSelfFromMap(yyl2, d) } - } else if yyct2 == codecSelferValueTypeArray1819 { + } else if yyct2 == codecSelferValueTypeArray7612 { yyl2 := r.ReadArrayStart() if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + r.ReadArrayEnd() } else { x.codecDecodeSelfFromArray(yyl2, d) } } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1819) + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) } } } func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1819 + var h codecSelfer7612 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r var yys3Slc = z.DecScratchBuffer() // default slice to decode into @@ -600,10 +2447,10 @@ func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { break } } - z.DecSendContainerState(codecSelfer_containerMapKey1819) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1819) + r.ReadMapElemValue() switch yys3 { case "key": if r.TryDecodeAsNil() { @@ -673,6 +2520,9 @@ func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } } case "expiration": + if x.Expiration == nil { + x.Expiration = new(time.Time) + } if r.TryDecodeAsNil() { if x.Expiration != nil { x.Expiration = nil @@ -711,11 +2561,11 @@ func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { z.DecStructFieldNotFound(-1, yys3) } // end switch yys3 } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1819) + r.ReadMapEnd() } func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1819 + var h codecSelfer7612 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r var yyj20 int @@ -728,10 +2578,10 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { yyb20 = r.CheckBreak() } if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + r.ReadArrayEnd() return } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) + r.ReadArrayElem() if r.TryDecodeAsNil() { x.Key = "" } else { @@ -750,10 +2600,10 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { yyb20 = r.CheckBreak() } if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + r.ReadArrayEnd() return } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) + r.ReadArrayElem() if r.TryDecodeAsNil() { x.Dir = false } else { @@ -772,10 +2622,10 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { yyb20 = r.CheckBreak() } if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + r.ReadArrayEnd() return 
} - z.DecSendContainerState(codecSelfer_containerArrayElem1819) + r.ReadArrayElem() if r.TryDecodeAsNil() { x.Value = "" } else { @@ -794,10 +2644,10 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { yyb20 = r.CheckBreak() } if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + r.ReadArrayEnd() return } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) + r.ReadArrayElem() if r.TryDecodeAsNil() { x.Nodes = nil } else { @@ -811,10 +2661,10 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { yyb20 = r.CheckBreak() } if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + r.ReadArrayEnd() return } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) + r.ReadArrayElem() if r.TryDecodeAsNil() { x.CreatedIndex = 0 } else { @@ -833,10 +2683,10 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { yyb20 = r.CheckBreak() } if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + r.ReadArrayEnd() return } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) + r.ReadArrayElem() if r.TryDecodeAsNil() { x.ModifiedIndex = 0 } else { @@ -848,6 +2698,9 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { *((*uint64)(yyv30)) = uint64(r.DecodeUint(64)) } } + if x.Expiration == nil { + x.Expiration = new(time.Time) + } yyj20++ if yyhl20 { yyb20 = yyj20 > l @@ -855,10 +2708,10 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { yyb20 = r.CheckBreak() } if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + r.ReadArrayEnd() return } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) + r.ReadArrayElem() if r.TryDecodeAsNil() { if x.Expiration != nil { x.Expiration = nil @@ -888,10 +2741,10 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { yyb20 = r.CheckBreak() } if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + r.ReadArrayEnd() return } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) + r.ReadArrayElem() if r.TryDecodeAsNil() { x.TTL = 0 } else { @@ -913,14 +2766,14 @@ func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { if yyb20 { break } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) + r.ReadArrayElem() z.DecStructFieldNotFound(yyj20-1, "") } - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) + r.ReadArrayEnd() } func (x Nodes) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1819 + var h codecSelfer7612 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r if x == nil { @@ -937,7 +2790,7 @@ func (x Nodes) CodecEncodeSelf(e *codec1978.Encoder) { } func (x *Nodes) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1819 + var h codecSelfer7612 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r yym1 := z.DecBinary() @@ -949,24 +2802,2333 @@ func (x *Nodes) CodecDecodeSelf(d *codec1978.Decoder) { } } -func (x codecSelfer1819) encNodes(v Nodes, e *codec1978.Encoder) { - var h codecSelfer1819 +func (x *httpKeysAPI) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + 
r.WriteArrayStart(0) + } else { + r.WriteMapStart(0) + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *httpKeysAPI) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *httpKeysAPI) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *httpKeysAPI) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj4 int + var yyb4 bool + var yyhl4 bool = l >= 0 + for { + yyj4++ + if yyhl4 { + yyb4 = yyj4 > l + } else { + yyb4 = r.CheckBreak() + } + if yyb4 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj4-1, "") + } + r.ReadArrayEnd() +} + +func (x *httpWatcher) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(0) + } else { + r.WriteMapStart(0) + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *httpWatcher) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *httpWatcher) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + 
r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *httpWatcher) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj4 int + var yyb4 bool + var yyhl4 bool = l >= 0 + for { + yyj4++ + if yyhl4 { + yyb4 = yyj4 > l + } else { + yyb4 = r.CheckBreak() + } + if yyb4 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj4-1, "") + } + r.ReadArrayEnd() +} + +func (x *getAction) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(5) + } else { + r.WriteMapStart(5) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Prefix")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Key")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Recursive")) + r.WriteMapElemValue() + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.Sorted)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Sorted")) + r.WriteMapElemValue() + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.Sorted)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(x.Quorum)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Quorum")) + r.WriteMapElemValue() + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeBool(bool(x.Quorum)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *getAction) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } 
else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *getAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "Prefix": + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv4 := &x.Prefix + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "Key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv6 := &x.Key + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "Recursive": + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv8 := &x.Recursive + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + case "Sorted": + if r.TryDecodeAsNil() { + x.Sorted = false + } else { + yyv10 := &x.Sorted + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + case "Quorum": + if r.TryDecodeAsNil() { + x.Quorum = false + } else { + yyv12 := &x.Quorum + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*bool)(yyv12)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *getAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv15 := &x.Prefix + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv17 := &x.Key + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv19 := &x.Recursive + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Sorted = false + } else { + yyv21 := &x.Sorted + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*bool)(yyv21)) = r.DecodeBool() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + r.ReadArrayEnd() + return + } + 
r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Quorum = false + } else { + yyv23 := &x.Quorum + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*bool)(yyv23)) = r.DecodeBool() + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj14-1, "") + } + r.ReadArrayEnd() +} + +func (x *waitAction) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(4) + } else { + r.WriteMapStart(4) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Prefix")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Key")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeUint(uint64(x.WaitIndex)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("WaitIndex")) + r.WriteMapElemValue() + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeUint(uint64(x.WaitIndex)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Recursive")) + r.WriteMapElemValue() + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *waitAction) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *waitAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + 
} + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "Prefix": + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv4 := &x.Prefix + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "Key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv6 := &x.Key + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "WaitIndex": + if r.TryDecodeAsNil() { + x.WaitIndex = 0 + } else { + yyv8 := &x.WaitIndex + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*uint64)(yyv8)) = uint64(r.DecodeUint(64)) + } + } + case "Recursive": + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv10 := &x.Recursive + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *waitAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv13 := &x.Prefix + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv15 := &x.Key + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.WaitIndex = 0 + } else { + yyv17 := &x.WaitIndex + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*uint64)(yyv17)) = uint64(r.DecodeUint(64)) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv19 := &x.Recursive + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj12-1, "") + } + r.ReadArrayEnd() +} + +func (x *setAction) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(10) + } else { + r.WriteMapStart(10) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } else { + 
r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Prefix")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Key")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Value)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Value")) + r.WriteMapElemValue() + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Value)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevValue")) + r.WriteMapElemValue() + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeUint(uint64(x.PrevIndex)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevIndex")) + r.WriteMapElemValue() + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeUint(uint64(x.PrevIndex)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + x.PrevExist.CodecEncodeSelf(e) + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevExist")) + r.WriteMapElemValue() + x.PrevExist.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym22 := z.EncBinary() + _ = yym22 + if false { + } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else { + r.EncodeInt(int64(x.TTL)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("TTL")) + r.WriteMapElemValue() + yym23 := z.EncBinary() + _ = yym23 + if false { + } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else { + r.EncodeInt(int64(x.TTL)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeBool(bool(x.Refresh)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Refresh")) + r.WriteMapElemValue() + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeBool(bool(x.Refresh)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Dir")) + r.WriteMapElemValue() + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym31 := z.EncBinary() + _ = yym31 + if false { + } else { + r.EncodeBool(bool(x.NoValueOnSuccess)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("NoValueOnSuccess")) + r.WriteMapElemValue() + yym32 := z.EncBinary() + _ = yym32 + if false { + } else { + 
r.EncodeBool(bool(x.NoValueOnSuccess)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *setAction) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *setAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "Prefix": + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv4 := &x.Prefix + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "Key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv6 := &x.Key + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "Value": + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv8 := &x.Value + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "PrevValue": + if r.TryDecodeAsNil() { + x.PrevValue = "" + } else { + yyv10 := &x.PrevValue + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "PrevIndex": + if r.TryDecodeAsNil() { + x.PrevIndex = 0 + } else { + yyv12 := &x.PrevIndex + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*uint64)(yyv12)) = uint64(r.DecodeUint(64)) + } + } + case "PrevExist": + if r.TryDecodeAsNil() { + x.PrevExist = "" + } else { + yyv14 := &x.PrevExist + yyv14.CodecDecodeSelf(d) + } + case "TTL": + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv15 := &x.TTL + yym16 := z.DecBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.DecExt(yyv15) { + } else { + *((*int64)(yyv15)) = int64(r.DecodeInt(64)) + } + } + case "Refresh": + if r.TryDecodeAsNil() { + x.Refresh = false + } else { + yyv17 := &x.Refresh + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*bool)(yyv17)) = r.DecodeBool() + } + } + case "Dir": + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv19 := &x.Dir + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } + } + case "NoValueOnSuccess": + if r.TryDecodeAsNil() { + x.NoValueOnSuccess = false + } else { + yyv21 := &x.NoValueOnSuccess + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*bool)(yyv21)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *setAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 
+ z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj23 int + var yyb23 bool + var yyhl23 bool = l >= 0 + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv24 := &x.Prefix + yym25 := z.DecBinary() + _ = yym25 + if false { + } else { + *((*string)(yyv24)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv26 := &x.Key + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + *((*string)(yyv26)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv28 := &x.Value + yym29 := z.DecBinary() + _ = yym29 + if false { + } else { + *((*string)(yyv28)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevValue = "" + } else { + yyv30 := &x.PrevValue + yym31 := z.DecBinary() + _ = yym31 + if false { + } else { + *((*string)(yyv30)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevIndex = 0 + } else { + yyv32 := &x.PrevIndex + yym33 := z.DecBinary() + _ = yym33 + if false { + } else { + *((*uint64)(yyv32)) = uint64(r.DecodeUint(64)) + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevExist = "" + } else { + yyv34 := &x.PrevExist + yyv34.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv35 := &x.TTL + yym36 := z.DecBinary() + _ = yym36 + if false { + } else if z.HasExtensions() && z.DecExt(yyv35) { + } else { + *((*int64)(yyv35)) = int64(r.DecodeInt(64)) + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Refresh = false + } else { + yyv37 := &x.Refresh + yym38 := z.DecBinary() + _ = yym38 + if false { + } else { + *((*bool)(yyv37)) = r.DecodeBool() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv39 := &x.Dir + yym40 := z.DecBinary() + _ = yym40 + if false { + } else { + *((*bool)(yyv39)) = r.DecodeBool() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.NoValueOnSuccess = false + } else { + yyv41 := &x.NoValueOnSuccess + yym42 := z.DecBinary() + _ = yym42 + if false { + } else { + *((*bool)(yyv41)) = r.DecodeBool() + } + } + for { + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { 
+ break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj23-1, "") + } + r.ReadArrayEnd() +} + +func (x *deleteAction) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(6) + } else { + r.WriteMapStart(6) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Prefix")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Key")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevValue")) + r.WriteMapElemValue() + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeUint(uint64(x.PrevIndex)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevIndex")) + r.WriteMapElemValue() + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeUint(uint64(x.PrevIndex)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Dir")) + r.WriteMapElemValue() + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Recursive")) + r.WriteMapElemValue() + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *deleteAction) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else 
{ + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *deleteAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "Prefix": + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv4 := &x.Prefix + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "Key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv6 := &x.Key + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "PrevValue": + if r.TryDecodeAsNil() { + x.PrevValue = "" + } else { + yyv8 := &x.PrevValue + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "PrevIndex": + if r.TryDecodeAsNil() { + x.PrevIndex = 0 + } else { + yyv10 := &x.PrevIndex + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*uint64)(yyv10)) = uint64(r.DecodeUint(64)) + } + } + case "Dir": + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv12 := &x.Dir + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*bool)(yyv12)) = r.DecodeBool() + } + } + case "Recursive": + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv14 := &x.Recursive + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*bool)(yyv14)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *deleteAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj16 int + var yyb16 bool + var yyhl16 bool = l >= 0 + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv17 := &x.Prefix + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv19 := &x.Key + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevValue = "" + } else { + yyv21 := &x.PrevValue + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevIndex = 0 + } else { + yyv23 := &x.PrevIndex + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*uint64)(yyv23)) = uint64(r.DecodeUint(64)) + } + } + yyj16++ + if yyhl16 { + 
yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv25 := &x.Dir + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*bool)(yyv25)) = r.DecodeBool() + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv27 := &x.Recursive + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*bool)(yyv27)) = r.DecodeBool() + } + } + for { + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj16-1, "") + } + r.ReadArrayEnd() +} + +func (x *createInOrderAction) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(4) + } else { + r.WriteMapStart(4) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Prefix")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Dir)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Dir")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Dir)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Value)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Value")) + r.WriteMapElemValue() + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Value)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else { + r.EncodeInt(int64(x.TTL)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("TTL")) + r.WriteMapElemValue() + yym14 := z.EncBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else { + r.EncodeInt(int64(x.TTL)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *createInOrderAction) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if 
yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *createInOrderAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "Prefix": + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv4 := &x.Prefix + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "Dir": + if r.TryDecodeAsNil() { + x.Dir = "" + } else { + yyv6 := &x.Dir + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "Value": + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv8 := &x.Value + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "TTL": + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv10 := &x.TTL + yym11 := z.DecBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.DecExt(yyv10) { + } else { + *((*int64)(yyv10)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *createInOrderAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv13 := &x.Prefix + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Dir = "" + } else { + yyv15 := &x.Dir + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv17 := &x.Value + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv19 := &x.TTL + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(yyv19) { + } else { + *((*int64)(yyv19)) = int64(r.DecodeInt(64)) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj12-1, "") + } + r.ReadArrayEnd() +} + +func (x codecSelfer7612) encNodes(v Nodes, e 
*codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.WriteArrayStart(len(v)) for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) + r.WriteArrayElem() if yyv1 == nil { r.EncodeNil() } else { yyv1.CodecEncodeSelf(e) } } - z.EncSendContainerState(codecSelfer_containerArrayEnd1819) + r.WriteArrayEnd() } -func (x codecSelfer1819) decNodes(v *Nodes, d *codec1978.Decoder) { - var h codecSelfer1819 +func (x codecSelfer7612) decNodes(v *Nodes, d *codec1978.Decoder) { + var h codecSelfer7612 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r @@ -982,79 +5144,49 @@ func (x codecSelfer1819) decNodes(v *Nodes, d *codec1978.Decoder) { yyv1 = yyv1[:0] yyc1 = true } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) - if yyrt1 { + } else { + yyhl1 := yyl1 > 0 + var yyrl1 int + _ = yyrl1 + if yyhl1 { + if yyl1 > cap(yyv1) { + yyrl1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) if yyrl1 <= cap(yyv1) { yyv1 = yyv1[:yyrl1] } else { yyv1 = make([]*Node, yyrl1) } - } else { - yyv1 = make([]*Node, yyrl1) + yyc1 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - if yyv1[yyj1] != nil { - *yyv1[yyj1] = Node{} - } - } else { - if yyv1[yyj1] == nil { - yyv1[yyj1] = new(Node) - } - yyw2 := yyv1[yyj1] - yyw2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, nil) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - if yyv1[yyj1] != nil { - *yyv1[yyj1] = Node{} - } + var yyj1 int + // var yydn1 bool + for ; (yyhl1 && yyj1 < yyl1) || !(yyhl1 || r.CheckBreak()); yyj1++ { + if yyj1 == 0 && len(yyv1) == 0 { + if yyhl1 { + yyrl1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) } else { - if yyv1[yyj1] == nil { - yyv1[yyj1] = new(Node) - } - yyw3 := yyv1[yyj1] - yyw3.CodecDecodeSelf(d) + yyrl1 = 8 } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, nil) // var yyz1 *Node + yyv1 = make([]*Node, yyrl1) yyc1 = true } yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { + // yydn1 = r.TryDecodeAsNil() + + // if indefinite, etc, then expand the slice if necessary + var yydb1 bool + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, nil) + yyc1 = true + + } + if yydb1 { + z.DecSwallow() + } else { if r.TryDecodeAsNil() { if yyv1[yyj1] != nil { *yyv1[yyj1] = Node{} @@ -1063,12 +5195,10 @@ func (x codecSelfer1819) decNodes(v *Nodes, d *codec1978.Decoder) { if yyv1[yyj1] == nil { yyv1[yyj1] = new(Node) } - yyw4 := yyv1[yyj1] - yyw4.CodecDecodeSelf(d) + yyw2 := yyv1[yyj1] + yyw2.CodecDecodeSelf(d) } - } else { - z.DecSwallow() } } @@ -1076,7 +5206,7 @@ func (x codecSelfer1819) decNodes(v *Nodes, d *codec1978.Decoder) { yyv1 = yyv1[:yyj1] yyc1 = true } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []*Node{} + yyv1 = make([]*Node, 0) yyc1 = true } } @@ -1084,4 +5214,5 @@ func (x codecSelfer1819) decNodes(v *Nodes, d *codec1978.Decoder) { if yyc1 { *v = yyv1 } + } diff --git a/vendor/github.com/coreos/etcd/clientv3/auth.go 
b/vendor/github.com/coreos/etcd/clientv3/auth.go
index a6ab468414..8df670f163 100644
--- a/vendor/github.com/coreos/etcd/clientv3/auth.go
+++ b/vendor/github.com/coreos/etcd/clientv3/auth.go
@@ -105,16 +105,16 @@ type auth struct {
 }
 
 func NewAuth(c *Client) Auth {
-	return &auth{remote: pb.NewAuthClient(c.ActiveConnection())}
+	return &auth{remote: RetryAuthClient(c)}
 }
 
 func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) {
-	resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, grpc.FailFast(false))
+	resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{})
 	return (*AuthEnableResponse)(resp), toErr(ctx, err)
 }
 
 func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) {
-	resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, grpc.FailFast(false))
+	resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{})
 	return (*AuthDisableResponse)(resp), toErr(ctx, err)
 }
 
@@ -139,12 +139,12 @@ func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) (
 }
 
 func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) {
-	resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, grpc.FailFast(false))
+	resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name})
 	return (*AuthUserGetResponse)(resp), toErr(ctx, err)
 }
 
 func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) {
-	resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, grpc.FailFast(false))
+	resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{})
 	return (*AuthUserListResponse)(resp), toErr(ctx, err)
 }
 
@@ -169,12 +169,12 @@ func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, ran
 }
 
 func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) {
-	resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, grpc.FailFast(false))
+	resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role})
 	return (*AuthRoleGetResponse)(resp), toErr(ctx, err)
 }
 
 func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) {
-	resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, grpc.FailFast(false))
+	resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{})
 	return (*AuthRoleListResponse)(resp), toErr(ctx, err)
 }
 
@@ -202,7 +202,7 @@ type authenticator struct {
 }
 
 func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) {
-	resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, grpc.FailFast(false))
+	resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password})
 	return (*AuthenticateResponse)(resp), toErr(ctx, err)
 }
 
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer.go b/vendor/github.com/coreos/etcd/clientv3/balancer.go
index 83b4d1aaa2..2c8c2981d3 100644
--- a/vendor/github.com/coreos/etcd/clientv3/balancer.go
+++ b/vendor/github.com/coreos/etcd/clientv3/balancer.go
@@ -29,11 +29,22 @@ import (
 
 // This error is returned only when opts.BlockingWait is true.
 var ErrNoAddrAvilable = grpc.Errorf(codes.Unavailable, "there is no address available")
 
+type notifyMsg int
+
+const (
+	notifyReset notifyMsg = iota
+	notifyNext
+)
+
 // simpleBalancer does the bare minimum to expose multiple eps
 // to the grpc reconnection code path
 type simpleBalancer struct {
-	// addrs are the client's endpoints for grpc
+	// addrs are the client's endpoint addresses for grpc
 	addrs []grpc.Address
+
+	// eps holds the raw endpoints from the client
+	eps []string
+
 	// notifyCh notifies grpc of the set of addresses for connecting
 	notifyCh chan []grpc.Address
 
@@ -57,12 +68,12 @@ type simpleBalancer struct {
 	donec chan struct{}
 
 	// updateAddrsC notifies updateNotifyLoop to update addrs.
-	updateAddrsC chan struct{}
+	updateAddrsC chan notifyMsg
 
 	// grpc issues TLS cert checks using the string passed into dial so
 	// that string must be the host. To recover the full scheme://host URL,
 	// have a map from hosts to the original endpoint.
-	host2ep map[string]string
+	hostPort2ep map[string]string
 
 	// pinAddr is the currently pinned address; set to the empty string on
 	// initialization and shutdown.
@@ -72,21 +83,19 @@ type simpleBalancer struct {
 }
 
 func newSimpleBalancer(eps []string) *simpleBalancer {
-	notifyCh := make(chan []grpc.Address, 1)
-	addrs := make([]grpc.Address, len(eps))
-	for i := range eps {
-		addrs[i].Addr = getHost(eps[i])
-	}
+	notifyCh := make(chan []grpc.Address)
+	addrs := eps2addrs(eps)
 	sb := &simpleBalancer{
 		addrs:        addrs,
+		eps:          eps,
 		notifyCh:     notifyCh,
 		readyc:       make(chan struct{}),
 		upc:          make(chan struct{}),
 		stopc:        make(chan struct{}),
 		downc:        make(chan struct{}),
 		donec:        make(chan struct{}),
-		updateAddrsC: make(chan struct{}, 1),
-		host2ep:      getHost2ep(eps),
+		updateAddrsC: make(chan notifyMsg),
+		hostPort2ep:  getHostPort2ep(eps),
 	}
 	close(sb.downc)
 	go sb.updateNotifyLoop()
@@ -101,13 +110,27 @@ func (b *simpleBalancer) ConnectNotify() <-chan struct{} {
 	return b.upc
 }
 
-func (b *simpleBalancer) getEndpoint(host string) string {
+func (b *simpleBalancer) ready() <-chan struct{} { return b.readyc }
+
+func (b *simpleBalancer) endpoint(hostPort string) string {
 	b.mu.Lock()
 	defer b.mu.Unlock()
-	return b.host2ep[host]
+	return b.hostPort2ep[hostPort]
 }
 
-func getHost2ep(eps []string) map[string]string {
+func (b *simpleBalancer) endpoints() []string {
+	b.mu.RLock()
+	defer b.mu.RUnlock()
+	return b.eps
+}
+
+func (b *simpleBalancer) pinned() string {
+	b.mu.RLock()
+	defer b.mu.RUnlock()
+	return b.pinAddr
+}
+
+func getHostPort2ep(eps []string) map[string]string {
 	hm := make(map[string]string, len(eps))
 	for i := range eps {
 		_, host, _ := parseEndpoint(eps[i])
@@ -116,14 +139,14 @@ func getHost2ep(eps []string) map[string]string {
 	return hm
 }
 
-func (b *simpleBalancer) updateAddrs(eps []string) {
-	np := getHost2ep(eps)
+func (b *simpleBalancer) updateAddrs(eps ...string) {
+	np := getHostPort2ep(eps)
 
 	b.mu.Lock()
 
-	match := len(np) == len(b.host2ep)
+	match := len(np) == len(b.hostPort2ep)
 	for k, v := range np {
-		if b.host2ep[k] != v {
+		if b.hostPort2ep[k] != v {
 			match = false
 			break
 		}
@@ -134,28 +157,38 @@ func (b *simpleBalancer) updateAddrs(eps []string) {
 		return
 	}
 
-	b.host2ep = np
-
-	addrs := make([]grpc.Address, 0, len(eps))
-	for i := range eps {
-		addrs = append(addrs, grpc.Address{Addr: getHost(eps[i])})
-	}
-	b.addrs = addrs
+	b.hostPort2ep = np
+	b.addrs, b.eps = eps2addrs(eps), eps
 
 	// updating notifyCh can trigger new connections,
 	// only update addrs if all connections are down
 	// or addrs does not include pinAddr.
-	update := !hasAddr(addrs, b.pinAddr)
+	update := !hasAddr(b.addrs, b.pinAddr)
 	b.mu.Unlock()
 
 	if update {
 		select {
-		case b.updateAddrsC <- struct{}{}:
+		case b.updateAddrsC <- notifyNext:
 		case <-b.stopc:
 		}
 	}
 }
 
+func (b *simpleBalancer) next() {
+	b.mu.RLock()
+	downc := b.downc
+	b.mu.RUnlock()
+	select {
+	case b.updateAddrsC <- notifyNext:
+	case <-b.stopc:
+	}
+	// wait until disconnect so new RPCs are not issued on old connection
+	select {
+	case <-downc:
+	case <-b.stopc:
+	}
+}
+
 func hasAddr(addrs []grpc.Address, targetAddr string) bool {
 	for _, addr := range addrs {
 		if targetAddr == addr.Addr {
@@ -192,11 +225,11 @@ func (b *simpleBalancer) updateNotifyLoop() {
 			default:
 			}
 		case downc == nil:
-			b.notifyAddrs()
+			b.notifyAddrs(notifyReset)
 			select {
 			case <-upc:
-			case <-b.updateAddrsC:
-				b.notifyAddrs()
+			case msg := <-b.updateAddrsC:
+				b.notifyAddrs(msg)
 			case <-b.stopc:
 				return
 			}
@@ -210,26 +243,58 @@ func (b *simpleBalancer) updateNotifyLoop() {
 			}
 			select {
 			case <-downc:
-			case <-b.updateAddrsC:
+				b.notifyAddrs(notifyReset)
+			case msg := <-b.updateAddrsC:
+				b.notifyAddrs(msg)
 			case <-b.stopc:
 				return
 			}
-			b.notifyAddrs()
 		}
 	}
 }
 
-func (b *simpleBalancer) notifyAddrs() {
+func (b *simpleBalancer) notifyAddrs(msg notifyMsg) {
+	if msg == notifyNext {
+		select {
+		case b.notifyCh <- []grpc.Address{}:
+		case <-b.stopc:
+			return
+		}
+	}
 	b.mu.RLock()
 	addrs := b.addrs
+	pinAddr := b.pinAddr
+	downc := b.downc
 	b.mu.RUnlock()
+
+	var waitDown bool
+	if pinAddr != "" {
+		waitDown = true
+		for _, a := range addrs {
+			if a.Addr == pinAddr {
+				waitDown = false
+			}
+		}
+	}
+
 	select {
 	case b.notifyCh <- addrs:
+		if waitDown {
+			select {
+			case <-downc:
+			case <-b.stopc:
+			}
+		}
 	case <-b.stopc:
 	}
 }
 
 func (b *simpleBalancer) Up(addr grpc.Address) func(error) {
+	f, _ := b.up(addr)
+	return f
+}
+
+func (b *simpleBalancer) up(addr grpc.Address) (func(error), bool) {
 	b.mu.Lock()
 	defer b.mu.Unlock()
 
@@ -237,20 +302,26 @@ func (b *simpleBalancer) Up(addr grpc.Address) func(error) {
 	// to "fix" it up at application layer. Otherwise, will panic
 	// if b.upc is already closed.
 	if b.closed {
-		return func(err error) {}
+		return func(err error) {}, false
 	}
 	// gRPC might call Up on a stale address.
 	// Prevent updating pinAddr with a stale address.
 	if !hasAddr(b.addrs, addr.Addr) {
-		return func(err error) {}
+		return func(err error) {}, false
 	}
 	if b.pinAddr != "" {
-		return func(err error) {}
+		if logger.V(4) {
+			logger.Infof("clientv3/balancer: %q is up but not pinned (already pinned %q)", addr.Addr, b.pinAddr)
+		}
+		return func(err error) {}, false
 	}
 	// notify waiting Get()s and pin first connected address
 	close(b.upc)
 	b.downc = make(chan struct{})
 	b.pinAddr = addr.Addr
+	if logger.V(4) {
+		logger.Infof("clientv3/balancer: pin %q", addr.Addr)
+	}
 	// notify client that a connection is up
 	b.readyOnce.Do(func() { close(b.readyc) })
 	return func(err error) {
@@ -259,7 +330,10 @@ func (b *simpleBalancer) Up(addr grpc.Address) func(error) {
 		close(b.downc)
 		b.pinAddr = ""
 		b.mu.Unlock()
-	}
+		if logger.V(4) {
+			logger.Infof("clientv3/balancer: unpin %q (%q)", addr.Addr, err.Error())
+		}
+	}, true
 }
 
 func (b *simpleBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) {
@@ -354,3 +428,11 @@ func getHost(ep string) string {
 	}
 	return url.Host
 }
+
+func eps2addrs(eps []string) []grpc.Address {
+	addrs := make([]grpc.Address, len(eps))
+	for i := range eps {
+		addrs[i].Addr = getHost(eps[i])
+	}
+	return addrs
+}
diff --git a/vendor/github.com/coreos/etcd/clientv3/client.go b/vendor/github.com/coreos/etcd/clientv3/client.go
index dec664605a..bff7d7cc63 100644
--- a/vendor/github.com/coreos/etcd/clientv3/client.go
+++ b/vendor/github.com/coreos/etcd/clientv3/client.go
@@ -55,7 +55,8 @@ type Client struct {
 	cfg      Config
 	creds    *credentials.TransportCredentials
-	balancer *simpleBalancer
+	balancer *healthBalancer
+	mu       sync.Mutex
 
 	ctx    context.Context
 	cancel context.CancelFunc
@@ -116,8 +117,10 @@ func (c *Client) Endpoints() (eps []string) {
 
 // SetEndpoints updates client's endpoints.
 func (c *Client) SetEndpoints(eps ...string) {
+	c.mu.Lock()
 	c.cfg.Endpoints = eps
-	c.balancer.updateAddrs(eps)
+	c.mu.Unlock()
+	c.balancer.updateAddrs(eps...)
 }
 
 // Sync synchronizes client's endpoints with the known endpoints from the etcd membership.
@@ -227,7 +230,7 @@ func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts
 	opts = append(opts, dopts...)
 
 	f := func(host string, t time.Duration) (net.Conn, error) {
-		proto, host, _ := parseEndpoint(c.balancer.getEndpoint(host))
+		proto, host, _ := parseEndpoint(c.balancer.endpoint(host))
 		if host == "" && endpoint != "" {
 			// dialing an endpoint not in the balancer; use
 			// endpoint passed into dial
@@ -375,7 +378,10 @@ func newClient(cfg *Config) (*Client, error) {
 		client.Password = cfg.Password
 	}
 
-	client.balancer = newSimpleBalancer(cfg.Endpoints)
+	sb := newSimpleBalancer(cfg.Endpoints)
+	hc := func(ep string) (bool, error) { return grpcHealthCheck(client, ep) }
+	client.balancer = newHealthBalancer(sb, cfg.DialTimeout, hc)
+
 	// use Endpoints[0] so that for https:// without any tls config given, then
 	// grpc will assume the certificate server name is the endpoint host.
conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(client.balancer)) @@ -391,7 +397,7 @@ func newClient(cfg *Config) (*Client, error) { hasConn := false waitc := time.After(cfg.DialTimeout) select { - case <-client.balancer.readyc: + case <-client.balancer.ready(): hasConn = true case <-ctx.Done(): case <-waitc: @@ -447,7 +453,7 @@ func (c *Client) checkVersion() (err error) { vs := strings.Split(resp.Version, ".") maj, min := 0, 0 if len(vs) >= 2 { - maj, rerr = strconv.Atoi(vs[0]) + maj, _ = strconv.Atoi(vs[0]) min, rerr = strconv.Atoi(vs[1]) } if maj < 3 || (maj == 3 && min < 2) { diff --git a/vendor/github.com/coreos/etcd/clientv3/cluster.go b/vendor/github.com/coreos/etcd/clientv3/cluster.go index bbecaaca74..8beba58a67 100644 --- a/vendor/github.com/coreos/etcd/clientv3/cluster.go +++ b/vendor/github.com/coreos/etcd/clientv3/cluster.go @@ -18,7 +18,6 @@ import ( "context" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "google.golang.org/grpc" ) type ( @@ -75,27 +74,19 @@ func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveRes func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) { // it is safe to retry on update. - for { - r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs} - resp, err := c.remote.MemberUpdate(ctx, r, grpc.FailFast(false)) - if err == nil { - return (*MemberUpdateResponse)(resp), nil - } - if isHaltErr(ctx, err) { - return nil, toErr(ctx, err) - } + r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs} + resp, err := c.remote.MemberUpdate(ctx, r) + if err == nil { + return (*MemberUpdateResponse)(resp), nil } + return nil, toErr(ctx, err) } func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) { // it is safe to retry on list. - for { - resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, grpc.FailFast(false)) - if err == nil { - return (*MemberListResponse)(resp), nil - } - if isHaltErr(ctx, err) { - return nil, toErr(ctx, err) - } + resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}) + if err == nil { + return (*MemberListResponse)(resp), nil } + return nil, toErr(ctx, err) } diff --git a/vendor/github.com/coreos/etcd/clientv3/health_balancer.go b/vendor/github.com/coreos/etcd/clientv3/health_balancer.go new file mode 100644 index 0000000000..8f4ba08ae6 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/health_balancer.go @@ -0,0 +1,249 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
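+
+// healthBalancer wraps simpleBalancer so that endpoints whose connections
+// fail are gray-listed and skipped for healthCheckTimeout (at least
+// minHealthRetryDuration) before being probed again through the gRPC
+// health service. client.go wires it up roughly like this:
+//
+//	sb := newSimpleBalancer(cfg.Endpoints)
+//	hc := func(ep string) (bool, error) { return grpcHealthCheck(client, ep) }
+//	client.balancer = newHealthBalancer(sb, cfg.DialTimeout, hc)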
+ +package clientv3 + +import ( + "context" + "sync" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + healthpb "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/status" +) + +const minHealthRetryDuration = 3 * time.Second +const unknownService = "unknown service grpc.health.v1.Health" + +type healthCheckFunc func(ep string) (bool, error) + +// healthBalancer wraps a balancer so that it uses health checking +// to choose its endpoints. +type healthBalancer struct { + *simpleBalancer + + // healthCheck checks an endpoint's health. + healthCheck healthCheckFunc + healthCheckTimeout time.Duration + + // mu protects addrs, eps, unhealthy map, and stopc. + mu sync.RWMutex + + // addrs stores all grpc addresses associated with the balancer. + addrs []grpc.Address + + // eps stores all client endpoints + eps []string + + // unhealthy tracks the last unhealthy time of endpoints. + unhealthy map[string]time.Time + + stopc chan struct{} + stopOnce sync.Once + + hostPort2ep map[string]string + + wg sync.WaitGroup +} + +func newHealthBalancer(b *simpleBalancer, timeout time.Duration, hc healthCheckFunc) *healthBalancer { + hb := &healthBalancer{ + simpleBalancer: b, + healthCheck: hc, + eps: b.endpoints(), + addrs: eps2addrs(b.endpoints()), + hostPort2ep: getHostPort2ep(b.endpoints()), + unhealthy: make(map[string]time.Time), + stopc: make(chan struct{}), + } + if timeout < minHealthRetryDuration { + timeout = minHealthRetryDuration + } + hb.healthCheckTimeout = timeout + + hb.wg.Add(1) + go func() { + defer hb.wg.Done() + hb.updateUnhealthy(timeout) + }() + + return hb +} + +func (hb *healthBalancer) Up(addr grpc.Address) func(error) { + f, used := hb.up(addr) + if !used { + return f + } + return func(err error) { + // If connected to a black hole endpoint or a killed server, the gRPC ping + // timeout will induce a network I/O error, and retrying until success; + // finding healthy endpoint on retry could take several timeouts and redials. + // To avoid wasting retries, gray-list unhealthy endpoints. + hb.hostPortError(addr.Addr, err) + f(err) + } +} + +func (hb *healthBalancer) up(addr grpc.Address) (func(error), bool) { + if !hb.mayPin(addr) { + return func(err error) {}, false + } + return hb.simpleBalancer.up(addr) +} + +func (hb *healthBalancer) Close() error { + hb.stopOnce.Do(func() { close(hb.stopc) }) + hb.wg.Wait() + return hb.simpleBalancer.Close() +} + +func (hb *healthBalancer) updateAddrs(eps ...string) { + addrs, hostPort2ep := eps2addrs(eps), getHostPort2ep(eps) + hb.mu.Lock() + hb.addrs, hb.eps, hb.hostPort2ep = addrs, eps, hostPort2ep + hb.unhealthy = make(map[string]time.Time) + hb.mu.Unlock() + hb.simpleBalancer.updateAddrs(eps...) +} + +func (hb *healthBalancer) endpoint(host string) string { + hb.mu.RLock() + defer hb.mu.RUnlock() + return hb.hostPort2ep[host] +} + +func (hb *healthBalancer) endpoints() []string { + hb.mu.RLock() + defer hb.mu.RUnlock() + return hb.eps +} + +func (hb *healthBalancer) updateUnhealthy(timeout time.Duration) { + for { + select { + case <-time.After(timeout): + hb.mu.Lock() + for k, v := range hb.unhealthy { + if time.Since(v) > timeout { + delete(hb.unhealthy, k) + if logger.V(4) { + logger.Infof("clientv3/health-balancer: removes %q from unhealthy after %v", k, timeout) + } + } + } + hb.mu.Unlock() + eps := []string{} + for _, addr := range hb.liveAddrs() { + eps = append(eps, hb.endpoint(addr.Addr)) + } + hb.simpleBalancer.updateAddrs(eps...) 
+ case <-hb.stopc: + return + } + } +} + +func (hb *healthBalancer) liveAddrs() []grpc.Address { + hb.mu.RLock() + defer hb.mu.RUnlock() + hbAddrs := hb.addrs + if len(hb.addrs) == 1 || len(hb.unhealthy) == 0 || len(hb.unhealthy) == len(hb.addrs) { + return hbAddrs + } + addrs := make([]grpc.Address, 0, len(hb.addrs)-len(hb.unhealthy)) + for _, addr := range hb.addrs { + if _, unhealthy := hb.unhealthy[addr.Addr]; !unhealthy { + addrs = append(addrs, addr) + } + } + return addrs +} + +func (hb *healthBalancer) hostPortError(hostPort string, err error) { + hb.mu.Lock() + if _, ok := hb.hostPort2ep[hostPort]; ok { + hb.unhealthy[hostPort] = time.Now() + if logger.V(4) { + logger.Infof("clientv3/health-balancer: marking %q as unhealthy (%q)", hostPort, err.Error()) + } + } + hb.mu.Unlock() +} + +func (hb *healthBalancer) mayPin(addr grpc.Address) bool { + hb.mu.RLock() + if _, ok := hb.hostPort2ep[addr.Addr]; !ok { // stale host:port + hb.mu.RUnlock() + return false + } + skip := len(hb.addrs) == 1 || len(hb.unhealthy) == 0 || len(hb.addrs) == len(hb.unhealthy) + failedTime, bad := hb.unhealthy[addr.Addr] + dur := hb.healthCheckTimeout + hb.mu.RUnlock() + if skip || !bad { + return true + } + // prevent isolated member's endpoint from being infinitely retried, as follows: + // 1. keepalive pings detects GoAway with http2.ErrCodeEnhanceYourCalm + // 2. balancer 'Up' unpins with grpc: failed with network I/O error + // 3. grpc-healthcheck still SERVING, thus retry to pin + // instead, return before grpc-healthcheck if failed within healthcheck timeout + if elapsed := time.Since(failedTime); elapsed < dur { + if logger.V(4) { + logger.Infof("clientv3/health-balancer: %q is up but not pinned (failed %v ago, require minimum %v after failure)", addr.Addr, elapsed, dur) + } + return false + } + if ok, _ := hb.healthCheck(addr.Addr); ok { + hb.mu.Lock() + delete(hb.unhealthy, addr.Addr) + hb.mu.Unlock() + if logger.V(4) { + logger.Infof("clientv3/health-balancer: %q is healthy (health check success)", addr.Addr) + } + return true + } + hb.mu.Lock() + hb.unhealthy[addr.Addr] = time.Now() + hb.mu.Unlock() + if logger.V(4) { + logger.Infof("clientv3/health-balancer: %q becomes unhealthy (health check failed)", addr.Addr) + } + return false +} + +func grpcHealthCheck(client *Client, ep string) (bool, error) { + conn, err := client.dial(ep) + if err != nil { + return false, err + } + defer conn.Close() + cli := healthpb.NewHealthClient(conn) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + resp, err := cli.Check(ctx, &healthpb.HealthCheckRequest{}) + cancel() + if err != nil { + if s, ok := status.FromError(err); ok && s.Code() == codes.Unavailable { + if s.Message() == unknownService { + // etcd < v3.3.0 + return true, nil + } + } + return false, err + } + return resp.Status == healthpb.HealthCheckResponse_SERVING, nil +} diff --git a/vendor/github.com/coreos/etcd/clientv3/kv.go b/vendor/github.com/coreos/etcd/clientv3/kv.go index ead08a0827..b578d9ebe4 100644 --- a/vendor/github.com/coreos/etcd/clientv3/kv.go +++ b/vendor/github.com/coreos/etcd/clientv3/kv.go @@ -18,8 +18,6 @@ import ( "context" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - - "google.golang.org/grpc" ) type ( @@ -132,28 +130,11 @@ func (kv *kv) Txn(ctx context.Context) Txn { } func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) { - for { - resp, err := kv.do(ctx, op) - if err == nil { - return resp, nil - } - - if isHaltErr(ctx, err) { - return resp, toErr(ctx, err) - } - // do not retry on 
modifications - if op.isWrite() { - return resp, toErr(ctx, err) - } - } -} - -func (kv *kv) do(ctx context.Context, op Op) (OpResponse, error) { var err error switch op.t { case tRange: var resp *pb.RangeResponse - resp, err = kv.remote.Range(ctx, op.toRangeRequest(), grpc.FailFast(false)) + resp, err = kv.remote.Range(ctx, op.toRangeRequest()) if err == nil { return OpResponse{get: (*GetResponse)(resp)}, nil } @@ -180,5 +161,5 @@ func (kv *kv) do(ctx context.Context, op Op) (OpResponse, error) { default: panic("Unknown op") } - return OpResponse{}, err + return OpResponse{}, toErr(ctx, err) } diff --git a/vendor/github.com/coreos/etcd/clientv3/lease.go b/vendor/github.com/coreos/etcd/clientv3/lease.go index e476db5be2..aa9ea2d78a 100644 --- a/vendor/github.com/coreos/etcd/clientv3/lease.go +++ b/vendor/github.com/coreos/etcd/clientv3/lease.go @@ -22,7 +22,6 @@ import ( "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "google.golang.org/grpc" "google.golang.org/grpc/metadata" ) @@ -183,72 +182,55 @@ func NewLeaseFromLeaseClient(remote pb.LeaseClient, keepAliveTimeout time.Durati } func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) { - for { - r := &pb.LeaseGrantRequest{TTL: ttl} - resp, err := l.remote.LeaseGrant(ctx, r) - if err == nil { - gresp := &LeaseGrantResponse{ - ResponseHeader: resp.GetHeader(), - ID: LeaseID(resp.ID), - TTL: resp.TTL, - Error: resp.Error, - } - return gresp, nil - } - if isHaltErr(ctx, err) { - return nil, toErr(ctx, err) + r := &pb.LeaseGrantRequest{TTL: ttl} + resp, err := l.remote.LeaseGrant(ctx, r) + if err == nil { + gresp := &LeaseGrantResponse{ + ResponseHeader: resp.GetHeader(), + ID: LeaseID(resp.ID), + TTL: resp.TTL, + Error: resp.Error, } + return gresp, nil } + return nil, toErr(ctx, err) } func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) { - for { - r := &pb.LeaseRevokeRequest{ID: int64(id)} - resp, err := l.remote.LeaseRevoke(ctx, r) - - if err == nil { - return (*LeaseRevokeResponse)(resp), nil - } - if isHaltErr(ctx, err) { - return nil, toErr(ctx, err) - } + r := &pb.LeaseRevokeRequest{ID: int64(id)} + resp, err := l.remote.LeaseRevoke(ctx, r) + if err == nil { + return (*LeaseRevokeResponse)(resp), nil } + return nil, toErr(ctx, err) } func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) { - for { - r := toLeaseTimeToLiveRequest(id, opts...) - resp, err := l.remote.LeaseTimeToLive(ctx, r, grpc.FailFast(false)) - if err == nil { - gresp := &LeaseTimeToLiveResponse{ - ResponseHeader: resp.GetHeader(), - ID: LeaseID(resp.ID), - TTL: resp.TTL, - GrantedTTL: resp.GrantedTTL, - Keys: resp.Keys, - } - return gresp, nil - } - if isHaltErr(ctx, err) { - return nil, toErr(ctx, err) + r := toLeaseTimeToLiveRequest(id, opts...) 
+ resp, err := l.remote.LeaseTimeToLive(ctx, r) + if err == nil { + gresp := &LeaseTimeToLiveResponse{ + ResponseHeader: resp.GetHeader(), + ID: LeaseID(resp.ID), + TTL: resp.TTL, + GrantedTTL: resp.GrantedTTL, + Keys: resp.Keys, } + return gresp, nil } + return nil, toErr(ctx, err) } func (l *lessor) Leases(ctx context.Context) (*LeaseLeasesResponse, error) { - for { - resp, err := l.remote.LeaseLeases(ctx, &pb.LeaseLeasesRequest{}, grpc.FailFast(false)) - if err == nil { - leases := make([]LeaseStatus, len(resp.Leases)) - for i := range resp.Leases { - leases[i] = LeaseStatus{ID: LeaseID(resp.Leases[i].ID)} - } - return &LeaseLeasesResponse{ResponseHeader: resp.GetHeader(), Leases: leases}, nil - } - if isHaltErr(ctx, err) { - return nil, toErr(ctx, err) + resp, err := l.remote.LeaseLeases(ctx, &pb.LeaseLeasesRequest{}) + if err == nil { + leases := make([]LeaseStatus, len(resp.Leases)) + for i := range resp.Leases { + leases[i] = LeaseStatus{ID: LeaseID(resp.Leases[i].ID)} } + return &LeaseLeasesResponse{ResponseHeader: resp.GetHeader(), Leases: leases}, nil } + return nil, toErr(ctx, err) } func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) { @@ -389,7 +371,7 @@ func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAlive cctx, cancel := context.WithCancel(ctx) defer cancel() - stream, err := l.remote.LeaseKeepAlive(cctx, grpc.FailFast(false)) + stream, err := l.remote.LeaseKeepAlive(cctx) if err != nil { return nil, toErr(ctx, err) } @@ -433,7 +415,6 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) { } else { for { resp, err := stream.Recv() - if err != nil { if canceledByCaller(l.stopCtx, err) { return err @@ -461,7 +442,7 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) { // resetRecv opens a new lease stream and starts sending keep alive requests. 
func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) { sctx, cancel := context.WithCancel(l.stopCtx) - stream, err := l.remote.LeaseKeepAlive(sctx, grpc.FailFast(false)) + stream, err := l.remote.LeaseKeepAlive(sctx) if err != nil { cancel() return nil, err diff --git a/vendor/github.com/coreos/etcd/clientv3/maintenance.go b/vendor/github.com/coreos/etcd/clientv3/maintenance.go index 988a5f7c28..25abc9c910 100644 --- a/vendor/github.com/coreos/etcd/clientv3/maintenance.go +++ b/vendor/github.com/coreos/etcd/clientv3/maintenance.go @@ -19,8 +19,6 @@ import ( "io" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - - "google.golang.org/grpc" ) type ( @@ -77,9 +75,9 @@ func NewMaintenance(c *Client) Maintenance { return nil, nil, err } cancel := func() { conn.Close() } - return pb.NewMaintenanceClient(conn), cancel, nil + return RetryMaintenanceClient(c, conn), cancel, nil }, - remote: pb.NewMaintenanceClient(c.conn), + remote: RetryMaintenanceClient(c, c.conn), } } @@ -98,15 +96,11 @@ func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) { MemberID: 0, // all Alarm: pb.AlarmType_NONE, // all } - for { - resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false)) - if err == nil { - return (*AlarmResponse)(resp), nil - } - if isHaltErr(ctx, err) { - return nil, toErr(ctx, err) - } + resp, err := m.remote.Alarm(ctx, req) + if err == nil { + return (*AlarmResponse)(resp), nil } + return nil, toErr(ctx, err) } func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) { @@ -132,7 +126,7 @@ func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmR return &ret, nil } - resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false)) + resp, err := m.remote.Alarm(ctx, req) if err == nil { return (*AlarmResponse)(resp), nil } @@ -145,7 +139,7 @@ func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*Defragm return nil, toErr(ctx, err) } defer cancel() - resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, grpc.FailFast(false)) + resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}) if err != nil { return nil, toErr(ctx, err) } @@ -158,7 +152,7 @@ func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusRespo return nil, toErr(ctx, err) } defer cancel() - resp, err := remote.Status(ctx, &pb.StatusRequest{}, grpc.FailFast(false)) + resp, err := remote.Status(ctx, &pb.StatusRequest{}) if err != nil { return nil, toErr(ctx, err) } @@ -171,7 +165,7 @@ func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (* return nil, toErr(ctx, err) } defer cancel() - resp, err := remote.HashKV(ctx, &pb.HashKVRequest{Revision: rev}, grpc.FailFast(false)) + resp, err := remote.HashKV(ctx, &pb.HashKVRequest{Revision: rev}) if err != nil { return nil, toErr(ctx, err) } @@ -179,7 +173,7 @@ func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (* } func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) { - ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, grpc.FailFast(false)) + ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}) if err != nil { return nil, toErr(ctx, err) } @@ -206,6 +200,6 @@ func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) { } func (m *maintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) { - resp, err := m.remote.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: transfereeID}, grpc.FailFast(false)) + resp, err := 
m.remote.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: transfereeID}) return (*MoveLeaderResponse)(resp), toErr(ctx, err) } diff --git a/vendor/github.com/coreos/etcd/clientv3/ready_wait.go b/vendor/github.com/coreos/etcd/clientv3/ready_wait.go new file mode 100644 index 0000000000..c6ef585b5b --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/ready_wait.go @@ -0,0 +1,30 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import "context" + +// TODO: remove this when "FailFast=false" is fixed. +// See https://github.com/grpc/grpc-go/issues/1532. +func readyWait(rpcCtx, clientCtx context.Context, ready <-chan struct{}) error { + select { + case <-ready: + return nil + case <-rpcCtx.Done(): + return rpcCtx.Err() + case <-clientCtx.Done(): + return clientCtx.Err() + } +} diff --git a/vendor/github.com/coreos/etcd/clientv3/retry.go b/vendor/github.com/coreos/etcd/clientv3/retry.go index 272b62b921..e6d17d0320 100644 --- a/vendor/github.com/coreos/etcd/clientv3/retry.go +++ b/vendor/github.com/coreos/etcd/clientv3/retry.go @@ -26,13 +26,13 @@ import ( ) type rpcFunc func(ctx context.Context) error -type retryRpcFunc func(context.Context, rpcFunc) error +type retryRPCFunc func(context.Context, rpcFunc) error type retryStopErrFunc func(error) bool -func isReadStopError(err error) bool { +func isRepeatableStopError(err error) bool { eErr := rpctypes.Error(err) // always stop retry on etcd errors - if _, ok := eErr.(rpctypes.EtcdError); ok { + if serverErr, ok := eErr.(rpctypes.EtcdError); ok && serverErr.Code() != codes.Unavailable { return true } // only retry if unavailable @@ -40,7 +40,7 @@ func isReadStopError(err error) bool { return ev.Code() != codes.Unavailable } -func isWriteStopError(err error) bool { +func isNonRepeatableStopError(err error) bool { ev, _ := status.FromError(err) if ev.Code() != codes.Unavailable { return true @@ -48,124 +48,160 @@ func isWriteStopError(err error) bool { return rpctypes.ErrorDesc(err) != "there is no address available" } -func (c *Client) newRetryWrapper(isStop retryStopErrFunc) retryRpcFunc { +func (c *Client) newRetryWrapper(isStop retryStopErrFunc) retryRPCFunc { return func(rpcCtx context.Context, f rpcFunc) error { for { - if err := f(rpcCtx); err == nil || isStop(err) { + if err := readyWait(rpcCtx, c.ctx, c.balancer.ConnectNotify()); err != nil { return err } - select { - case <-c.balancer.ConnectNotify(): - case <-rpcCtx.Done(): - return rpcCtx.Err() - case <-c.ctx.Done(): - return c.ctx.Err() + pinned := c.balancer.pinned() + err := f(rpcCtx) + if err == nil { + return nil + } + if logger.V(4) { + logger.Infof("clientv3/retry: error %q on pinned endpoint %q", err.Error(), pinned) + } + + if s, ok := status.FromError(err); ok && (s.Code() == codes.Unavailable || s.Code() == codes.DeadlineExceeded || s.Code() == codes.Internal) { + // mark this before endpoint switch is triggered + c.balancer.hostPortError(pinned, err) + c.balancer.next() + if logger.V(4) { + 
logger.Infof("clientv3/retry: switching from %q due to error %q", pinned, err.Error()) + } + } + + if isStop(err) { + return err } } } } -func (c *Client) newAuthRetryWrapper() retryRpcFunc { +func (c *Client) newAuthRetryWrapper() retryRPCFunc { return func(rpcCtx context.Context, f rpcFunc) error { for { + pinned := c.balancer.pinned() err := f(rpcCtx) if err == nil { return nil } - + if logger.V(4) { + logger.Infof("clientv3/auth-retry: error %q on pinned endpoint %q", err.Error(), pinned) + } // always stop retry on etcd errors other than invalid auth token if rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken { gterr := c.getToken(rpcCtx) if gterr != nil { + if logger.V(4) { + logger.Infof("clientv3/auth-retry: cannot retry due to error %q(%q) on pinned endpoint %q", err.Error(), gterr.Error(), pinned) + } return err // return the original error for simplicity } continue } - return err } } } -// RetryKVClient implements a KVClient that uses the client's FailFast retry policy. +// RetryKVClient implements a KVClient. func RetryKVClient(c *Client) pb.KVClient { - readRetry := c.newRetryWrapper(isReadStopError) - writeRetry := c.newRetryWrapper(isWriteStopError) + repeatableRetry := c.newRetryWrapper(isRepeatableStopError) + nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError) conn := pb.NewKVClient(c.conn) - retryBasic := &retryKVClient{&retryWriteKVClient{conn, writeRetry}, readRetry} + retryBasic := &retryKVClient{&nonRepeatableKVClient{conn, nonRepeatableRetry}, repeatableRetry} retryAuthWrapper := c.newAuthRetryWrapper() return &retryKVClient{ - &retryWriteKVClient{retryBasic, retryAuthWrapper}, + &nonRepeatableKVClient{retryBasic, retryAuthWrapper}, retryAuthWrapper} } type retryKVClient struct { - *retryWriteKVClient - readRetry retryRpcFunc + *nonRepeatableKVClient + repeatableRetry retryRPCFunc } func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) { - err = rkv.readRetry(ctx, func(rctx context.Context) error { - resp, err = rkv.KVClient.Range(rctx, in, opts...) + err = rkv.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rkv.kc.Range(rctx, in, opts...) return err }) return resp, err } -type retryWriteKVClient struct { - pb.KVClient - retryf retryRpcFunc +type nonRepeatableKVClient struct { + kc pb.KVClient + nonRepeatableRetry retryRPCFunc } -func (rkv *retryWriteKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) { - err = rkv.retryf(ctx, func(rctx context.Context) error { - resp, err = rkv.KVClient.Put(rctx, in, opts...) +func (rkv *nonRepeatableKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) { + err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rkv.kc.Put(rctx, in, opts...) return err }) return resp, err } -func (rkv *retryWriteKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) { - err = rkv.retryf(ctx, func(rctx context.Context) error { - resp, err = rkv.KVClient.DeleteRange(rctx, in, opts...) +func (rkv *nonRepeatableKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) { + err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rkv.kc.DeleteRange(rctx, in, opts...) 
return err }) return resp, err } -func (rkv *retryWriteKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) { - err = rkv.retryf(ctx, func(rctx context.Context) error { - resp, err = rkv.KVClient.Txn(rctx, in, opts...) +func (rkv *nonRepeatableKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) { + // TODO: repeatableRetry if read-only txn + err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rkv.kc.Txn(rctx, in, opts...) return err }) return resp, err } -func (rkv *retryWriteKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) { - err = rkv.retryf(ctx, func(rctx context.Context) error { - resp, err = rkv.KVClient.Compact(rctx, in, opts...) +func (rkv *nonRepeatableKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) { + err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rkv.kc.Compact(rctx, in, opts...) return err }) return resp, err } type retryLeaseClient struct { - pb.LeaseClient - retryf retryRpcFunc + lc pb.LeaseClient + repeatableRetry retryRPCFunc } -// RetryLeaseClient implements a LeaseClient that uses the client's FailFast retry policy. +// RetryLeaseClient implements a LeaseClient. func RetryLeaseClient(c *Client) pb.LeaseClient { retry := &retryLeaseClient{ pb.NewLeaseClient(c.conn), - c.newRetryWrapper(isReadStopError), + c.newRetryWrapper(isRepeatableStopError), } return &retryLeaseClient{retry, c.newAuthRetryWrapper()} } +func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) { + err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rlc.lc.LeaseTimeToLive(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rlc *retryLeaseClient) LeaseLeases(ctx context.Context, in *pb.LeaseLeasesRequest, opts ...grpc.CallOption) (resp *pb.LeaseLeasesResponse, err error) { + err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rlc.lc.LeaseLeases(rctx, in, opts...) + return err + }) + return resp, err +} + func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) { - err = rlc.retryf(ctx, func(rctx context.Context) error { - resp, err = rlc.LeaseClient.LeaseGrant(rctx, in, opts...) + err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rlc.lc.LeaseGrant(rctx, in, opts...) return err }) return resp, err @@ -173,140 +209,286 @@ func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRe } func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) { - err = rlc.retryf(ctx, func(rctx context.Context) error { - resp, err = rlc.LeaseClient.LeaseRevoke(rctx, in, opts...) + err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rlc.lc.LeaseRevoke(rctx, in, opts...) 
return err }) return resp, err } +func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) { + err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { + stream, err = rlc.lc.LeaseKeepAlive(rctx, opts...) + return err + }) + return stream, err +} + type retryClusterClient struct { - pb.ClusterClient - retryf retryRpcFunc + *nonRepeatableClusterClient + repeatableRetry retryRPCFunc } -// RetryClusterClient implements a ClusterClient that uses the client's FailFast retry policy. +// RetryClusterClient implements a ClusterClient. func RetryClusterClient(c *Client) pb.ClusterClient { - return &retryClusterClient{pb.NewClusterClient(c.conn), c.newRetryWrapper(isWriteStopError)} + repeatableRetry := c.newRetryWrapper(isRepeatableStopError) + nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError) + cc := pb.NewClusterClient(c.conn) + return &retryClusterClient{&nonRepeatableClusterClient{cc, nonRepeatableRetry}, repeatableRetry} } -func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) { - err = rcc.retryf(ctx, func(rctx context.Context) error { - resp, err = rcc.ClusterClient.MemberAdd(rctx, in, opts...) +func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) { + err = rcc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rcc.cc.MemberList(rctx, in, opts...) return err }) return resp, err } -func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) { - err = rcc.retryf(ctx, func(rctx context.Context) error { - resp, err = rcc.ClusterClient.MemberRemove(rctx, in, opts...) +type nonRepeatableClusterClient struct { + cc pb.ClusterClient + nonRepeatableRetry retryRPCFunc +} + +func (rcc *nonRepeatableClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) { + err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rcc.cc.MemberAdd(rctx, in, opts...) return err }) return resp, err } -func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) { - err = rcc.retryf(ctx, func(rctx context.Context) error { - resp, err = rcc.ClusterClient.MemberUpdate(rctx, in, opts...) +func (rcc *nonRepeatableClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) { + err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rcc.cc.MemberRemove(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rcc *nonRepeatableClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) { + err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rcc.cc.MemberUpdate(rctx, in, opts...) + return err + }) + return resp, err +} + +// RetryMaintenanceClient implements a Maintenance. 
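+// Alarm, Status, Hash, HashKV, Snapshot and MoveLeader go through the
+// repeatable retry wrapper below; Defragment uses the non-repeatable one.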
+func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClient { + repeatableRetry := c.newRetryWrapper(isRepeatableStopError) + nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError) + mc := pb.NewMaintenanceClient(conn) + return &retryMaintenanceClient{&nonRepeatableMaintenanceClient{mc, nonRepeatableRetry}, repeatableRetry} +} + +type retryMaintenanceClient struct { + *nonRepeatableMaintenanceClient + repeatableRetry retryRPCFunc +} + +func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) { + err = rmc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rmc.mc.Alarm(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) { + err = rmc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rmc.mc.Status(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) { + err = rmc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rmc.mc.Hash(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rmc *retryMaintenanceClient) HashKV(ctx context.Context, in *pb.HashKVRequest, opts ...grpc.CallOption) (resp *pb.HashKVResponse, err error) { + err = rmc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rmc.mc.HashKV(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) { + err = rmc.repeatableRetry(ctx, func(rctx context.Context) error { + stream, err = rmc.mc.Snapshot(rctx, in, opts...) + return err + }) + return stream, err +} + +func (rmc *retryMaintenanceClient) MoveLeader(ctx context.Context, in *pb.MoveLeaderRequest, opts ...grpc.CallOption) (resp *pb.MoveLeaderResponse, err error) { + err = rmc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rmc.mc.MoveLeader(rctx, in, opts...) + return err + }) + return resp, err +} + +type nonRepeatableMaintenanceClient struct { + mc pb.MaintenanceClient + nonRepeatableRetry retryRPCFunc +} + +func (rmc *nonRepeatableMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) { + err = rmc.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rmc.mc.Defragment(rctx, in, opts...) return err }) return resp, err } type retryAuthClient struct { - pb.AuthClient - retryf retryRpcFunc + *nonRepeatableAuthClient + repeatableRetry retryRPCFunc } -// RetryAuthClient implements a AuthClient that uses the client's FailFast retry policy. +// RetryAuthClient implements a AuthClient. 
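+// The read-only UserList, UserGet, RoleGet and RoleList calls use the
+// repeatable retry wrapper; every other Auth RPC, including Authenticate,
+// goes through the non-repeatable wrapper.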
func RetryAuthClient(c *Client) pb.AuthClient { - return &retryAuthClient{pb.NewAuthClient(c.conn), c.newRetryWrapper(isWriteStopError)} + repeatableRetry := c.newRetryWrapper(isRepeatableStopError) + nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError) + ac := pb.NewAuthClient(c.conn) + return &retryAuthClient{&nonRepeatableAuthClient{ac, nonRepeatableRetry}, repeatableRetry} } -func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.AuthEnable(rctx, in, opts...) +func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) { + err = rac.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserList(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.AuthDisable(rctx, in, opts...) +func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) { + err = rac.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserGet(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.UserAdd(rctx, in, opts...) +func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) { + err = rac.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleGet(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.UserDelete(rctx, in, opts...) +func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) { + err = rac.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleList(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.UserChangePassword(rctx, in, opts...) +type nonRepeatableAuthClient struct { + ac pb.AuthClient + nonRepeatableRetry retryRPCFunc +} + +func (rac *nonRepeatableAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.AuthEnable(rctx, in, opts...) 
return err }) return resp, err } -func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.UserGrantRole(rctx, in, opts...) +func (rac *nonRepeatableAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.AuthDisable(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.UserRevokeRole(rctx, in, opts...) +func (rac *nonRepeatableAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserAdd(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.RoleAdd(rctx, in, opts...) +func (rac *nonRepeatableAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserDelete(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.RoleDelete(rctx, in, opts...) +func (rac *nonRepeatableAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserChangePassword(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.RoleGrantPermission(rctx, in, opts...) +func (rac *nonRepeatableAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserGrantRole(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.RoleRevokePermission(rctx, in, opts...) 
+func (rac *nonRepeatableAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserRevokeRole(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *nonRepeatableAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleAdd(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *nonRepeatableAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleDelete(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *nonRepeatableAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleGrantPermission(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *nonRepeatableAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleRevokePermission(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *nonRepeatableAuthClient) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (resp *pb.AuthenticateResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.Authenticate(rctx, in, opts...) return err }) return resp, err diff --git a/vendor/github.com/coreos/etcd/clientv3/txn.go b/vendor/github.com/coreos/etcd/clientv3/txn.go index ea4ec6160b..8169b62150 100644 --- a/vendor/github.com/coreos/etcd/clientv3/txn.go +++ b/vendor/github.com/coreos/etcd/clientv3/txn.go @@ -19,8 +19,6 @@ import ( "sync" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - - "google.golang.org/grpc" ) // Txn is the interface that wraps mini-transactions. @@ -136,30 +134,14 @@ func (txn *txn) Else(ops ...Op) Txn { func (txn *txn) Commit() (*TxnResponse, error) { txn.mu.Lock() defer txn.mu.Unlock() - for { - resp, err := txn.commit() - if err == nil { - return resp, err - } - if isHaltErr(txn.ctx, err) { - return nil, toErr(txn.ctx, err) - } - if txn.isWrite { - return nil, toErr(txn.ctx, err) - } - } -} -func (txn *txn) commit() (*TxnResponse, error) { r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas} - var opts []grpc.CallOption - if !txn.isWrite { - opts = []grpc.CallOption{grpc.FailFast(false)} - } - resp, err := txn.kv.remote.Txn(txn.ctx, r, opts...) 
+ var resp *pb.TxnResponse + var err error + resp, err = txn.kv.remote.Txn(txn.ctx, r) if err != nil { - return nil, err + return nil, toErr(txn.ctx, err) } return (*TxnResponse)(resp), nil } diff --git a/vendor/github.com/coreos/etcd/clientv3/watch.go b/vendor/github.com/coreos/etcd/clientv3/watch.go index cfa4781269..91e6db26a0 100644 --- a/vendor/github.com/coreos/etcd/clientv3/watch.go +++ b/vendor/github.com/coreos/etcd/clientv3/watch.go @@ -762,6 +762,8 @@ func (w *watchGrpcStream) joinSubstreams() { } // openWatchClient retries opening a watch client until success or halt. +// manually retry in case "ws==nil && err==nil" +// TODO: remove FailFast=false func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) { for { select { diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go index b4f74e7808..446e4f6b87 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go @@ -59,7 +59,7 @@ var ( ErrGRPCInvalidAuthMgmt = status.New(codes.InvalidArgument, "etcdserver: invalid auth management").Err() ErrGRPCNoLeader = status.New(codes.Unavailable, "etcdserver: no leader").Err() - ErrGRPCNotLeader = status.New(codes.Unavailable, "etcdserver: not leader").Err() + ErrGRPCNotLeader = status.New(codes.FailedPrecondition, "etcdserver: not leader").Err() ErrGRPCNotCapable = status.New(codes.Unavailable, "etcdserver: not capable").Err() ErrGRPCStopped = status.New(codes.Unavailable, "etcdserver: server stopped").Err() ErrGRPCTimeout = status.New(codes.Unavailable, "etcdserver: request timed out").Err() diff --git a/vendor/github.com/coreos/etcd/pkg/transport/listener.go b/vendor/github.com/coreos/etcd/pkg/transport/listener.go index 33ba17fe12..555618e6f0 100644 --- a/vendor/github.com/coreos/etcd/pkg/transport/listener.go +++ b/vendor/github.com/coreos/etcd/pkg/transport/listener.go @@ -22,6 +22,7 @@ import ( "crypto/x509" "crypto/x509/pkix" "encoding/pem" + "errors" "fmt" "math/big" "net" @@ -58,7 +59,7 @@ func wrapTLS(addr, scheme string, tlsinfo *TLSInfo, l net.Listener) (net.Listene type TLSInfo struct { CertFile string KeyFile string - CAFile string + CAFile string // TODO: deprecate this in v4 TrustedCAFile string ClientCertAuth bool CRLFile string @@ -76,6 +77,9 @@ type TLSInfo struct { // parseFunc exists to simplify testing. Typically, parseFunc // should be left nil. In that case, tls.X509KeyPair will be used. parseFunc func([]byte, []byte) (tls.Certificate, error) + + // AllowedCN is a CN which must be provided by a client. + AllowedCN string } func (info TLSInfo) String() string { @@ -174,6 +178,20 @@ func (info TLSInfo) baseConfig() (*tls.Config, error) { MinVersion: tls.VersionTLS12, ServerName: info.ServerName, } + + if info.AllowedCN != "" { + cfg.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { + for _, chains := range verifiedChains { + if len(chains) != 0 { + if info.AllowedCN == chains[0].Subject.CommonName { + return nil + } + } + } + return errors.New("CommonName authentication failed") + } + } + // this only reloads certs when there's a client request // TODO: support server-side refresh (e.g. 
inotify, SIGHUP), caching cfg.GetCertificate = func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) { diff --git a/vendor/github.com/denisenkom/go-mssqldb/buf.go b/vendor/github.com/denisenkom/go-mssqldb/buf.go index 98851d2556..365acd4833 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/buf.go +++ b/vendor/github.com/denisenkom/go-mssqldb/buf.go @@ -24,16 +24,20 @@ type header struct { type tdsBuffer struct { transport io.ReadWriteCloser + packetSize int + // Write fields. - wbuf []byte - wpos uint16 + wbuf []byte + wpos int + wPacketSeq byte + wPacketType packetType // Read fields. rbuf []byte - rpos uint16 - rsize uint16 + rpos int + rsize int final bool - packet_type packetType + rPacketType packetType // afterFirst is assigned to right after tdsBuffer is created and // before the first use. It is executed after the first packet is @@ -42,61 +46,55 @@ type tdsBuffer struct { } func newTdsBuffer(bufsize uint16, transport io.ReadWriteCloser) *tdsBuffer { - w := new(tdsBuffer) - w.wbuf = make([]byte, bufsize) - w.rbuf = make([]byte, bufsize) - w.wpos = 0 - w.rpos = 8 - w.transport = transport - return w -} - -func (rw *tdsBuffer) ResizeBuffer(packetsizei int) { - if len(rw.rbuf) != packetsizei { - newbuf := make([]byte, packetsizei) - copy(newbuf, rw.rbuf) - rw.rbuf = newbuf - } - if len(rw.wbuf) != packetsizei { - newbuf := make([]byte, packetsizei) - copy(newbuf, rw.wbuf) - rw.wbuf = newbuf + return &tdsBuffer{ + packetSize: int(bufsize), + wbuf: make([]byte, 1<<16), + rbuf: make([]byte, 1<<16), + rpos: 8, + transport: transport, } } -func (w *tdsBuffer) PackageSize() uint32 { - return uint32(len(w.wbuf)) +func (rw *tdsBuffer) ResizeBuffer(packetSize int) { + rw.packetSize = packetSize +} + +func (w *tdsBuffer) PackageSize() int { + return w.packetSize } func (w *tdsBuffer) flush() (err error) { - // writing packet size - binary.BigEndian.PutUint16(w.wbuf[2:], w.wpos) + // Write packet size. + w.wbuf[0] = byte(w.wPacketType) + binary.BigEndian.PutUint16(w.wbuf[2:], uint16(w.wpos)) + w.wbuf[6] = w.wPacketSeq - // writing packet into underlying transport + // Write packet into underlying transport. if _, err = w.transport.Write(w.wbuf[:w.wpos]); err != nil { return err } + // It is possible to create a whole new buffer after a flush. + // Useful for debugging. Normally reuse the buffer. + // w.wbuf = make([]byte, 1<<16) - // execute afterFirst hook if it is set + // Execute afterFirst hook if it is set. if w.afterFirst != nil { w.afterFirst() w.afterFirst = nil } w.wpos = 8 - // packet number - w.wbuf[6] += 1 + w.wPacketSeq++ return nil } func (w *tdsBuffer) Write(p []byte) (total int, err error) { - total = 0 for { - copied := copy(w.wbuf[w.wpos:], p) - w.wpos += uint16(copied) + copied := copy(w.wbuf[w.wpos:w.packetSize], p) + w.wpos += copied total += copied if copied == len(p) { - break + return } if err = w.flush(); err != nil { return @@ -117,43 +115,41 @@ func (w *tdsBuffer) WriteByte(b byte) error { return nil } -func (w *tdsBuffer) BeginPacket(packet_type packetType) { - w.wbuf[0] = byte(packet_type) - w.wbuf[1] = 0 // packet is incomplete - w.wbuf[4] = 0 // spid - w.wbuf[5] = 0 - w.wbuf[6] = 1 // packet id - w.wbuf[7] = 0 // window +func (w *tdsBuffer) BeginPacket(packetType packetType) { + w.wbuf[1] = 0 // Packet is incomplete. This byte is set again in FinishPacket. 
w.wpos = 8 + w.wPacketSeq = 1 + w.wPacketType = packetType } func (w *tdsBuffer) FinishPacket() error { - w.wbuf[1] = 1 // this is last packet + w.wbuf[1] = 1 // Mark this as the last packet in the message. return w.flush() } +var headerSize = binary.Size(header{}) + func (r *tdsBuffer) readNextPacket() error { - header := header{} + h := header{} var err error - err = binary.Read(r.transport, binary.BigEndian, &header) + err = binary.Read(r.transport, binary.BigEndian, &h) if err != nil { return err } - offset := uint16(binary.Size(header)) - if int(header.Size) > len(r.rbuf) { + if int(h.Size) > len(r.rbuf) { return errors.New("Invalid packet size, it is longer than buffer size") } - if int(offset) > int(header.Size) { + if headerSize > int(h.Size) { return errors.New("Invalid packet size, it is shorter than header size") } - _, err = io.ReadFull(r.transport, r.rbuf[offset:header.Size]) + _, err = io.ReadFull(r.transport, r.rbuf[headerSize:h.Size]) if err != nil { return err } - r.rpos = offset - r.rsize = header.Size - r.final = header.Status != 0 - r.packet_type = header.PacketType + r.rpos = headerSize + r.rsize = int(h.Size) + r.final = h.Status != 0 + r.rPacketType = h.PacketType return nil } @@ -162,7 +158,7 @@ func (r *tdsBuffer) BeginRead() (packetType, error) { if err != nil { return 0, err } - return r.packet_type, nil + return r.rPacketType, nil } func (r *tdsBuffer) ReadByte() (res byte, err error) { @@ -250,6 +246,6 @@ func (r *tdsBuffer) Read(buf []byte) (copied int, err error) { } } copied = copy(buf, r.rbuf[r.rpos:r.rsize]) - r.rpos += uint16(copied) + r.rpos += copied return } diff --git a/vendor/github.com/denisenkom/go-mssqldb/bulkcopy.go b/vendor/github.com/denisenkom/go-mssqldb/bulkcopy.go index b0a61e8c35..d984fd4b49 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/bulkcopy.go +++ b/vendor/github.com/denisenkom/go-mssqldb/bulkcopy.go @@ -6,11 +6,10 @@ import ( "fmt" "math" "reflect" + "strconv" "strings" "time" - "strconv" - "golang.org/x/net/context" // use the "x/net/context" for backwards compatibility. 
) @@ -218,7 +217,7 @@ func (b *MssqlBulk) Done() (rowcount int64, err error) { buf.FinishPacket() tokchan := make(chan tokenStruct, 5) - go processResponse(context.Background(), b.cn.sess, tokchan) + go processResponse(context.Background(), b.cn.sess, tokchan, nil) var rowCount int64 for token := range tokchan { @@ -231,7 +230,7 @@ func (b *MssqlBulk) Done() (rowcount int64, err error) { return 0, token.getError() } case error: - return 0, token + return 0, b.cn.checkBadConn(token) } } return rowCount, nil @@ -309,7 +308,8 @@ func (s *MssqlStmt) QueryMeta() (cols []columnStruct, err error) { return } tokchan := make(chan tokenStruct, 5) - go processResponse(context.Background(), s.c.sess, tokchan) + go processResponse(context.Background(), s.c.sess, tokchan, s.c.outs) + s.c.clearOuts() loop: for tok := range tokchan { switch token := tok.(type) { @@ -319,7 +319,7 @@ loop: cols = token break loop case error: - return nil, token + return nil, s.c.checkBadConn(token) } } return cols, nil @@ -501,9 +501,9 @@ func (b *MssqlBulk) makeParam(val DataValue, col columnStruct) (res Param, err e res.ti.Size = 8 res.buffer = make([]byte, 8) - ref := time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC) - dur := val.Sub(ref) - days := math.Floor(float64(dur) / float64(24*time.Hour)) + days := divFloor(val.Unix(), 24*60*60) + //25567 - number of days since Jan 1 1900 UTC to Jan 1 1970 + days = days + 25567 tm := (val.Hour()*60*60+val.Minute()*60+val.Second())*300 + int(val.Nanosecond()/10000000*3) binary.LittleEndian.PutUint32(res.buffer[0:4], uint32(days)) diff --git a/vendor/github.com/denisenkom/go-mssqldb/mssql.go b/vendor/github.com/denisenkom/go-mssqldb/mssql.go index f9b18033d9..20b5ad7a27 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/mssql.go +++ b/vendor/github.com/denisenkom/go-mssqldb/mssql.go @@ -6,13 +6,14 @@ import ( "encoding/binary" "errors" "fmt" - "golang.org/x/net/context" // use the "x/net/context" for backwards compatibility. "io" "math" "net" "reflect" "strings" "time" + + "golang.org/x/net/context" // use the "x/net/context" for backwards compatibility. ) var driverInstance = &MssqlDriver{processQueryText: true} @@ -58,8 +59,9 @@ type MssqlConn struct { transactionCtx context.Context processQueryText bool + connectionGood bool - connectionGood bool + outs map[string]interface{} } func (c *MssqlConn) checkBadConn(err error) error { @@ -73,11 +75,17 @@ func (c *MssqlConn) checkBadConn(err error) error { // it might be possible to revise this hack after // https://github.com/golang/go/issues/20807 // is implemented - if err == nil { + switch err { + case nil: return nil - } - if err == io.EOF { + case io.EOF: return driver.ErrBadConn + case driver.ErrBadConn: + // It is an internal programming error if driver.ErrBadConn + // is ever passed to this function. driver.ErrBadConn should + // only ever be returned in response to a *MssqlConn.connectionGood == false + // check in the external facing API. + panic("driver.ErrBadConn in checkBadConn. 
This should not happen.") } switch err.(type) { @@ -92,9 +100,14 @@ func (c *MssqlConn) checkBadConn(err error) error { } } +func (c *MssqlConn) clearOuts() { + c.outs = nil +} + func (c *MssqlConn) simpleProcessResp(ctx context.Context) error { tokchan := make(chan tokenStruct, 5) - go processResponse(ctx, c.sess, tokchan) + go processResponse(ctx, c.sess, tokchan, c.outs) + c.clearOuts() for tok := range tokchan { switch token := tok.(type) { case doneStruct: @@ -127,7 +140,8 @@ func (c *MssqlConn) sendCommitRequest() error { if c.sess.logFlags&logErrors != 0 { c.sess.log.Printf("Failed to send CommitXact with %v", err) } - return driver.ErrBadConn + c.connectionGood = false + return fmt.Errorf("Faild to send CommitXact: %v", err) } return nil } @@ -151,7 +165,8 @@ func (c *MssqlConn) sendRollbackRequest() error { if c.sess.logFlags&logErrors != 0 { c.sess.log.Printf("Failed to send RollbackXact with %v", err) } - return driver.ErrBadConn + c.connectionGood = false + return fmt.Errorf("Failed to send RollbackXact: %v", err) } return nil } @@ -185,7 +200,8 @@ func (c *MssqlConn) sendBeginRequest(ctx context.Context, tdsIsolation isoLevel) if c.sess.logFlags&logErrors != 0 { c.sess.log.Printf("Failed to send BeginXact with %v", err) } - return driver.ErrBadConn + c.connectionGood = false + return fmt.Errorf("Failed to send BiginXant: %v", err) } return nil } @@ -228,7 +244,12 @@ func (d *MssqlDriver) open(dsn string) (*MssqlConn, error) { } } - conn := &MssqlConn{sess: sess, transactionCtx: context.Background(), processQueryText: d.processQueryText, connectionGood: true} + conn := &MssqlConn{ + sess: sess, + transactionCtx: context.Background(), + processQueryText: d.processQueryText, + connectionGood: true, + } conn.sess.log = d.log return conn, nil } @@ -292,8 +313,15 @@ func (s *MssqlStmt) sendQuery(args []namedValue) (err error) { } if s.notifSub != nil { - headers = append(headers, headerStruct{hdrtype: dataStmHdrQueryNotif, - data: queryNotifHdr{s.notifSub.msgText, s.notifSub.options, s.notifSub.timeout}.pack()}) + headers = append(headers, + headerStruct{ + hdrtype: dataStmHdrQueryNotif, + data: queryNotifHdr{ + s.notifSub.msgText, + s.notifSub.options, + s.notifSub.timeout, + }.pack(), + }) } // no need to check number of parameters here, it is checked by database/sql @@ -302,7 +330,11 @@ func (s *MssqlStmt) sendQuery(args []namedValue) (err error) { } if s.c.sess.logFlags&logParams != 0 && len(args) > 0 { for i := 0; i < len(args); i++ { - s.c.sess.log.Printf("\t@p%d\t%v\n", i+1, args[i]) + if len(args[i].Name) > 0 { + s.c.sess.log.Printf("\t@%s\t%v\n", args[i].Name, args[i].Value) + } else { + s.c.sess.log.Printf("\t@p%d\t%v\n", i+1, args[i].Value) + } } } @@ -311,37 +343,68 @@ func (s *MssqlStmt) sendQuery(args []namedValue) (err error) { if s.c.sess.logFlags&logErrors != 0 { s.c.sess.log.Printf("Failed to send SqlBatch with %v", err) } - return driver.ErrBadConn + s.c.connectionGood = false + return fmt.Errorf("failed to send SQL Batch: %v", err) } } else { - params := make([]Param, len(args)+2) - decls := make([]string, len(args)) - params[0] = makeStrParam(s.query) - for i, val := range args { - params[i+2], err = s.makeParam(val.Value) + proc := Sp_ExecuteSql + var params []Param + if isProc(s.query) { + proc.name = s.query + params, _, err = s.makeRPCParams(args, 0) + } else { + var decls []string + params, decls, err = s.makeRPCParams(args, 2) if err != nil { return } - var name string - if len(val.Name) > 0 { - name = "@" + val.Name - } else { - name = fmt.Sprintf("@p%d", 
val.Ordinal) - } - params[i+2].Name = name - decls[i] = fmt.Sprintf("%s %s", name, makeDecl(params[i+2].ti)) + params[0] = makeStrParam(s.query) + params[1] = makeStrParam(strings.Join(decls, ",")) } - params[1] = makeStrParam(strings.Join(decls, ",")) - if err = sendRpc(s.c.sess.buf, headers, Sp_ExecuteSql, 0, params); err != nil { + if err = sendRpc(s.c.sess.buf, headers, proc, 0, params); err != nil { if s.c.sess.logFlags&logErrors != 0 { s.c.sess.log.Printf("Failed to send Rpc with %v", err) } - return driver.ErrBadConn + s.c.connectionGood = false + return fmt.Errorf("Failed to send RPC: %v", err) } } return } +// isProc takes the query text in s and determines if it is a stored proc name +// or SQL text. +func isProc(s string) bool { + if len(s) == 0 { + return false + } + if s[0] == '[' && s[len(s)-1] == ']' && strings.ContainsAny(s, "\n\r") == false { + return true + } + return !strings.ContainsAny(s, " \t\n\r;") +} + +func (s *MssqlStmt) makeRPCParams(args []namedValue, offset int) ([]Param, []string, error) { + var err error + params := make([]Param, len(args)+offset) + decls := make([]string, len(args)) + for i, val := range args { + params[i+offset], err = s.makeParam(val.Value) + if err != nil { + return nil, nil, err + } + var name string + if len(val.Name) > 0 { + name = "@" + val.Name + } else { + name = fmt.Sprintf("@p%d", val.Ordinal) + } + params[i+offset].Name = name + decls[i] = fmt.Sprintf("%s %s", name, makeDecl(params[i+offset].ti)) + } + return params, decls, nil +} + type namedValue struct { Name string Ordinal int @@ -376,7 +439,8 @@ func (s *MssqlStmt) queryContext(ctx context.Context, args []namedValue) (rows d func (s *MssqlStmt) processQueryResponse(ctx context.Context) (res driver.Rows, err error) { tokchan := make(chan tokenStruct, 5) ctx, cancel := context.WithCancel(ctx) - go processResponse(ctx, s.c.sess, tokchan) + go processResponse(ctx, s.c.sess, tokchan, s.c.outs) + s.c.clearOuts() // process metadata var cols []columnStruct loop: @@ -423,7 +487,8 @@ func (s *MssqlStmt) exec(ctx context.Context, args []namedValue) (res driver.Res func (s *MssqlStmt) processExec(ctx context.Context) (res driver.Result, err error) { tokchan := make(chan tokenStruct, 5) - go processResponse(ctx, s.c.sess, tokchan) + go processResponse(ctx, s.c.sess, tokchan, s.c.outs) + s.c.clearOuts() var rowCount int64 for token := range tokchan { switch token := token.(type) { @@ -634,8 +699,7 @@ func (s *MssqlStmt) makeParam(val driver.Value) (res Param, err error) { binary.LittleEndian.PutUint32(res.buffer[4:8], uint32(tm)) } default: - err = fmt.Errorf("mssql: unknown type for %T", val) - return + return s.makeParamExtra(val) } return } diff --git a/vendor/github.com/denisenkom/go-mssqldb/mssql_go18.go b/vendor/github.com/denisenkom/go-mssqldb/mssql_go18.go index ddf24cbf5a..9eaeb1675b 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/mssql_go18.go +++ b/vendor/github.com/denisenkom/go-mssqldb/mssql_go18.go @@ -14,6 +14,9 @@ var _ driver.Pinger = &MssqlConn{} // Ping is used to check if the remote server is available and satisfies the Pinger interface. func (c *MssqlConn) Ping(ctx context.Context) error { + if !c.connectionGood { + return driver.ErrBadConn + } stmt := &MssqlStmt{c, `select 1;`, 0, nil} _, err := stmt.ExecContext(ctx, nil) return err @@ -23,6 +26,9 @@ var _ driver.ConnBeginTx = &MssqlConn{} // BeginTx satisfies ConnBeginTx. 
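The sendQuery rewrite above now distinguishes a bare stored-procedure name from SQL text via isProc and issues a direct RPC call in the former case instead of wrapping everything in sp_executesql. A small self-contained sketch of that heuristic; isProcLike is a local copy for illustration, not part of the driver:

package main

import (
	"fmt"
	"strings"
)

// isProcLike mirrors the isProc heuristic: a query is treated as a stored
// procedure name when it is a single bracketed identifier, or when it
// contains no whitespace or semicolons; otherwise it is sent as a SQL batch.
func isProcLike(s string) bool {
	if len(s) == 0 {
		return false
	}
	if s[0] == '[' && s[len(s)-1] == ']' && !strings.ContainsAny(s, "\n\r") {
		return true
	}
	return !strings.ContainsAny(s, " \t\n\r;")
}

func main() {
	for _, q := range []string{"sp_helpdb", "[dbo].[my proc]", "select 1"} {
		fmt.Printf("%-20q -> proc: %v\n", q, isProcLike(q))
	}
}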
func (c *MssqlConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { + if !c.connectionGood { + return nil, driver.ErrBadConn + } if opts.ReadOnly { return nil, errors.New("Read-only transactions are not supported") } @@ -52,6 +58,9 @@ func (c *MssqlConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver. } func (c *MssqlConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) { + if !c.connectionGood { + return nil, driver.ErrBadConn + } if len(query) > 10 && strings.EqualFold(query[:10], "INSERTBULK") { return c.prepareCopyIn(query) } @@ -60,6 +69,9 @@ func (c *MssqlConn) PrepareContext(ctx context.Context, query string) (driver.St } func (s *MssqlStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) { + if !s.c.connectionGood { + return nil, driver.ErrBadConn + } list := make([]namedValue, len(args)) for i, nv := range args { list[i] = namedValue(nv) @@ -68,6 +80,9 @@ func (s *MssqlStmt) QueryContext(ctx context.Context, args []driver.NamedValue) } func (s *MssqlStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) { + if !s.c.connectionGood { + return nil, driver.ErrBadConn + } list := make([]namedValue, len(args)) for i, nv := range args { list[i] = namedValue(nv) diff --git a/vendor/github.com/denisenkom/go-mssqldb/mssql_go19.go b/vendor/github.com/denisenkom/go-mssqldb/mssql_go19.go new file mode 100644 index 0000000000..5e8432b431 --- /dev/null +++ b/vendor/github.com/denisenkom/go-mssqldb/mssql_go19.go @@ -0,0 +1,53 @@ +// +build go1.9 + +package mssql + +import ( + "database/sql" + "database/sql/driver" + "fmt" + // "github.com/cockroachdb/apd" +) + +var _ driver.NamedValueChecker = &MssqlConn{} + +func (c *MssqlConn) CheckNamedValue(nv *driver.NamedValue) error { + switch v := nv.Value.(type) { + case sql.Out: + if c.outs == nil { + c.outs = make(map[string]interface{}) + } + c.outs[nv.Name] = v.Dest + + // Unwrap the Out value and check the inner value. 
+ lnv := *nv + lnv.Value = v.Dest + err := c.CheckNamedValue(&lnv) + if err != nil { + if err != driver.ErrSkip { + return err + } + lnv.Value, err = driver.DefaultParameterConverter.ConvertValue(lnv.Value) + if err != nil { + return err + } + } + nv.Value = sql.Out{Dest: lnv.Value} + return nil + // case *apd.Decimal: + // return nil + default: + return driver.ErrSkip + } +} + +func (s *MssqlStmt) makeParamExtra(val driver.Value) (res Param, err error) { + switch val := val.(type) { + case sql.Out: + res, err = s.makeParam(val.Dest) + res.Flags = fByRevValue + default: + err = fmt.Errorf("mssql: unknown type for %T", val) + } + return +} diff --git a/vendor/github.com/denisenkom/go-mssqldb/mssql_go19pre.go b/vendor/github.com/denisenkom/go-mssqldb/mssql_go19pre.go new file mode 100644 index 0000000000..27cce0bd01 --- /dev/null +++ b/vendor/github.com/denisenkom/go-mssqldb/mssql_go19pre.go @@ -0,0 +1,12 @@ +// +build !go1.9 + +package mssql + +import ( + "database/sql/driver" + "fmt" +) + +func (s *MssqlStmt) makeParamExtra(val driver.Value) (Param, error) { + return Param{}, fmt.Errorf("mssql: unknown type for %T", val) +} diff --git a/vendor/github.com/denisenkom/go-mssqldb/ntlm.go b/vendor/github.com/denisenkom/go-mssqldb/ntlm.go index f853435c6e..5bed668430 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/ntlm.go +++ b/vendor/github.com/denisenkom/go-mssqldb/ntlm.go @@ -59,7 +59,7 @@ type NTLMAuth struct { Workstation string } -func getAuth(user, password, service, workstation string) (Auth, bool) { +func getAuth(user, password, service, workstation string) (auth, bool) { if !strings.ContainsRune(user, '\\') { return nil, false } diff --git a/vendor/github.com/denisenkom/go-mssqldb/parser.go b/vendor/github.com/denisenkom/go-mssqldb/parser.go index 4eb0d00c37..8021ca603c 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/parser.go +++ b/vendor/github.com/denisenkom/go-mssqldb/parser.go @@ -13,7 +13,7 @@ type parser struct { paramMax int // using map as a set - namedParams map [string]bool + namedParams map[string]bool } func (p *parser) next() (rune, bool) { @@ -42,8 +42,8 @@ type stateFunc func(*parser) stateFunc func parseParams(query string) (string, int) { p := &parser{ - r: bytes.NewReader([]byte(query)), - namedParams: map [string]bool{}, + r: bytes.NewReader([]byte(query)), + namedParams: map[string]bool{}, } state := parseNormal for state != nil { diff --git a/vendor/github.com/denisenkom/go-mssqldb/sspi_windows.go b/vendor/github.com/denisenkom/go-mssqldb/sspi_windows.go index a6e95051c9..9b5bc6893f 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/sspi_windows.go +++ b/vendor/github.com/denisenkom/go-mssqldb/sspi_windows.go @@ -113,7 +113,7 @@ type SSPIAuth struct { ctxt SecHandle } -func getAuth(user, password, service, workstation string) (Auth, bool) { +func getAuth(user, password, service, workstation string) (auth, bool) { if user == "" { return &SSPIAuth{Service: service}, true } diff --git a/vendor/github.com/denisenkom/go-mssqldb/tds.go b/vendor/github.com/denisenkom/go-mssqldb/tds.go index 257ccbddbf..e4eb6f541b 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/tds.go +++ b/vendor/github.com/denisenkom/go-mssqldb/tds.go @@ -6,7 +6,6 @@ import ( "encoding/binary" "errors" "fmt" - "golang.org/x/net/context" // use the "x/net/context" for backwards compatibility. "io" "io/ioutil" "net" @@ -19,6 +18,8 @@ import ( "unicode" "unicode/utf16" "unicode/utf8" + + "golang.org/x/net/context" // use the "x/net/context" for backwards compatibility. 
) func parseInstances(msg []byte) map[string]map[string]string { @@ -630,9 +631,7 @@ func writeAllHeaders(w io.Writer, headers []headerStruct) (err error) { return nil } -func sendSqlBatch72(buf *tdsBuffer, - sqltext string, - headers []headerStruct) (err error) { +func sendSqlBatch72(buf *tdsBuffer, sqltext string, headers []headerStruct) (err error) { buf.BeginPacket(packSQLBatch) if err = writeAllHeaders(buf, headers); err != nil { @@ -1016,8 +1015,7 @@ func parseConnectParams(dsn string) (connectParams, error) { // https://msdn.microsoft.com/en-us/library/dd341108.aspx p.keepAlive = 30 * time.Second - keepAlive, ok := params["keepalive"] - if ok { + if keepAlive, ok := params["keepalive"]; ok { timeout, err := strconv.ParseUint(keepAlive, 0, 16) if err != nil { f := "Invalid keepAlive value '%s': %s" @@ -1027,7 +1025,7 @@ func parseConnectParams(dsn string) (connectParams, error) { } encrypt, ok := params["encrypt"] if ok { - if strings.ToUpper(encrypt) == "DISABLE" { + if strings.EqualFold(encrypt, "DISABLE") { p.disableEncryption = true } else { var err error @@ -1103,7 +1101,7 @@ func parseConnectParams(dsn string) (connectParams, error) { return p, nil } -type Auth interface { +type auth interface { InitialBytes() ([]byte, error) NextBytes([]byte) ([]byte, error) Free() @@ -1171,7 +1169,6 @@ func dialConnection(p connectParams) (conn net.Conn, err error) { f := "Unable to open tcp connection with host '%v:%v': %v" return nil, fmt.Errorf(f, p.host, p.port, err.Error()) } - return conn, err } @@ -1254,8 +1251,7 @@ initiate_connection: if p.certificate != "" { pem, err := ioutil.ReadFile(p.certificate) if err != nil { - f := "Cannot read certificate '%s': %s" - return nil, fmt.Errorf(f, p.certificate, err.Error()) + return nil, fmt.Errorf("Cannot read certificate %q: %v", p.certificate, err) } certs := x509.NewCertPool() certs.AppendCertsFromPEM(pem) @@ -1269,11 +1265,11 @@ initiate_connection: toconn.buf = outbuf tlsConn := tls.Client(toconn, &config) err = tlsConn.Handshake() + toconn.buf = nil outbuf.transport = tlsConn if err != nil { - f := "TLS Handshake failed: %s" - return nil, fmt.Errorf(f, err.Error()) + return nil, fmt.Errorf("TLS Handshake failed: %v", err) } if encrypt == encryptOff { outbuf.afterFirst = func() { @@ -1284,7 +1280,7 @@ initiate_connection: login := login{ TDSVersion: verTDS74, - PacketSize: outbuf.PackageSize(), + PacketSize: uint32(outbuf.PackageSize()), Database: p.database, OptionFlags2: fODBC, // to get unlimited TEXTSIZE HostName: p.workstation, @@ -1313,7 +1309,7 @@ initiate_connection: var sspi_msg []byte continue_login: tokchan := make(chan tokenStruct, 5) - go processResponse(context.Background(), &sess, tokchan) + go processResponse(context.Background(), &sess, tokchan, nil) success := false for tok := range tokchan { switch token := tok.(type) { diff --git a/vendor/github.com/denisenkom/go-mssqldb/token.go b/vendor/github.com/denisenkom/go-mssqldb/token.go index 0e7d55214d..328b39797b 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/token.go +++ b/vendor/github.com/denisenkom/go-mssqldb/token.go @@ -3,13 +3,12 @@ package mssql import ( "encoding/binary" "errors" + "fmt" "io" "net" "strconv" "strings" - "database/sql/driver" - "golang.org/x/net/context" ) @@ -24,6 +23,7 @@ const ( tokenOrder token = 169 // 0xA9 tokenError token = 170 // 0xAA tokenInfo token = 171 // 0xAB + tokenReturnValue token = 0xAC tokenLoginAck token = 173 // 0xad tokenRow token = 209 // 0xd1 tokenNbcRow token = 210 // 0xd2 @@ -519,7 +519,29 @@ func parseInfo(r 
*tdsBuffer) (res Error) { return } -func processSingleResponse(sess *tdsSession, ch chan tokenStruct) { +// https://msdn.microsoft.com/en-us/library/dd303881.aspx +func parseReturnValue(r *tdsBuffer) (nv namedValue) { + /* + ParamOrdinal + ParamName + Status + UserType + Flags + TypeInfo + CryptoMetadata + Value + */ + r.uint16() + nv.Name = r.BVarChar() + r.byte() + r.uint32() // UserType (uint16 prior to 7.2) + r.uint16() + ti := readTypeInfo(r) + nv.Value = ti.Reader(&ti, r) + return +} + +func processSingleResponse(sess *tdsSession, ch chan tokenStruct, outs map[string]interface{}) { defer func() { if err := recover(); err != nil { if sess.logFlags&logErrors != 0 { @@ -539,7 +561,7 @@ func processSingleResponse(sess *tdsSession, ch chan tokenStruct) { return } if packet_type != packReply { - badStreamPanic(driver.ErrBadConn) + badStreamPanic(fmt.Errorf("unexpected packet type in reply: got %v, expected %v", packet_type, packReply)) } var columns []columnStruct errs := make([]Error, 0, 5) @@ -614,12 +636,38 @@ func processSingleResponse(sess *tdsSession, ch chan tokenStruct) { if sess.logFlags&logMessages != 0 { sess.log.Println(info.Message) } + case tokenReturnValue: + nv := parseReturnValue(sess.buf) + if len(nv.Name) > 0 { + name := nv.Name[1:] // Remove the leading "@". + if ov, has := outs[name]; has { + err = scanIntoOut(nv.Value, ov) + if err != nil { + fmt.Println("scan error", err) + ch <- err + } + } + } default: - badStreamPanic(driver.ErrBadConn) + badStreamPanic(fmt.Errorf("unknown token type returned: %v", token)) } } } +func scanIntoOut(fromServer, scanInto interface{}) error { + switch fs := fromServer.(type) { + case int64: + switch si := scanInto.(type) { + case *int64: + *si = fs + default: + return fmt.Errorf("unsupported scan into type %[1]T for server type %[2]T", scanInto, fromServer) + } + return nil + } + return fmt.Errorf("unsupported type from server %[1]T=%[1]v", fromServer) +} + type parseRespIter byte const ( @@ -735,7 +783,7 @@ func (ts *parseResp) iter(ctx context.Context, ch chan tokenStruct, tokChan chan } } -func processResponse(ctx context.Context, sess *tdsSession, ch chan tokenStruct) { +func processResponse(ctx context.Context, sess *tdsSession, ch chan tokenStruct, outs map[string]interface{}) { ts := &parseResp{ sess: sess, ctxDone: ctx.Done(), @@ -755,7 +803,7 @@ func processResponse(ctx context.Context, sess *tdsSession, ch chan tokenStruct) ts.dlog("initiating response reading") tokChan := make(chan tokenStruct) - go processSingleResponse(sess, tokChan) + go processSingleResponse(sess, tokChan, outs) // Loop over multiple tokens in response. tokensLoop: diff --git a/vendor/github.com/denisenkom/go-mssqldb/types.go b/vendor/github.com/denisenkom/go-mssqldb/types.go index 1270c95a4b..6832e2ec8b 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/types.go +++ b/vendor/github.com/denisenkom/go-mssqldb/types.go @@ -874,6 +874,10 @@ func decodeUdt(ti typeInfo, buf []byte) []byte { // column type "bigint" this should return "reflect.TypeOf(int64(0))". func makeGoLangScanType(ti typeInfo) reflect.Type { switch ti.TypeId { + case typeInt1: + return reflect.TypeOf(int64(0)) + case typeInt2: + return reflect.TypeOf(int64(0)) case typeInt4: return reflect.TypeOf(int64(0)) case typeInt8: @@ -1086,6 +1090,10 @@ func makeDecl(ti typeInfo) string { // "TIMESTAMP". 
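Taken together, the CheckNamedValue/outs plumbing and the tokenReturnValue handling above let OUTPUT parameters flow back into sql.Out destinations, with scanIntoOut currently supporting only int64 into *int64. A hedged usage sketch; the driver name, connection string, and the dbo.CountRows procedure with its @n BIGINT OUTPUT parameter are placeholders, not anything defined in this patch:

package main

import (
	"database/sql"
	"log"

	_ "github.com/denisenkom/go-mssqldb"
)

func main() {
	// Placeholder DSN; adjust for a real server.
	db, err := sql.Open("mssql", "server=localhost;user id=sa;password=secret")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var n int64
	// The query text is a bare procedure name, so it is sent as an RPC call;
	// @n comes back in a RETURNVALUE token and is scanned into the sql.Out
	// destination by scanIntoOut.
	if _, err := db.Exec("dbo.CountRows", sql.Named("n", sql.Out{Dest: &n})); err != nil {
		log.Fatal(err)
	}
	log.Printf("n = %d", n)
}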
func makeGoLangTypeName(ti typeInfo) string { switch ti.TypeId { + case typeInt1: + return "TINYINT" + case typeInt2: + return "SMALLINT" case typeInt4: return "INT" case typeInt8: @@ -1189,6 +1197,10 @@ func makeGoLangTypeName(ti typeInfo) string { // bytea(30) (30, true) func makeGoLangTypeLength(ti typeInfo) (int64, bool) { switch ti.TypeId { + case typeInt1: + return 0, false + case typeInt2: + return 0, false case typeInt4: return 0, false case typeInt8: @@ -1304,6 +1316,10 @@ func makeGoLangTypeLength(ti typeInfo) (int64, bool) { // bytea(30) (30, true) func makeGoLangTypePrecisionScale(ti typeInfo) (int64, int64, bool) { switch ti.TypeId { + case typeInt1: + return 0, 0, false + case typeInt2: + return 0, 0, false case typeInt4: return 0, 0, false case typeInt8: diff --git a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md index b605b45093..c21551f6bb 100644 --- a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md +++ b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md @@ -1,5 +1,11 @@ ## `jwt-go` Version History +#### 3.1.0 + +* Improvements to `jwt` command line tool +* Added `SkipClaimsValidation` option to `Parser` +* Documentation updates + #### 3.0.0 * **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go index 4ca9ccac72..db37f1fe4e 100644 --- a/vendor/github.com/docker/docker/api/types/client.go +++ b/vendor/github.com/docker/docker/api/types/client.go @@ -179,10 +179,7 @@ type ImageBuildOptions struct { ExtraHosts []string // List of extra hosts Target string SessionID string - - // TODO @jhowardmsft LCOW Support: This will require extending to include - // `Platform string`, but is omitted for now as it's hard-coded temporarily - // to avoid API changes. + Platform string } // ImageBuildResponse holds information @@ -195,7 +192,8 @@ type ImageBuildResponse struct { // ImageCreateOptions holds information to create images. type ImageCreateOptions struct { - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry. + Platform string // Platform is the target platform of the image if it needs to be pulled from the registry. } // ImageImportSource holds source information for ImageImport @@ -206,9 +204,10 @@ type ImageImportSource struct { // ImageImportOptions holds information to import images from the client host. type ImageImportOptions struct { - Tag string // Tag is the name to tag this image with. This attribute is deprecated. - Message string // Message is the message to tag the image with - Changes []string // Changes are the raw changes to apply to this image + Tag string // Tag is the name to tag this image with. This attribute is deprecated. + Message string // Message is the message to tag the image with + Changes []string // Changes are the raw changes to apply to this image + Platform string // Platform is the target platform of the image } // ImageListOptions holds parameters to filter the list of images with. 
@@ -229,6 +228,7 @@ type ImagePullOptions struct { All bool RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry PrivilegeFunc RequestPrivilegeFunc + Platform string } // RequestPrivilegeFunc is a function interface that diff --git a/vendor/github.com/docker/docker/api/types/configs.go b/vendor/github.com/docker/docker/api/types/configs.go index e4d2ce6e36..20c19f2132 100644 --- a/vendor/github.com/docker/docker/api/types/configs.go +++ b/vendor/github.com/docker/docker/api/types/configs.go @@ -16,7 +16,6 @@ type ContainerCreateConfig struct { HostConfig *container.HostConfig NetworkingConfig *network.NetworkingConfig AdjustCPUShares bool - Platform string } // ContainerRmConfig holds arguments for the container remove diff --git a/vendor/github.com/docker/docker/opts/hosts.go b/vendor/github.com/docker/docker/opts/hosts.go index 594cccf2fb..f46b8ee711 100644 --- a/vendor/github.com/docker/docker/opts/hosts.go +++ b/vendor/github.com/docker/docker/opts/hosts.go @@ -29,9 +29,9 @@ var ( // ValidateHost validates that the specified string is a valid host and returns it. func ValidateHost(val string) (string, error) { host := strings.TrimSpace(val) - // The empty string means default and is not handled by parseDockerDaemonHost + // The empty string means default and is not handled by parseDaemonHost if host != "" { - _, err := parseDockerDaemonHost(host) + _, err := parseDaemonHost(host) if err != nil { return val, err } @@ -52,7 +52,7 @@ func ParseHost(defaultToTLS bool, val string) (string, error) { } } else { var err error - host, err = parseDockerDaemonHost(host) + host, err = parseDaemonHost(host) if err != nil { return val, err } @@ -60,9 +60,9 @@ func ParseHost(defaultToTLS bool, val string) (string, error) { return host, nil } -// parseDockerDaemonHost parses the specified address and returns an address that will be used as the host. +// parseDaemonHost parses the specified address and returns an address that will be used as the host. // Depending of the address specified, this may return one of the global Default* strings defined in hosts.go. 
-func parseDockerDaemonHost(addr string) (string, error) { +func parseDaemonHost(addr string) (string, error) { addrParts := strings.SplitN(addr, "://", 2) if len(addrParts) == 1 && addrParts[0] != "" { addrParts = []string{"tcp", addrParts[0]} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go index 876e605680..aa55637565 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive.go @@ -20,7 +20,6 @@ import ( "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/promise" "github.com/docker/docker/pkg/system" "github.com/sirupsen/logrus" ) @@ -1095,36 +1094,42 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { } r, w := io.Pipe() - errC := promise.Go(func() error { - defer w.Close() + errC := make(chan error, 1) - srcF, err := os.Open(src) - if err != nil { - return err - } - defer srcF.Close() + go func() { + defer close(errC) - hdr, err := tar.FileInfoHeader(srcSt, "") - if err != nil { - return err - } - hdr.Name = filepath.Base(dst) - hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + errC <- func() error { + defer w.Close() - if err := remapIDs(archiver.IDMappingsVar, hdr); err != nil { - return err - } + srcF, err := os.Open(src) + if err != nil { + return err + } + defer srcF.Close() - tw := tar.NewWriter(w) - defer tw.Close() - if err := tw.WriteHeader(hdr); err != nil { - return err - } - if _, err := io.Copy(tw, srcF); err != nil { - return err - } - return nil - }) + hdr, err := tar.FileInfoHeader(srcSt, "") + if err != nil { + return err + } + hdr.Name = filepath.Base(dst) + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + + if err := remapIDs(archiver.IDMappingsVar, hdr); err != nil { + return err + } + + tw := tar.NewWriter(w) + defer tw.Close() + if err := tw.WriteHeader(hdr); err != nil { + return err + } + if _, err := io.Copy(tw, srcF); err != nil { + return err + } + return nil + }() + }() defer func() { if er := <-errC; err == nil && er != nil { err = er diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go index ac4a348d5a..02e95adff5 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go @@ -50,8 +50,8 @@ func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) ( // Currently go does not fill in the major/minors if s.Mode&unix.S_IFBLK != 0 || s.Mode&unix.S_IFCHR != 0 { - hdr.Devmajor = int64(major(uint64(s.Rdev))) // nolint: unconvert - hdr.Devminor = int64(minor(uint64(s.Rdev))) // nolint: unconvert + hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) // nolint: unconvert + hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) // nolint: unconvert } } @@ -77,14 +77,6 @@ func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil } -func major(device uint64) uint64 { - return (device >> 8) & 0xfff -} - -func minor(device uint64) uint64 { - return (device & 0xff) | ((device >> 12) & 0xfff00) -} - // handleTarTypeBlockCharFifo is an OS-specific helper function used by // createTarFile to handle the following types of header: Block; Char; Fifo func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { diff --git 
a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go index 8e96d961f3..e9eb478fe3 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go +++ b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go @@ -294,7 +294,7 @@ func OverlayChanges(layers []string, rw string) ([]Change, error) { func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) { if fi.Mode()&os.ModeCharDevice != 0 { s := fi.Sys().(*syscall.Stat_t) - if major(s.Rdev) == 0 && minor(s.Rdev) == 0 { + if unix.Major(uint64(s.Rdev)) == 0 && unix.Minor(uint64(s.Rdev)) == 0 { // nolint: unconvert return path, nil } } diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go index 298eb2ad68..d1e036d5c6 100644 --- a/vendor/github.com/docker/docker/pkg/archive/copy.go +++ b/vendor/github.com/docker/docker/pkg/archive/copy.go @@ -223,7 +223,7 @@ func CopyInfoDestinationPath(path string) (info CopyInfo, err error) { // Ensure destination parent dir exists. dstParent, _ := SplitPathDirEntry(path) - parentDirStat, err := os.Lstat(dstParent) + parentDirStat, err := os.Stat(dstParent) if err != nil { return CopyInfo{}, err } diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go deleted file mode 100644 index 0f2cb7ab93..0000000000 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go +++ /dev/null @@ -1,7 +0,0 @@ -package fileutils - -// GetTotalUsedFds Returns the number of used File Descriptors. -// On Solaris these limits are per process and not systemwide -func GetTotalUsedFds() int { - return -1 -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go index 8701bb7fa9..ff7968f854 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go @@ -26,14 +26,19 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown // so that we can chown all of them properly at the end. 
If chownExisting is false, we won't // chown the full directory path if it exists var paths []string - if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { - paths = []string{path} - } else if err == nil && chownExisting { + + stat, err := system.Stat(path) + if err == nil { + if !chownExisting { + return nil + } + // short-circuit--we were called with an existing directory and chown was requested - return os.Chown(path, ownerUID, ownerGID) - } else if err == nil { - // nothing to do; directory path fully exists already and chown was NOT requested - return nil + return lazyChown(path, ownerUID, ownerGID, stat) + } + + if os.IsNotExist(err) { + paths = []string{path} } if mkAll { @@ -60,7 +65,7 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown // even if it existed, we will chown the requested path + any subpaths that // didn't exist when we called MkdirAll for _, pathComponent := range paths { - if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil { + if err := lazyChown(pathComponent, ownerUID, ownerGID, nil); err != nil { return err } } @@ -202,3 +207,20 @@ func callGetent(args string) (io.Reader, error) { } return bytes.NewReader(out), nil } + +// lazyChown performs a chown only if the uid/gid don't match what's requested +// Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the +// dir is on an NFS share, so don't call chown unless we absolutely must. +func lazyChown(p string, uid, gid int, stat *system.StatT) error { + if stat == nil { + var err error + stat, err = system.Stat(p) + if err != nil { + return err + } + } + if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) { + return nil + } + return os.Chown(p, uid, gid) +} diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go deleted file mode 100644 index 4734c31119..0000000000 --- a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go +++ /dev/null @@ -1,42 +0,0 @@ -package jsonlog - -import ( - "encoding/json" - "fmt" - "time" -) - -// JSONLog represents a log message, typically a single entry from a given log stream. -// JSONLogs can be easily serialized to and from JSON and support custom formatting. -type JSONLog struct { - // Log is the log message - Log string `json:"log,omitempty"` - // Stream is the log source - Stream string `json:"stream,omitempty"` - // Created is the created timestamp of log - Created time.Time `json:"time"` - // Attrs is the list of extra attributes provided by the user - Attrs map[string]string `json:"attrs,omitempty"` -} - -// Format returns the log formatted according to format -// If format is nil, returns the log message -// If format is json, returns the log marshaled in json format -// By default, returns the log with the log time formatted according to format. -func (jl *JSONLog) Format(format string) (string, error) { - if format == "" { - return jl.Log, nil - } - if format == "json" { - m, err := json.Marshal(jl) - return string(m), err - } - return fmt.Sprintf("%s %s", jl.Created.Format(format), jl.Log), nil -} - -// Reset resets the log to nil. 
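The lazyChown helper added to idtools above only calls chown when the current owner differs from the requested one, because a redundant chown can still fail in some environments (for example on an NFS mount). A Unix-only sketch of the same idea using plain os/syscall calls; chownIfNeeded is illustrative, not the helper itself:

package main

import (
	"fmt"
	"os"
	"syscall"
)

// chownIfNeeded skips the chown when the path already has the requested
// owner, and only falls through to os.Chown when it would actually change something.
func chownIfNeeded(path string, uid, gid int) error {
	fi, err := os.Stat(path)
	if err != nil {
		return err
	}
	if st, ok := fi.Sys().(*syscall.Stat_t); ok && int(st.Uid) == uid && int(st.Gid) == gid {
		return nil // already owned as requested; avoid the chown entirely
	}
	return os.Chown(path, uid, gid)
}

func main() {
	if err := chownIfNeeded("/tmp", os.Getuid(), os.Getgid()); err != nil {
		fmt.Println("chown not performed:", err)
	}
}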
-func (jl *JSONLog) Reset() { - jl.Log = "" - jl.Stream = "" - jl.Created = time.Time{} -} diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go deleted file mode 100644 index 83ce684a8e..0000000000 --- a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go +++ /dev/null @@ -1,178 +0,0 @@ -// This code was initially generated by ffjson -// This code was generated via the following steps: -// $ go get -u github.com/pquerna/ffjson -// $ make BIND_DIR=. shell -// $ ffjson pkg/jsonlog/jsonlog.go -// $ mv pkg/jsonglog/jsonlog_ffjson.go pkg/jsonlog/jsonlog_marshalling.go -// -// It has been modified to improve the performance of time marshalling to JSON -// and to clean it up. -// Should this code need to be regenerated when the JSONLog struct is changed, -// the relevant changes which have been made are: -// import ( -// "bytes" -//- -// "unicode/utf8" -// ) -// -// func (mj *JSONLog) MarshalJSON() ([]byte, error) { -//@@ -20,13 +16,13 @@ func (mj *JSONLog) MarshalJSON() ([]byte, error) { -// } -// return buf.Bytes(), nil -// } -//+ -// func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { -//- var err error -//- var obj []byte -//- var first bool = true -//- _ = obj -//- _ = err -//- _ = first -//+ var ( -//+ err error -//+ timestamp string -//+ first bool = true -//+ ) -// buf.WriteString(`{`) -// if len(mj.Log) != 0 { -// if first == true { -//@@ -52,11 +48,11 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { -// buf.WriteString(`,`) -// } -// buf.WriteString(`"time":`) -//- obj, err = mj.Created.MarshalJSON() -//+ timestamp, err = FastTimeMarshalJSON(mj.Created) -// if err != nil { -// return err -// } -//- buf.Write(obj) -//+ buf.WriteString(timestamp) -// buf.WriteString(`}`) -// return nil -// } -// @@ -81,9 +81,10 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { -// if len(mj.Log) != 0 { -// - if first == true { -// - first = false -// - } else { -// - buf.WriteString(`,`) -// - } -// + first = false -// buf.WriteString(`"log":`) -// ffjsonWriteJSONString(buf, mj.Log) -// } - -package jsonlog - -import ( - "bytes" - "unicode/utf8" -) - -// MarshalJSON marshals the JSONLog. -func (mj *JSONLog) MarshalJSON() ([]byte, error) { - var buf bytes.Buffer - buf.Grow(1024) - if err := mj.MarshalJSONBuf(&buf); err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// MarshalJSONBuf marshals the JSONLog and stores the result to a bytes.Buffer. 
-func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { - var ( - err error - timestamp string - first = true - ) - buf.WriteString(`{`) - if len(mj.Log) != 0 { - first = false - buf.WriteString(`"log":`) - ffjsonWriteJSONString(buf, mj.Log) - } - if len(mj.Stream) != 0 { - if first { - first = false - } else { - buf.WriteString(`,`) - } - buf.WriteString(`"stream":`) - ffjsonWriteJSONString(buf, mj.Stream) - } - if !first { - buf.WriteString(`,`) - } - buf.WriteString(`"time":`) - timestamp, err = FastTimeMarshalJSON(mj.Created) - if err != nil { - return err - } - buf.WriteString(timestamp) - buf.WriteString(`}`) - return nil -} - -func ffjsonWriteJSONString(buf *bytes.Buffer, s string) { - const hex = "0123456789abcdef" - - buf.WriteByte('"') - start := 0 - for i := 0; i < len(s); { - if b := s[i]; b < utf8.RuneSelf { - if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { - i++ - continue - } - if start < i { - buf.WriteString(s[start:i]) - } - switch b { - case '\\', '"': - buf.WriteByte('\\') - buf.WriteByte(b) - case '\n': - buf.WriteByte('\\') - buf.WriteByte('n') - case '\r': - buf.WriteByte('\\') - buf.WriteByte('r') - default: - - buf.WriteString(`\u00`) - buf.WriteByte(hex[b>>4]) - buf.WriteByte(hex[b&0xF]) - } - i++ - start = i - continue - } - c, size := utf8.DecodeRuneInString(s[i:]) - if c == utf8.RuneError && size == 1 { - if start < i { - buf.WriteString(s[start:i]) - } - buf.WriteString(`\ufffd`) - i += size - start = i - continue - } - - if c == '\u2028' || c == '\u2029' { - if start < i { - buf.WriteString(s[start:i]) - } - buf.WriteString(`\u202`) - buf.WriteByte(hex[c&0xF]) - i += size - start = i - continue - } - i += size - } - if start < len(s) { - buf.WriteString(s[start:]) - } - buf.WriteByte('"') -} diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go deleted file mode 100644 index 0ba716f261..0000000000 --- a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go +++ /dev/null @@ -1,122 +0,0 @@ -package jsonlog - -import ( - "bytes" - "encoding/json" - "unicode/utf8" -) - -// JSONLogs is based on JSONLog. -// It allows marshalling JSONLog from Log as []byte -// and an already marshalled Created timestamp. -type JSONLogs struct { - Log []byte `json:"log,omitempty"` - Stream string `json:"stream,omitempty"` - Created string `json:"time"` - - // json-encoded bytes - RawAttrs json.RawMessage `json:"attrs,omitempty"` -} - -// MarshalJSONBuf is based on the same method from JSONLog -// It has been modified to take into account the necessary changes. -func (mj *JSONLogs) MarshalJSONBuf(buf *bytes.Buffer) error { - var first = true - - buf.WriteString(`{`) - if len(mj.Log) != 0 { - first = false - buf.WriteString(`"log":`) - ffjsonWriteJSONBytesAsString(buf, mj.Log) - } - if len(mj.Stream) != 0 { - if first { - first = false - } else { - buf.WriteString(`,`) - } - buf.WriteString(`"stream":`) - ffjsonWriteJSONString(buf, mj.Stream) - } - if len(mj.RawAttrs) > 0 { - if first { - first = false - } else { - buf.WriteString(`,`) - } - buf.WriteString(`"attrs":`) - buf.Write(mj.RawAttrs) - } - if !first { - buf.WriteString(`,`) - } - buf.WriteString(`"time":`) - buf.WriteString(mj.Created) - buf.WriteString(`}`) - return nil -} - -// This is based on ffjsonWriteJSONBytesAsString. It has been changed -// to accept a string passed as a slice of bytes. 
-func ffjsonWriteJSONBytesAsString(buf *bytes.Buffer, s []byte) { - const hex = "0123456789abcdef" - - buf.WriteByte('"') - start := 0 - for i := 0; i < len(s); { - if b := s[i]; b < utf8.RuneSelf { - if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { - i++ - continue - } - if start < i { - buf.Write(s[start:i]) - } - switch b { - case '\\', '"': - buf.WriteByte('\\') - buf.WriteByte(b) - case '\n': - buf.WriteByte('\\') - buf.WriteByte('n') - case '\r': - buf.WriteByte('\\') - buf.WriteByte('r') - default: - - buf.WriteString(`\u00`) - buf.WriteByte(hex[b>>4]) - buf.WriteByte(hex[b&0xF]) - } - i++ - start = i - continue - } - c, size := utf8.DecodeRune(s[i:]) - if c == utf8.RuneError && size == 1 { - if start < i { - buf.Write(s[start:i]) - } - buf.WriteString(`\ufffd`) - i += size - start = i - continue - } - - if c == '\u2028' || c == '\u2029' { - if start < i { - buf.Write(s[start:i]) - } - buf.WriteString(`\u202`) - buf.WriteByte(hex[c&0xF]) - i += size - start = i - continue - } - i += size - } - if start < len(s) { - buf.Write(s[start:]) - } - buf.WriteByte('"') -} diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling.go b/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling.go deleted file mode 100644 index 2117338149..0000000000 --- a/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling.go +++ /dev/null @@ -1,27 +0,0 @@ -// Package jsonlog provides helper functions to parse and print time (time.Time) as JSON. -package jsonlog - -import ( - "errors" - "time" -) - -const ( - // RFC3339NanoFixed is our own version of RFC339Nano because we want one - // that pads the nano seconds part with zeros to ensure - // the timestamps are aligned in the logs. - RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" - // JSONFormat is the format used by FastMarshalJSON - JSONFormat = `"` + time.RFC3339Nano + `"` -) - -// FastTimeMarshalJSON avoids one of the extra allocations that -// time.MarshalJSON is making. -func FastTimeMarshalJSON(t time.Time) (string, error) { - if y := t.Year(); y < 0 || y >= 10000 { - // RFC 3339 is clear that years are 4 digits exactly. - // See golang.org/issue/4556#c15 for more discussion. - return "", errors.New("time.MarshalJSON: year outside of range [0,9999]") - } - return t.Format(JSONFormat), nil -} diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go index 09fc4cc745..6cfa464830 100644 --- a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go +++ b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go @@ -9,11 +9,14 @@ import ( "time" gotty "github.com/Nvveen/Gotty" - "github.com/docker/docker/pkg/jsonlog" "github.com/docker/docker/pkg/term" units "github.com/docker/go-units" ) +// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to +// ensure the formatted time isalways the same number of characters. +const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" + // JSONError wraps a concrete Code and Message, `Code` is // is an integer error code, `Message` is the error message. 
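With pkg/jsonlog removed, jsonmessage now carries its own RFC3339NanoFixed layout. The only difference from time.RFC3339Nano is that fractional seconds are zero-padded to nine digits so timestamps stay the same width in logs; a quick illustration:

package main

import (
	"fmt"
	"time"
)

const rfc3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"

func main() {
	t := time.Date(2017, 9, 20, 15, 59, 35, 500000000, time.UTC)
	fmt.Println(t.Format(time.RFC3339Nano)) // 2017-09-20T15:59:35.5Z (trailing zeros trimmed)
	fmt.Println(t.Format(rfc3339NanoFixed)) // 2017-09-20T15:59:35.500000000Z (fixed width)
}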
type JSONError struct { @@ -199,9 +202,9 @@ func (jm *JSONMessage) Display(out io.Writer, termInfo termInfo) error { return nil } if jm.TimeNano != 0 { - fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(jsonlog.RFC3339NanoFixed)) + fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(RFC3339NanoFixed)) } else if jm.Time != 0 { - fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(jsonlog.RFC3339NanoFixed)) + fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(RFC3339NanoFixed)) } if jm.ID != "" { fmt.Fprintf(out, "%s: ", jm.ID) diff --git a/vendor/github.com/docker/docker/pkg/mount/mount.go b/vendor/github.com/docker/docker/pkg/mount/mount.go index c9fdfd6942..eced0219fd 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mount.go +++ b/vendor/github.com/docker/docker/pkg/mount/mount.go @@ -3,6 +3,8 @@ package mount import ( "sort" "strings" + + "github.com/sirupsen/logrus" ) // GetMounts retrieves a list of mounts for the current running process. @@ -74,12 +76,18 @@ func RecursiveUnmount(target string) error { if !strings.HasPrefix(m.Mountpoint, target) { continue } - if err := Unmount(m.Mountpoint); err != nil && i == len(mounts)-1 { + logrus.Debugf("Trying to unmount %s", m.Mountpoint) + err = Unmount(m.Mountpoint) + if err != nil && i == len(mounts)-1 { if mounted, err := Mounted(m.Mountpoint); err != nil || mounted { return err } // Ignore errors for submounts and continue trying to unmount others // The final unmount should fail if there ane any submounts remaining + } else if err != nil { + logrus.Errorf("Failed to unmount %s: %v", m.Mountpoint, err) + } else if err == nil { + logrus.Debugf("Unmounted %s", m.Mountpoint) } } return nil diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go b/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go deleted file mode 100644 index 48b86771e7..0000000000 --- a/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go +++ /dev/null @@ -1,34 +0,0 @@ -// +build solaris,cgo - -package mount - -import ( - "unsafe" - - "golang.org/x/sys/unix" -) - -// #include -// #include -// #include -// int Mount(const char *spec, const char *dir, int mflag, -// char *fstype, char *dataptr, int datalen, char *optptr, int optlen) { -// return mount(spec, dir, mflag, fstype, dataptr, datalen, optptr, optlen); -// } -import "C" - -func mount(device, target, mType string, flag uintptr, data string) error { - spec := C.CString(device) - dir := C.CString(target) - fstype := C.CString(mType) - _, err := C.Mount(spec, dir, C.int(flag), fstype, nil, 0, nil, 0) - C.free(unsafe.Pointer(spec)) - C.free(unsafe.Pointer(dir)) - C.free(unsafe.Pointer(fstype)) - return err -} - -func unmount(target string, flag int) error { - err := unix.Unmount(target, flag) - return err -} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go deleted file mode 100644 index ad9ab57f8b..0000000000 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go +++ /dev/null @@ -1,37 +0,0 @@ -// +build solaris,cgo - -package mount - -/* -#include -#include -*/ -import "C" - -import ( - "fmt" -) - -func parseMountTable() ([]*Info, error) { - mnttab := C.fopen(C.CString(C.MNTTAB), C.CString("r")) - if mnttab == nil { - return nil, fmt.Errorf("Failed to open %s", C.MNTTAB) - } - - var out []*Info - var mp C.struct_mnttab - - ret := C.getmntent(mnttab, &mp) - for ret == 0 { - var mountinfo Info - mountinfo.Mountpoint = C.GoString(mp.mnt_mountp) - mountinfo.Source 
= C.GoString(mp.mnt_special) - mountinfo.Fstype = C.GoString(mp.mnt_fstype) - mountinfo.Opts = C.GoString(mp.mnt_mntopts) - out = append(out, &mountinfo) - ret = C.getmntent(mnttab, &mp) - } - - C.fclose(mnttab) - return out, nil -} diff --git a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go deleted file mode 100644 index 09f6b03cbc..0000000000 --- a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go +++ /dev/null @@ -1,58 +0,0 @@ -// +build solaris - -package mount - -// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. -// See the supported options in flags.go for further reference. -func MakeShared(mountPoint string) error { - return ensureMountedAs(mountPoint, "shared") -} - -// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. -// See the supported options in flags.go for further reference. -func MakeRShared(mountPoint string) error { - return ensureMountedAs(mountPoint, "rshared") -} - -// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. -// See the supported options in flags.go for further reference. -func MakePrivate(mountPoint string) error { - return ensureMountedAs(mountPoint, "private") -} - -// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option -// enabled. See the supported options in flags.go for further reference. -func MakeRPrivate(mountPoint string) error { - return ensureMountedAs(mountPoint, "rprivate") -} - -// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. -// See the supported options in flags.go for further reference. -func MakeSlave(mountPoint string) error { - return ensureMountedAs(mountPoint, "slave") -} - -// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. -// See the supported options in flags.go for further reference. -func MakeRSlave(mountPoint string) error { - return ensureMountedAs(mountPoint, "rslave") -} - -// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option -// enabled. See the supported options in flags.go for further reference. -func MakeUnbindable(mountPoint string) error { - return ensureMountedAs(mountPoint, "unbindable") -} - -// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount -// option enabled. See the supported options in flags.go for further reference. -func MakeRUnbindable(mountPoint string) error { - return ensureMountedAs(mountPoint, "runbindable") -} - -func ensureMountedAs(mountPoint, options string) error { - // TODO: Solaris does not support bind mounts. - // Evaluate lofs and also look at the relevant - // mount flags to be supported. - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/promise/promise.go b/vendor/github.com/docker/docker/pkg/promise/promise.go deleted file mode 100644 index dd52b9082f..0000000000 --- a/vendor/github.com/docker/docker/pkg/promise/promise.go +++ /dev/null @@ -1,11 +0,0 @@ -package promise - -// Go is a basic promise implementation: it wraps calls a function in a goroutine, -// and returns a channel which will later return the function's return value. 
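pkg/promise is removed because its only job is easily done inline, as the archive.go hunk earlier in this patch shows: run the closure in a goroutine and report its error on a buffered channel so the send never blocks. A minimal sketch of that pattern:

package main

import (
	"errors"
	"fmt"
)

// run executes f in a goroutine and hands its error back on a buffered
// channel; the buffer of one means the goroutine never blocks on the send.
func run(f func() error) <-chan error {
	errC := make(chan error, 1)
	go func() {
		defer close(errC)
		errC <- f()
	}()
	return errC
}

func main() {
	errC := run(func() error { return errors.New("boom") })
	fmt.Println(<-errC)
}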
-func Go(f func() error) chan error { - ch := make(chan error, 1) - go func() { - ch <- f() - }() - return ch -} diff --git a/vendor/github.com/docker/docker/pkg/system/exitcode.go b/vendor/github.com/docker/docker/pkg/system/exitcode.go index 60f0514b1d..a5e5616c4f 100644 --- a/vendor/github.com/docker/docker/pkg/system/exitcode.go +++ b/vendor/github.com/docker/docker/pkg/system/exitcode.go @@ -17,17 +17,3 @@ func GetExitCode(err error) (int, error) { } return exitCode, fmt.Errorf("failed to get exit code") } - -// ProcessExitCode process the specified error and returns the exit status code -// if the error was of type exec.ExitError, returns nothing otherwise. -func ProcessExitCode(err error) (exitCode int) { - if err != nil { - var exiterr error - if exitCode, exiterr = GetExitCode(err); exiterr != nil { - // TODO: Fix this so we check the error's text. - // we've failed to retrieve exit code, so we set it to 127 - exitCode = 127 - } - } - return -} diff --git a/vendor/github.com/docker/docker/pkg/system/init_windows.go b/vendor/github.com/docker/docker/pkg/system/init_windows.go index e751837267..75f8f2c061 100644 --- a/vendor/github.com/docker/docker/pkg/system/init_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/init_windows.go @@ -2,17 +2,16 @@ package system import "os" -// LCOWSupported determines if Linux Containers on Windows are supported. -// Note: This feature is in development (06/17) and enabled through an -// environment variable. At a future time, it will be enabled based -// on build number. @jhowardmsft +// lcowSupported determines if Linux Containers on Windows are supported. var lcowSupported = false // InitLCOW sets whether LCOW is supported or not +// TODO @jhowardmsft. +// 1. Replace with RS3 RTM build number. +// 2. Remove the getenv check when image-store is coalesced as shouldn't be needed anymore. func InitLCOW(experimental bool) { - // LCOW initialization - if experimental && os.Getenv("LCOW_SUPPORTED") != "" { + v := GetOSVersion() + if experimental && v.Build > 16270 && os.Getenv("LCOW_SUPPORTED") != "" { lcowSupported = true } - } diff --git a/vendor/github.com/docker/docker/pkg/system/lcow.go b/vendor/github.com/docker/docker/pkg/system/lcow.go new file mode 100644 index 0000000000..b88c11e316 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/lcow.go @@ -0,0 +1,58 @@ +package system + +import ( + "fmt" + "runtime" + "strings" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ValidatePlatform determines if a platform structure is valid. +// TODO This is a temporary function - can be replaced by parsing from +// https://github.com/containerd/containerd/pull/1403/files at a later date. +// @jhowardmsft +func ValidatePlatform(platform *specs.Platform) error { + platform.Architecture = strings.ToLower(platform.Architecture) + platform.OS = strings.ToLower(platform.OS) + // Based on https://github.com/moby/moby/pull/34642#issuecomment-330375350, do + // not support anything except operating system. 
+ if platform.Architecture != "" { + return fmt.Errorf("invalid platform architecture %q", platform.Architecture) + } + if platform.OS != "" { + if !(platform.OS == runtime.GOOS || (LCOWSupported() && platform.OS == "linux")) { + return fmt.Errorf("invalid platform os %q", platform.OS) + } + } + if len(platform.OSFeatures) != 0 { + return fmt.Errorf("invalid platform osfeatures %q", platform.OSFeatures) + } + if platform.OSVersion != "" { + return fmt.Errorf("invalid platform osversion %q", platform.OSVersion) + } + if platform.Variant != "" { + return fmt.Errorf("invalid platform variant %q", platform.Variant) + } + return nil +} + +// ParsePlatform parses a platform string in the format os[/arch[/variant] +// into an OCI image-spec platform structure. +// TODO This is a temporary function - can be replaced by parsing from +// https://github.com/containerd/containerd/pull/1403/files at a later date. +// @jhowardmsft +func ParsePlatform(in string) *specs.Platform { + p := &specs.Platform{} + elements := strings.SplitN(strings.ToLower(in), "/", 3) + if len(elements) == 3 { + p.Variant = elements[2] + } + if len(elements) >= 2 { + p.Architecture = elements[1] + } + if len(elements) >= 1 { + p.OS = elements[0] + } + return p +} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go b/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go deleted file mode 100644 index 925776e789..0000000000 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go +++ /dev/null @@ -1,129 +0,0 @@ -// +build solaris,cgo - -package system - -import ( - "fmt" - "unsafe" -) - -// #cgo CFLAGS: -std=c99 -// #cgo LDFLAGS: -lkstat -// #include -// #include -// #include -// #include -// #include -// #include -// struct swaptable *allocSwaptable(int num) { -// struct swaptable *st; -// struct swapent *swapent; -// st = (struct swaptable *)malloc(num * sizeof(swapent_t) + sizeof (int)); -// swapent = st->swt_ent; -// for (int i = 0; i < num; i++,swapent++) { -// swapent->ste_path = (char *)malloc(MAXPATHLEN * sizeof (char)); -// } -// st->swt_n = num; -// return st; -//} -// void freeSwaptable (struct swaptable *st) { -// struct swapent *swapent = st->swt_ent; -// for (int i = 0; i < st->swt_n; i++,swapent++) { -// free(swapent->ste_path); -// } -// free(st); -// } -// swapent_t getSwapEnt(swapent_t *ent, int i) { -// return ent[i]; -// } -// int64_t getPpKernel() { -// int64_t pp_kernel = 0; -// kstat_ctl_t *ksc; -// kstat_t *ks; -// kstat_named_t *knp; -// kid_t kid; -// -// if ((ksc = kstat_open()) == NULL) { -// return -1; -// } -// if ((ks = kstat_lookup(ksc, "unix", 0, "system_pages")) == NULL) { -// return -1; -// } -// if (((kid = kstat_read(ksc, ks, NULL)) == -1) || -// ((knp = kstat_data_lookup(ks, "pp_kernel")) == NULL)) { -// return -1; -// } -// switch (knp->data_type) { -// case KSTAT_DATA_UINT64: -// pp_kernel = knp->value.ui64; -// break; -// case KSTAT_DATA_UINT32: -// pp_kernel = knp->value.ui32; -// break; -// } -// pp_kernel *= sysconf(_SC_PAGESIZE); -// return (pp_kernel > 0 ? pp_kernel : -1); -// } -import "C" - -// Get the system memory info using sysconf same as prtconf -func getTotalMem() int64 { - pagesize := C.sysconf(C._SC_PAGESIZE) - npages := C.sysconf(C._SC_PHYS_PAGES) - return int64(pagesize * npages) -} - -func getFreeMem() int64 { - pagesize := C.sysconf(C._SC_PAGESIZE) - npages := C.sysconf(C._SC_AVPHYS_PAGES) - return int64(pagesize * npages) -} - -// ReadMemInfo retrieves memory statistics of the host system and returns a -// MemInfo type. 
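A minimal usage sketch of the ParsePlatform and ValidatePlatform helpers added in pkg/system/lcow.go above, assuming the import path github.com/docker/docker/pkg/system; the sample platform strings are arbitrary.

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/system"
)

func main() {
	// ParsePlatform splits os[/arch[/variant]] into an OCI platform struct.
	for _, s := range []string{"linux", "windows/amd64", "linux/arm/v7"} {
		p := system.ParsePlatform(s)
		fmt.Printf("%q -> OS=%q Arch=%q Variant=%q\n", s, p.OS, p.Architecture, p.Variant)
	}

	// ValidatePlatform currently rejects any non-empty architecture, variant,
	// osversion or osfeatures, so "windows/amd64" fails validation here.
	if err := system.ValidatePlatform(system.ParsePlatform("windows/amd64")); err != nil {
		fmt.Println("rejected:", err)
	}
}
```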
-func ReadMemInfo() (*MemInfo, error) { - - ppKernel := C.getPpKernel() - MemTotal := getTotalMem() - MemFree := getFreeMem() - SwapTotal, SwapFree, err := getSysSwap() - - if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 || - SwapFree < 0 { - return nil, fmt.Errorf("error getting system memory info %v\n", err) - } - - meminfo := &MemInfo{} - // Total memory is total physical memory less than memory locked by kernel - meminfo.MemTotal = MemTotal - int64(ppKernel) - meminfo.MemFree = MemFree - meminfo.SwapTotal = SwapTotal - meminfo.SwapFree = SwapFree - - return meminfo, nil -} - -func getSysSwap() (int64, int64, error) { - var tSwap int64 - var fSwap int64 - var diskblksPerPage int64 - num, err := C.swapctl(C.SC_GETNSWP, nil) - if err != nil { - return -1, -1, err - } - st := C.allocSwaptable(num) - _, err = C.swapctl(C.SC_LIST, unsafe.Pointer(st)) - if err != nil { - C.freeSwaptable(st) - return -1, -1, err - } - - diskblksPerPage = int64(C.sysconf(C._SC_PAGESIZE) >> C.DEV_BSHIFT) - for i := 0; i < int(num); i++ { - swapent := C.getSwapEnt(&st.swt_ent[0], C.int(i)) - tSwap += int64(swapent.ste_pages) * diskblksPerPage - fSwap += int64(swapent.ste_free) * diskblksPerPage - } - C.freeSwaptable(st) - return tSwap, fSwap, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/mknod.go b/vendor/github.com/docker/docker/pkg/system/mknod.go index af79a65383..2200ec42da 100644 --- a/vendor/github.com/docker/docker/pkg/system/mknod.go +++ b/vendor/github.com/docker/docker/pkg/system/mknod.go @@ -18,5 +18,5 @@ func Mknod(path string, mode uint32, dev int) error { // They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, // then the top 12 bits of the minor. func Mkdev(major int64, minor int64) uint32 { - return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) + return uint32(unix.Mkdev(uint32(major), uint32(minor))) } diff --git a/vendor/github.com/docker/docker/pkg/system/path.go b/vendor/github.com/docker/docker/pkg/system/path.go index 4160616f43..034c33c877 100644 --- a/vendor/github.com/docker/docker/pkg/system/path.go +++ b/vendor/github.com/docker/docker/pkg/system/path.go @@ -14,9 +14,9 @@ const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/s // DefaultPathEnv is unix style list of directories to search for // executables. Each directory is separated from the next by a colon // ':' character . -func DefaultPathEnv(platform string) string { +func DefaultPathEnv(os string) string { if runtime.GOOS == "windows" { - if platform != runtime.GOOS && LCOWSupported() { + if os != runtime.GOOS { return defaultUnixPathEnv } // Deliberately empty on Windows containers on Windows as the default path will be set by diff --git a/vendor/github.com/docker/docker/pkg/system/process_windows.go b/vendor/github.com/docker/docker/pkg/system/process_windows.go new file mode 100644 index 0000000000..5973c46de9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/process_windows.go @@ -0,0 +1,18 @@ +package system + +import "os" + +// IsProcessAlive returns true if process with a given pid is running. +func IsProcessAlive(pid int) bool { + _, err := os.FindProcess(pid) + + return err == nil +} + +// KillProcess force-stops a process. 
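A short worked example of the device-number layout that pkg/system/mknod.go used to compute by hand and now delegates to x/sys/unix; it assumes a Unix build of golang.org/x/sys/unix, and major 8 / minor 1 are used purely as an illustration.

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var major, minor int64 = 8, 1 // e.g. /dev/sda1 on Linux, purely illustrative

	// Old hand-rolled layout from pkg/system/mknod.go: the low 8 bits of the
	// minor, then 12 bits of the major, then the top 12 bits of the minor.
	oldDev := uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))

	// The new code delegates to x/sys/unix, exactly as the hunk above does.
	newDev := uint32(unix.Mkdev(uint32(major), uint32(minor)))

	fmt.Printf("old=%#x new=%#x\n", oldDev, newDev) // both print 0x801 for this pair
}
```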
+func KillProcess(pid int) { + p, err := os.FindProcess(pid) + if err == nil { + p.Kill() + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/rm.go b/vendor/github.com/docker/docker/pkg/system/rm.go index 101b569a56..c453adcdb9 100644 --- a/vendor/github.com/docker/docker/pkg/system/rm.go +++ b/vendor/github.com/docker/docker/pkg/system/rm.go @@ -26,7 +26,7 @@ func EnsureRemoveAll(dir string) error { // track retries exitOnErr := make(map[string]int) - maxRetry := 5 + maxRetry := 50 // Attempt to unmount anything beneath this dir first mount.RecursiveUnmount(dir) diff --git a/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go b/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go deleted file mode 100644 index 50234affc0..0000000000 --- a/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go +++ /dev/null @@ -1,65 +0,0 @@ -// +build solaris,cgo - -package term - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/unix" -) - -// #include -import "C" - -// Termios is the Unix API for terminal I/O. -// It is passthrough for unix.Termios in order to make it portable with -// other platforms where it is not available or handled differently. -type Termios unix.Termios - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if err := tcget(fd, &oldState.termios); err != 0 { - return nil, err - } - - newState := oldState.termios - - newState.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON | unix.IXANY) - newState.Oflag &^= unix.OPOST - newState.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) - newState.Cflag &^= (unix.CSIZE | unix.PARENB) - newState.Cflag |= unix.CS8 - - /* - VMIN is the minimum number of characters that needs to be read in non-canonical mode for it to be returned - Since VMIN is overloaded with another element in canonical mode when we switch modes it defaults to 4. It - needs to be explicitly set to 1. - */ - newState.Cc[C.VMIN] = 1 - newState.Cc[C.VTIME] = 0 - - if err := tcset(fd, &newState); err != 0 { - return nil, err - } - return &oldState, nil -} - -func tcget(fd uintptr, p *Termios) syscall.Errno { - ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p))) - if ret != 0 { - return err.(syscall.Errno) - } - return 0 -} - -func tcset(fd uintptr, p *Termios) syscall.Errno { - ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p))) - if ret != 0 { - return err.(syscall.Errno) - } - return 0 -} diff --git a/vendor/github.com/docker/docker/pkg/term/term_windows.go b/vendor/github.com/docker/docker/pkg/term/term_windows.go index c0332c3cdb..b6819b3426 100644 --- a/vendor/github.com/docker/docker/pkg/term/term_windows.go +++ b/vendor/github.com/docker/docker/pkg/term/term_windows.go @@ -23,14 +23,7 @@ type Winsize struct { Width uint16 } -const ( - // https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx - enableVirtualTerminalInput = 0x0200 - enableVirtualTerminalProcessing = 0x0004 - disableNewlineAutoReturn = 0x0008 -) - -// vtInputSupported is true if enableVirtualTerminalInput is supported by the console +// vtInputSupported is true if winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported by the console var vtInputSupported bool // StdStreams returns the standard streams (stdin, stdout, stderr). 
@@ -40,8 +33,8 @@ func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { var emulateStdin, emulateStdout, emulateStderr bool fd := os.Stdin.Fd() if mode, err := winterm.GetConsoleMode(fd); err == nil { - // Validate that enableVirtualTerminalInput is supported, but do not set it. - if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalInput); err != nil { + // Validate that winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it. + if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_INPUT); err != nil { emulateStdin = true } else { vtInputSupported = true @@ -53,21 +46,21 @@ func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { fd = os.Stdout.Fd() if mode, err := winterm.GetConsoleMode(fd); err == nil { - // Validate disableNewlineAutoReturn is supported, but do not set it. - if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil { + // Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it. + if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING|winterm.DISABLE_NEWLINE_AUTO_RETURN); err != nil { emulateStdout = true } else { - winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing) + winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING) } } fd = os.Stderr.Fd() if mode, err := winterm.GetConsoleMode(fd); err == nil { - // Validate disableNewlineAutoReturn is supported, but do not set it. - if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil { + // Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it. + if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING|winterm.DISABLE_NEWLINE_AUTO_RETURN); err != nil { emulateStderr = true } else { - winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing) + winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING) } } @@ -183,9 +176,9 @@ func SetRawTerminalOutput(fd uintptr) (*State, error) { return nil, err } - // Ignore failures, since disableNewlineAutoReturn might not be supported on this + // Ignore failures, since winterm.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this // version of Windows. - winterm.SetConsoleMode(fd, state.mode|disableNewlineAutoReturn) + winterm.SetConsoleMode(fd, state.mode|winterm.DISABLE_NEWLINE_AUTO_RETURN) return state, err } @@ -215,7 +208,7 @@ func MakeRaw(fd uintptr) (*State, error) { mode |= winterm.ENABLE_INSERT_MODE mode |= winterm.ENABLE_QUICK_EDIT_MODE if vtInputSupported { - mode |= enableVirtualTerminalInput + mode |= winterm.ENABLE_VIRTUAL_TERMINAL_INPUT } err = winterm.SetConsoleMode(fd, mode) diff --git a/vendor/github.com/docker/docker/pkg/term/winsize_solaris_cgo.go b/vendor/github.com/docker/docker/pkg/term/winsize_solaris_cgo.go deleted file mode 100644 index 39c1d3207c..0000000000 --- a/vendor/github.com/docker/docker/pkg/term/winsize_solaris_cgo.go +++ /dev/null @@ -1,42 +0,0 @@ -// +build solaris,cgo - -package term - -import ( - "unsafe" - - "golang.org/x/sys/unix" -) - -/* -#include -#include -#include - -// Small wrapper to get rid of variadic args of ioctl() -int my_ioctl(int fd, int cmd, struct winsize *ws) { - return ioctl(fd, cmd, ws); -} -*/ -import "C" - -// GetWinsize returns the window size based on the specified file descriptor. 
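A Windows-only sketch of the probe pattern StdStreams uses above: attempt to set a console-mode flag to see whether it is supported, and fall back to emulation when the call fails. The import path github.com/Azure/go-ansiterm/winterm is an assumption; only the calls and constants shown in the hunk are used.

```go
package main

import (
	"fmt"
	"os"

	"github.com/Azure/go-ansiterm/winterm"
)

// probeVT mirrors the "validate but do not keep" pattern from StdStreams above:
// try to set the flags, and treat failure as "this console needs ANSI emulation".
func probeVT(fd uintptr) bool {
	mode, err := winterm.GetConsoleMode(fd)
	if err != nil {
		return false // not attached to a console
	}
	probe := mode | winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING | winterm.DISABLE_NEWLINE_AUTO_RETURN
	if err := winterm.SetConsoleMode(fd, probe); err != nil {
		return false
	}
	// Keep VT processing on but restore normal newline handling, as the hunk does.
	_ = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING)
	return true
}

func main() {
	fmt.Println("stdout VT support:", probeVT(os.Stdout.Fd()))
}
```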
-func GetWinsize(fd uintptr) (*Winsize, error) { - ws := &Winsize{} - ret, err := C.my_ioctl(C.int(fd), C.int(unix.TIOCGWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws))) - // Skip retval = 0 - if ret == 0 { - return ws, nil - } - return ws, err -} - -// SetWinsize tries to set the specified window size for the specified file descriptor. -func SetWinsize(fd uintptr, ws *Winsize) error { - ret, err := C.my_ioctl(C.int(fd), C.int(unix.TIOCSWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws))) - // Skip retval = 0 - if ret == 0 { - return nil - } - return err -} diff --git a/vendor/github.com/emicklei/go-restful/CHANGES.md b/vendor/github.com/emicklei/go-restful/CHANGES.md index 0adca766fb..d90aaa22e4 100644 --- a/vendor/github.com/emicklei/go-restful/CHANGES.md +++ b/vendor/github.com/emicklei/go-restful/CHANGES.md @@ -1,5 +1,8 @@ Change history of go-restful = +2017-09-13 +- added route condition functions using `.If(func)` in route building. + 2017-02-16 - solved issue #304, make operation names unique diff --git a/vendor/github.com/emicklei/go-restful/README.md b/vendor/github.com/emicklei/go-restful/README.md index 694ebe973e..002a08d965 100644 --- a/vendor/github.com/emicklei/go-restful/README.md +++ b/vendor/github.com/emicklei/go-restful/README.md @@ -56,7 +56,7 @@ func (u UserResource) findUser(request *restful.Request, response *restful.Respo - Content encoding (gzip,deflate) of request and response payloads - Automatic responses on OPTIONS (using a filter) - Automatic CORS request handling (using a filter) -- API declaration for Swagger UI (see [go-restful-swagger12](https://github.com/emicklei/go-restful-swagger12),[go-restful-openapi](https://github.com/emicklei/go-restful-openapi)) +- API declaration for Swagger UI ([go-restful-openapi](https://github.com/emicklei/go-restful-openapi), see [go-restful-swagger12](https://github.com/emicklei/go-restful-swagger12)) - Panic recovery to produce HTTP 500, customizable using RecoverHandler(...) - Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...) 
- Configurable (trace) logging diff --git a/vendor/github.com/emicklei/go-restful/jsr311.go b/vendor/github.com/emicklei/go-restful/jsr311.go index 511444ac68..9e81224164 100644 --- a/vendor/github.com/emicklei/go-restful/jsr311.go +++ b/vendor/github.com/emicklei/go-restful/jsr311.go @@ -41,9 +41,29 @@ func (r RouterJSR311) SelectRoute( // http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) { + ifOk := []Route{} + for _, each := range routes { + ok := true + for _, fn := range each.If { + if !fn(httpRequest) { + ok = false + break + } + } + if ok { + ifOk = append(ifOk, each) + } + } + if len(ifOk) == 0 { + if trace { + traceLogger.Printf("no Route found (from %d) that passes conditional checks", len(routes)) + } + return nil, NewError(http.StatusNotFound, "404: Not Found") + } + // http method methodOk := []Route{} - for _, each := range routes { + for _, each := range ifOk { if httpRequest.Method == each.Method { methodOk = append(methodOk, each) } diff --git a/vendor/github.com/emicklei/go-restful/options_filter.go b/vendor/github.com/emicklei/go-restful/options_filter.go index 4514eadcfa..5c1b34251c 100644 --- a/vendor/github.com/emicklei/go-restful/options_filter.go +++ b/vendor/github.com/emicklei/go-restful/options_filter.go @@ -15,7 +15,15 @@ func (c *Container) OPTIONSFilter(req *Request, resp *Response, chain *FilterCha chain.ProcessFilter(req, resp) return } - resp.AddHeader(HEADER_Allow, strings.Join(c.computeAllowedMethods(req), ",")) + + archs := req.Request.Header.Get(HEADER_AccessControlRequestHeaders) + methods := strings.Join(c.computeAllowedMethods(req), ",") + origin := req.Request.Header.Get(HEADER_Origin) + + resp.AddHeader(HEADER_Allow, methods) + resp.AddHeader(HEADER_AccessControlAllowOrigin, origin) + resp.AddHeader(HEADER_AccessControlAllowHeaders, archs) + resp.AddHeader(HEADER_AccessControlAllowMethods, methods) } // OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method diff --git a/vendor/github.com/emicklei/go-restful/parameter.go b/vendor/github.com/emicklei/go-restful/parameter.go index e11c8162a7..e8793304b1 100644 --- a/vendor/github.com/emicklei/go-restful/parameter.go +++ b/vendor/github.com/emicklei/go-restful/parameter.go @@ -19,8 +19,30 @@ const ( // FormParameterKind = indicator of Request parameter type "form" FormParameterKind + + // CollectionFormatCSV comma separated values `foo,bar` + CollectionFormatCSV = CollectionFormat("csv") + + // CollectionFormatSSV space separated values `foo bar` + CollectionFormatSSV = CollectionFormat("ssv") + + // CollectionFormatTSV tab separated values `foo\tbar` + CollectionFormatTSV = CollectionFormat("tsv") + + // CollectionFormatPipes pipe separated values `foo|bar` + CollectionFormatPipes = CollectionFormat("pipes") + + // CollectionFormatMulti corresponds to multiple parameter instances instead of multiple values for a single + // instance `foo=bar&foo=baz`. 
This is valid only for QueryParameters and FormParameters + CollectionFormatMulti = CollectionFormat("multi") ) +type CollectionFormat string + +func (cf CollectionFormat) String() string { + return string(cf) +} + // Parameter is for documententing the parameter used in a Http Request // ParameterData kinds are Path,Query and Body type Parameter struct { @@ -36,6 +58,7 @@ type ParameterData struct { AllowableValues map[string]string AllowMultiple bool DefaultValue string + CollectionFormat string } // Data returns the state of the Parameter @@ -112,3 +135,9 @@ func (p *Parameter) Description(doc string) *Parameter { p.data.Description = doc return p } + +// CollectionFormat sets the collection format for an array type +func (p *Parameter) CollectionFormat(format CollectionFormat) *Parameter { + p.data.CollectionFormat = format.String() + return p +} diff --git a/vendor/github.com/emicklei/go-restful/response.go b/vendor/github.com/emicklei/go-restful/response.go index c69c20e320..4d987d130b 100644 --- a/vendor/github.com/emicklei/go-restful/response.go +++ b/vendor/github.com/emicklei/go-restful/response.go @@ -5,7 +5,9 @@ package restful // that can be found in the LICENSE file. import ( + "bufio" "errors" + "net" "net/http" ) @@ -19,17 +21,19 @@ var PrettyPrintResponses = true // It provides several convenience methods to prepare and write response content. type Response struct { http.ResponseWriter - requestAccept string // mime-type what the Http Request says it wants to receive - routeProduces []string // mime-types what the Route says it can produce - statusCode int // HTTP status code that has been written explicitly (if zero then net/http has written 200) - contentLength int // number of bytes written for the response body - prettyPrint bool // controls the indentation feature of XML and JSON serialization. It is initialized using var PrettyPrintResponses. - err error // err property is kept when WriteError is called + requestAccept string // mime-type what the Http Request says it wants to receive + routeProduces []string // mime-types what the Route says it can produce + statusCode int // HTTP status code that has been written explicitly (if zero then net/http has written 200) + contentLength int // number of bytes written for the response body + prettyPrint bool // controls the indentation feature of XML and JSON serialization. It is initialized using var PrettyPrintResponses. + err error // err property is kept when WriteError is called + hijacker http.Hijacker // if underlying ResponseWriter supports it } // NewResponse creates a new response based on a http ResponseWriter. func NewResponse(httpWriter http.ResponseWriter) *Response { - return &Response{httpWriter, "", []string{}, http.StatusOK, 0, PrettyPrintResponses, nil} // empty content-types + hijacker, _ := httpWriter.(http.Hijacker) + return &Response{ResponseWriter: httpWriter, routeProduces: []string{}, statusCode: http.StatusOK, prettyPrint: PrettyPrintResponses, hijacker: hijacker} } // DefaultResponseContentType set a default. @@ -48,6 +52,16 @@ func (r Response) InternalServerError() Response { return r } +// Hijack implements the http.Hijacker interface. This expands +// the Response to fulfill http.Hijacker if the underlying +// http.ResponseWriter supports it. 
+func (r *Response) Hijack() (net.Conn, *bufio.ReadWriter, error) { + if r.hijacker == nil { + return nil, nil, errors.New("http.Hijacker not implemented by underlying http.ResponseWriter") + } + return r.hijacker.Hijack() +} + // PrettyPrint changes whether this response must produce pretty (line-by-line, indented) JSON or XML output. func (r *Response) PrettyPrint(bePretty bool) { r.prettyPrint = bePretty diff --git a/vendor/github.com/emicklei/go-restful/route.go b/vendor/github.com/emicklei/go-restful/route.go index b028247e88..b9e346a5ef 100644 --- a/vendor/github.com/emicklei/go-restful/route.go +++ b/vendor/github.com/emicklei/go-restful/route.go @@ -13,6 +13,11 @@ import ( // RouteFunction declares the signature of a function that can be bound to a Route. type RouteFunction func(*Request, *Response) +// RouteSelectionConditionFunction declares the signature of a function that +// can be used to add extra conditional logic when selecting whether the route +// matches the HTTP request. +type RouteSelectionConditionFunction func(httpRequest *http.Request) bool + // Route binds a HTTP Method,Path,Consumes combination to a RouteFunction. type Route struct { Method string @@ -21,6 +26,7 @@ type Route struct { Path string // webservice root path + described path Function RouteFunction Filters []FilterFunction + If []RouteSelectionConditionFunction // cached values for dispatching relativePath string @@ -37,6 +43,9 @@ type Route struct { // Extra information used to store custom information about the route. Metadata map[string]interface{} + + // marks a route as deprecated + Deprecated bool } // Initialize for Route diff --git a/vendor/github.com/emicklei/go-restful/route_builder.go b/vendor/github.com/emicklei/go-restful/route_builder.go index 4ab3835b65..d70f6f89eb 100644 --- a/vendor/github.com/emicklei/go-restful/route_builder.go +++ b/vendor/github.com/emicklei/go-restful/route_builder.go @@ -24,6 +24,7 @@ type RouteBuilder struct { httpMethod string // required function RouteFunction // required filters []FilterFunction + conditions []RouteSelectionConditionFunction typeNameHandleFunc TypeNameHandleFunction // required @@ -35,6 +36,7 @@ type RouteBuilder struct { parameters []*Parameter errorMap map[int]ResponseError metadata map[string]interface{} + deprecated bool } // Do evaluates each argument with the RouteBuilder itself. @@ -193,6 +195,12 @@ func (b *RouteBuilder) Metadata(key string, value interface{}) *RouteBuilder { return b } +// Deprecate sets the value of deprecated to true. Deprecated routes have a special UI treatment to warn against use +func (b *RouteBuilder) Deprecate() *RouteBuilder { + b.deprecated = true + return b +} + // ResponseError represents a response; not necessarily an error. type ResponseError struct { Code int @@ -212,6 +220,21 @@ func (b *RouteBuilder) Filter(filter FilterFunction) *RouteBuilder { return b } +// If sets a condition function that controls matching the Route based on custom logic. +// The condition function is provided the HTTP request and should return true if the route +// should be considered. +// +// Efficiency note: the condition function is called before checking the method, produces, and +// consumes criteria, so that the correct HTTP status code can be returned. +// +// Lifecycle note: no filter functions have been called prior to calling the condition function, +// so the condition function should not depend on any context that might be set up by container +// or route filters. 
+func (b *RouteBuilder) If(condition RouteSelectionConditionFunction) *RouteBuilder { + b.conditions = append(b.conditions, condition) + return b +} + // If no specific Route path then set to rootPath // If no specific Produces then set to rootProduces // If no specific Consumes then set to rootConsumes @@ -254,6 +277,7 @@ func (b *RouteBuilder) Build() Route { Consumes: b.consumes, Function: b.function, Filters: b.filters, + If: b.conditions, relativePath: b.currentPath, pathExpr: pathExpr, Doc: b.doc, @@ -263,7 +287,8 @@ func (b *RouteBuilder) Build() Route { ResponseErrors: b.errorMap, ReadSample: b.readSample, WriteSample: b.writeSample, - Metadata: b.metadata} + Metadata: b.metadata, + Deprecated: b.deprecated} route.postBuild() return route } diff --git a/vendor/github.com/emicklei/go-restful/web_service.go b/vendor/github.com/emicklei/go-restful/web_service.go index 094c0a02ab..f7e18a5859 100644 --- a/vendor/github.com/emicklei/go-restful/web_service.go +++ b/vendor/github.com/emicklei/go-restful/web_service.go @@ -118,7 +118,7 @@ func (w *WebService) QueryParameter(name, description string) *Parameter { // QueryParameter creates a new Parameter of kind Query for documentation purposes. // It is initialized as not required with string as its DataType. func QueryParameter(name, description string) *Parameter { - p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}} + p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string", CollectionFormat: CollectionFormatCSV.String()}} p.beQuery() return p } diff --git a/vendor/github.com/fatih/structs/tags.go b/vendor/github.com/fatih/structs/tags.go index 8859341c1f..136a31eba9 100644 --- a/vendor/github.com/fatih/structs/tags.go +++ b/vendor/github.com/fatih/structs/tags.go @@ -5,7 +5,7 @@ import "strings" // tagOptions contains a slice of tag options type tagOptions []string -// Has returns true if the given optiton is available in tagOptions +// Has returns true if the given option is available in tagOptions func (t tagOptions) Has(opt string) bool { for _, tagOpt := range t { if tagOpt == opt { diff --git a/vendor/github.com/fsouza/go-dockerclient/AUTHORS b/vendor/github.com/fsouza/go-dockerclient/AUTHORS index 55dd8e3b37..ffb2d0d152 100644 --- a/vendor/github.com/fsouza/go-dockerclient/AUTHORS +++ b/vendor/github.com/fsouza/go-dockerclient/AUTHORS @@ -43,6 +43,7 @@ Clint Armstrong CMGS Colin Hebert Craig Jellick +Damien Lespiau Damon Wang Dan Williams Daniel, Dao Quang Minh @@ -133,6 +134,7 @@ Paul Morie Paul Weil Peter Edge Peter Jihoon Kim +Peter Teich Phil Lu Philippe Lafoucrière Radek Simko @@ -158,8 +160,10 @@ Simon Menke Skolos Soulou Sridhar Ratnakumar +Steven Jack Summer Mousa Sunjin Lee +Sunny Swaroop Ramachandra Tarsis Azevedo Tim Schindler diff --git a/vendor/github.com/fsouza/go-dockerclient/Gopkg.toml b/vendor/github.com/fsouza/go-dockerclient/Gopkg.toml new file mode 100644 index 0000000000..deeb07595d --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/Gopkg.toml @@ -0,0 +1,28 @@ +[[constraint]] + name = "github.com/Microsoft/go-winio" + version = "v0.4.5" + +[[constraint]] + name = "github.com/docker/docker" + branch = "master" + +[[constraint]] + name = "github.com/docker/go-units" + version = "v0.3.2" + +[[constraint]] + name = "github.com/google/go-cmp" + branch = "master" + +[[constraint]] + name = "github.com/gorilla/mux" + version = "v1.5.0" + +[[constraint]] + name = "golang.org/x/net" + branch = "master" + 
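A minimal sketch pulling together the go-restful additions vendored above: a RouteSelectionConditionFunction attached with .If(), the Deprecate() marker, the CollectionFormat hint on a query parameter, and the container-level OPTIONS filter that now also emits CORS allow headers. The path, header name, and handler are invented for the example.

```go
package main

import (
	"log"
	"net/http"

	restful "github.com/emicklei/go-restful"
)

// internalOnly is a RouteSelectionConditionFunction: returning false makes the
// router skip this route before method/produces/consumes matching.
func internalOnly(r *http.Request) bool {
	return r.Header.Get("X-Internal") != ""
}

func listItems(req *restful.Request, resp *restful.Response) {
	resp.WriteHeader(http.StatusNoContent)
}

func main() {
	ws := new(restful.WebService)
	ws.Path("/items")

	ws.Route(ws.GET("/").
		Param(ws.QueryParameter("id", "item ids").
			CollectionFormat(restful.CollectionFormatMulti)). // ?id=1&id=2
		If(internalOnly). // only matched when the condition returns true
		Deprecate().      // surfaced as deprecated in generated API docs
		To(listItems))

	restful.Add(ws)
	// The container-level OPTIONS filter now also sets the CORS allow headers.
	restful.Filter(restful.OPTIONSFilter())
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```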
+[[override]] + name = "github.com/Nvveen/Gotty" + source = "https://github.com/ijc25/Gotty.git" + revision = "a8b993ba6abdb0e0c12b0125c603323a71c7790c" diff --git a/vendor/github.com/fsouza/go-dockerclient/Makefile b/vendor/github.com/fsouza/go-dockerclient/Makefile index 483aa1bb4a..6ebdcf1873 100644 --- a/vendor/github.com/fsouza/go-dockerclient/Makefile +++ b/vendor/github.com/fsouza/go-dockerclient/Makefile @@ -16,21 +16,22 @@ lint: [ -z "$$(golint . | grep -v 'type name will be used as docker.DockerInfo' | grep -v 'context.Context should be the first' | tee /dev/stderr)" ] vet: - go vet ./... + go vet $$(go list ./... | grep -v vendor) fmt: - gofmt -s -w . + gofmt -s -w $$(go list ./... | grep -v vendor) fmtcheck: - [ -z "$$(gofmt -s -d . | tee /dev/stderr)" ] + [ -z "$$(gofmt -s -d $$(go list ./... | grep -v vendor) | tee /dev/stderr)" ] testdeps: - go get -d -t ./... + go get -u github.com/golang/dep/cmd/dep + dep ensure -v pretest: testdeps lint vet fmtcheck gotest: - go test $(GO_TEST_FLAGS) ./... + go test -race $$(go list ./... | grep -v vendor) test: pretest gotest diff --git a/vendor/github.com/fsouza/go-dockerclient/README.markdown b/vendor/github.com/fsouza/go-dockerclient/README.markdown index a9ffc17a0f..86824d6c5f 100644 --- a/vendor/github.com/fsouza/go-dockerclient/README.markdown +++ b/vendor/github.com/fsouza/go-dockerclient/README.markdown @@ -6,7 +6,6 @@ This package presents a client for the Docker remote API. It also provides support for the extensions in the [Swarm API](https://docs.docker.com/swarm/swarm-api/). -It currently supports the Docker API up to version 1.23. This package also provides support for docker's network API, which is a simple passthrough to the libnetwork remote API. Note that docker's network API is @@ -111,6 +110,15 @@ Running `make test` will check all of these. If your editor does not automatically call ``gofmt -s``, `make fmt` will format all go files in this repository. +## Vendoring + +go-dockerclient uses [dep](https://github.com/golang/dep/) for vendoring. If +you're using dep, you should be able to pick go-dockerclient releases and get +the proper dependencies. + +With other vendoring tools, users might need to specify go-dockerclient's +dependencies manually. + ## Using with Docker 1.9 and Go 1.4 There's a tag for using go-dockerclient with Docker 1.9 (which requires diff --git a/vendor/github.com/fsouza/go-dockerclient/appveyor.yml b/vendor/github.com/fsouza/go-dockerclient/appveyor.yml index 3d8e319cd9..01bd657bf0 100644 --- a/vendor/github.com/fsouza/go-dockerclient/appveyor.yml +++ b/vendor/github.com/fsouza/go-dockerclient/appveyor.yml @@ -5,16 +5,17 @@ clone_folder: c:\gopath\src\github.com\fsouza\go-dockerclient environment: GOPATH: c:\gopath matrix: - - GOVERSION: 1.8.3 - - GOVERSION: 1.9 + - GOVERSION: 1.8.4 + - GOVERSION: 1.9.1 install: - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% - rmdir c:\go /s /q - appveyor DownloadFile https://storage.googleapis.com/golang/go%GOVERSION%.windows-amd64.zip - 7z x go%GOVERSION%.windows-amd64.zip -y -oC:\ > NUL build_script: - - go get -race -d -t ./... + - go get -u github.com/golang/dep/cmd/dep + - dep ensure -v test_script: - - go test -race ./... + - for /f "" %%G in ('go list ./... 
^| find /i /v "/vendor/"') do ( go test %%G & IF ERRORLEVEL == 1 EXIT 1) matrix: fast_finish: true diff --git a/vendor/github.com/fsouza/go-dockerclient/client.go b/vendor/github.com/fsouza/go-dockerclient/client.go index 625d4cd5a7..b404953fc4 100644 --- a/vendor/github.com/fsouza/go-dockerclient/client.go +++ b/vendor/github.com/fsouza/go-dockerclient/client.go @@ -151,7 +151,6 @@ type Client struct { requestedAPIVersion APIVersion serverAPIVersion APIVersion expectedAPIVersion APIVersion - nativeHTTPClient *http.Client } // Dialer is an interface that allows network connections to be dialed @@ -344,16 +343,12 @@ func NewVersionedTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, return c, nil } -// SetTimeout takes a timeout and applies it to both the HTTPClient and -// nativeHTTPClient. It should not be called concurrently with any other Client -// methods. +// SetTimeout takes a timeout and applies it to the HTTPClient. It should not +// be called concurrently with any other Client methods. func (c *Client) SetTimeout(t time.Duration) { if c.HTTPClient != nil { c.HTTPClient.Timeout = t } - if c.nativeHTTPClient != nil { - c.nativeHTTPClient.Timeout = t - } } func (c *Client) checkAPIVersion() error { @@ -445,12 +440,10 @@ func (c *Client) do(method, path string, doOptions doOptions) (*http.Response, e return nil, err } } - httpClient := c.HTTPClient protocol := c.endpointURL.Scheme var u string switch protocol { case unixProtocol, namedPipeProtocol: - httpClient = c.nativeHTTPClient u = c.getFakeNativeURL(path) default: u = c.getURL(path) @@ -476,7 +469,7 @@ func (c *Client) do(method, path string, doOptions doOptions) (*http.Response, e ctx = context.Background() } - resp, err := ctxhttp.Do(ctx, httpClient, req) + resp, err := ctxhttp.Do(ctx, c.HTTPClient, req) if err != nil { if strings.Contains(err.Error(), "connection refused") { return nil, ErrConnectionRefused diff --git a/vendor/github.com/fsouza/go-dockerclient/client_unix.go b/vendor/github.com/fsouza/go-dockerclient/client_unix.go index b578e03037..ab73cf2b0d 100644 --- a/vendor/github.com/fsouza/go-dockerclient/client_unix.go +++ b/vendor/github.com/fsouza/go-dockerclient/client_unix.go @@ -9,7 +9,6 @@ package docker import ( "context" "net" - "net/http" ) // initializeNativeClient initializes the native Unix domain socket client on @@ -26,5 +25,5 @@ func (c *Client) initializeNativeClient() { tr.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) { return c.Dialer.Dial(unixProtocol, socketPath) } - c.nativeHTTPClient = &http.Client{Transport: tr} + c.HTTPClient.Transport = tr } diff --git a/vendor/github.com/fsouza/go-dockerclient/client_windows.go b/vendor/github.com/fsouza/go-dockerclient/client_windows.go index c863fb05fa..c9ecc187da 100644 --- a/vendor/github.com/fsouza/go-dockerclient/client_windows.go +++ b/vendor/github.com/fsouza/go-dockerclient/client_windows.go @@ -1,15 +1,14 @@ -// +build windows - // Copyright 2016 go-dockerclient authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
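A brief sketch of what the client.go change above means in practice: with the separate nativeHTTPClient gone, SetTimeout applies to every request, including those sent over the Unix socket or named pipe. The endpoint and timeout values are illustrative.

```go
package main

import (
	"log"
	"time"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}
	// With nativeHTTPClient removed, this timeout also covers Unix-socket requests.
	client.SetTimeout(10 * time.Second)

	if err := client.Ping(); err != nil {
		log.Fatal("daemon unreachable: ", err)
	}
	log.Println("daemon is up")
}
```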
+// +build windows + package docker import ( "context" "net" - "net/http" "time" "github.com/Microsoft/go-winio" @@ -41,5 +40,5 @@ func (c *Client) initializeNativeClient() { return dialFunc(network, addr) } c.Dialer = &pipeDialer{dialFunc} - c.nativeHTTPClient = &http.Client{Transport: tr} + c.HTTPClient.Transport = tr } diff --git a/vendor/github.com/fsouza/go-dockerclient/container.go b/vendor/github.com/fsouza/go-dockerclient/container.go index 652deee8f8..624a7d860f 100644 --- a/vendor/github.com/fsouza/go-dockerclient/container.go +++ b/vendor/github.com/fsouza/go-dockerclient/container.go @@ -630,6 +630,11 @@ func (c *Client) CreateContainer(opts CreateContainerOptions) (*Container, error if e.Status == http.StatusConflict { return nil, ErrContainerAlreadyExists } + // Workaround for 17.09 bug returning 400 instead of 409. + // See https://github.com/moby/moby/issues/35021 + if e.Status == http.StatusBadRequest && strings.Contains(e.Message, "Conflict.") { + return nil, ErrContainerAlreadyExists + } } if err != nil { @@ -1473,7 +1478,7 @@ type LogsOptions struct { // stderr to LogsOptions.ErrorStream. // // When LogsOptions.RawTerminal is true, callers will get the raw stream on -// LogOptions.OutputStream. The caller can use libraries such as dlog +// LogsOptions.OutputStream. The caller can use libraries such as dlog // (github.com/ahmetalpbalkan/dlog). // // See https://goo.gl/krK0ZH for more details. diff --git a/vendor/github.com/fsouza/go-dockerclient/distribution.go b/vendor/github.com/fsouza/go-dockerclient/distribution.go new file mode 100644 index 0000000000..d0f8ce74cc --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/distribution.go @@ -0,0 +1,26 @@ +// Copyright 2017 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "encoding/json" + + "github.com/docker/docker/api/types/registry" +) + +// InspectDistribution returns image digest and platform information by contacting the registry +func (c *Client) InspectDistribution(name string) (*registry.DistributionInspect, error) { + path := "/distribution/" + name + "/json" + resp, err := c.do("GET", path, doOptions{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + var distributionInspect registry.DistributionInspect + if err := json.NewDecoder(resp.Body).Decode(&distributionInspect); err != nil { + return nil, err + } + return &distributionInspect, nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/env.go b/vendor/github.com/fsouza/go-dockerclient/env.go index c54b0b0e80..13fedfb17e 100644 --- a/vendor/github.com/fsouza/go-dockerclient/env.go +++ b/vendor/github.com/fsouza/go-dockerclient/env.go @@ -162,7 +162,11 @@ func (env *Env) Map() map[string]string { m := make(map[string]string) for _, kv := range *env { parts := strings.SplitN(kv, "=", 2) - m[parts[0]] = parts[1] + if len(parts) == 1 { + m[parts[0]] = "" + } else { + m[parts[0]] = parts[1] + } } return m } diff --git a/vendor/github.com/go-ini/ini/LICENSE b/vendor/github.com/go-ini/ini/LICENSE index 37ec93a14f..d361bbcdf5 100644 --- a/vendor/github.com/go-ini/ini/LICENSE +++ b/vendor/github.com/go-ini/ini/LICENSE @@ -176,7 +176,7 @@ recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
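A short sketch of the go-dockerclient behavior changes above: CreateContainer now maps the 17.09 daemon's 400 "Conflict." response to ErrContainerAlreadyExists, and Env.Map tolerates entries without an '=' sign. The container name and image are placeholders.

```go
package main

import (
	"fmt"
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}

	opts := docker.CreateContainerOptions{
		Name:   "demo",
		Config: &docker.Config{Image: "busybox"},
	}
	if _, err := client.CreateContainer(opts); err == docker.ErrContainerAlreadyExists {
		// Returned even when a 17.09 daemon answers 400 "Conflict." instead of 409.
		log.Println("container already exists, reusing it")
	} else if err != nil {
		log.Fatal(err)
	}

	// Env.Map now tolerates entries without '='; they map to an empty string.
	env := docker.Env{"PATH=/bin", "DEBUG"}
	m := env.Map()
	fmt.Println(m["PATH"], m["DEBUG"] == "") // "/bin true"
}
```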
- Copyright [yyyy] [name of copyright owner] + Copyright 2014 Unknwon Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/go-ini/ini/Makefile b/vendor/github.com/go-ini/ini/Makefile index ac034e5258..1316911d2d 100644 --- a/vendor/github.com/go-ini/ini/Makefile +++ b/vendor/github.com/go-ini/ini/Makefile @@ -1,4 +1,4 @@ -.PHONY: build test bench vet +.PHONY: build test bench vet coverage build: vet bench @@ -10,3 +10,6 @@ bench: vet: go vet + +coverage: + go test -coverprofile=c.out && go tool cover -html=c.out && rm c.out \ No newline at end of file diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md index e67d51f320..f4ff27cd30 100644 --- a/vendor/github.com/go-ini/ini/README.md +++ b/vendor/github.com/go-ini/ini/README.md @@ -101,7 +101,7 @@ skip-name-resolve By default, this is considered as missing value. But if you know you're going to deal with those cases, you can assign advanced load options: ```go -cfg, err := LoadSources(LoadOptions{AllowBooleanKeys: true}, "my.cnf")) +cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, "my.cnf")) ``` The value of those keys are always `true`, and when you save to a file, it will keep in the same foramt as you read. @@ -125,7 +125,7 @@ If you want to save a value with `#` or `;`, please quote them with ``` ` ``` or Alternatively, you can use following `LoadOptions` to completely ignore inline comments: ```go -cfg, err := LoadSources(LoadOptions{IgnoreInlineComment: true}, "app.ini")) +cfg, err := ini.LoadSources(ini.LoadOptions{IgnoreInlineComment: true}, "app.ini")) ``` ### Working with sections @@ -329,6 +329,20 @@ foo = "some value" // foo: some value bar = 'some value' // bar: some value ``` +Sometimes you downloaded file from [Crowdin](https://crowdin.com/) has values like the following (value is surrounded by double quotes and quotes in the value are escaped): + +```ini +create_repo="created repository %s" +``` + +How do you transform this to regular format automatically? + +```go +cfg, err := ini.LoadSources(ini.LoadOptions{UnescapeValueDoubleQuotes: true}, "en-US.ini")) +cfg.Section("").Key("create_repo").String() +// You got: created repository %s +``` + That's all? Hmm, no. #### Helper methods of working with values @@ -480,7 +494,7 @@ cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"] Sometimes, you have sections that do not contain key-value pairs but raw content, to handle such case, you can use `LoadOptions.UnparsableSections`: ```go -cfg, err := LoadSources(LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS] +cfg, err := ini.LoadSources(ini.LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS] <1> This slide has the fuel listed in the wrong units `)) body := cfg.Section("COMMENTS").Body() @@ -573,7 +587,7 @@ Why not? ```go type Embeded struct { - Dates []time.Time `delim:"|"` + Dates []time.Time `delim:"|" comment:"Time data"` Places []string `ini:"places,omitempty"` None []int `ini:",omitempty"` } @@ -581,10 +595,10 @@ type Embeded struct { type Author struct { Name string `ini:"NAME"` Male bool - Age int + Age int `comment:"Author's age"` GPA float64 NeverMind string `ini:"-"` - *Embeded + *Embeded `comment:"Embeded section"` } func main() { @@ -605,10 +619,13 @@ So, what do I get? 
```ini NAME = Unknwon Male = true +; Author's age Age = 21 GPA = 2.8 +; Embeded section [Embeded] +; Time data Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 places = HangZhou,Boston ``` diff --git a/vendor/github.com/go-ini/ini/README_ZH.md b/vendor/github.com/go-ini/ini/README_ZH.md index 0cf4194492..69aefef12e 100644 --- a/vendor/github.com/go-ini/ini/README_ZH.md +++ b/vendor/github.com/go-ini/ini/README_ZH.md @@ -94,7 +94,7 @@ skip-name-resolve 默认情况下这被认为是缺失值而无法完成解析,但可以通过高级的加载选项对它们进行处理: ```go -cfg, err := LoadSources(LoadOptions{AllowBooleanKeys: true}, "my.cnf")) +cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, "my.cnf")) ``` 这些键的值永远为 `true`,且在保存到文件时也只会输出键名。 @@ -118,7 +118,7 @@ key, err := sec.NewBooleanKey("skip-host-cache") 除此之外,您还可以通过 `LoadOptions` 完全忽略行内注释: ```go -cfg, err := LoadSources(LoadOptions{IgnoreInlineComment: true}, "app.ini")) +cfg, err := ini.LoadSources(ini.LoadOptions{IgnoreInlineComment: true}, "app.ini")) ``` ### 操作分区(Section) @@ -322,6 +322,20 @@ foo = "some value" // foo: some value bar = 'some value' // bar: some value ``` +有时您会获得像从 [Crowdin](https://crowdin.com/) 网站下载的文件那样具有特殊格式的值(值使用双引号括起来,内部的双引号被转义): + +```ini +create_repo="创建了仓库 %s" +``` + +那么,怎么自动地将这类值进行处理呢? + +```go +cfg, err := ini.LoadSources(ini.LoadOptions{UnescapeValueDoubleQuotes: true}, "en-US.ini")) +cfg.Section("").Key("create_repo").String() +// You got: 创建了仓库 %s +``` + 这就是全部了?哈哈,当然不是。 #### 操作键值的辅助方法 @@ -473,7 +487,7 @@ cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"] 如果遇到一些比较特殊的分区,它们不包含常见的键值对,而是没有固定格式的纯文本,则可以使用 `LoadOptions.UnparsableSections` 进行处理: ```go -cfg, err := LoadSources(LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS] +cfg, err := LoadSources(ini.LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS] <1> This slide has the fuel listed in the wrong units `)) body := cfg.Section("COMMENTS").Body() @@ -564,7 +578,7 @@ p := &Person{ ```go type Embeded struct { - Dates []time.Time `delim:"|"` + Dates []time.Time `delim:"|" comment:"Time data"` Places []string `ini:"places,omitempty"` None []int `ini:",omitempty"` } @@ -572,10 +586,10 @@ type Embeded struct { type Author struct { Name string `ini:"NAME"` Male bool - Age int + Age int `comment:"Author's age"` GPA float64 NeverMind string `ini:"-"` - *Embeded + *Embeded `comment:"Embeded section"` } func main() { @@ -596,10 +610,13 @@ func main() { ```ini NAME = Unknwon Male = true +; Author's age Age = 21 GPA = 2.8 +; Embeded section [Embeded] +; Time data Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 places = HangZhou,Boston ``` diff --git a/vendor/github.com/go-ini/ini/file.go b/vendor/github.com/go-ini/ini/file.go new file mode 100644 index 0000000000..93ac50836c --- /dev/null +++ b/vendor/github.com/go-ini/ini/file.go @@ -0,0 +1,392 @@ +// Copyright 2017 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
+ +package ini + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "sync" +) + +// File represents a combination of a or more INI file(s) in memory. +type File struct { + options LoadOptions + dataSources []dataSource + + // Should make things safe, but sometimes doesn't matter. + BlockMode bool + lock sync.RWMutex + + // To keep data in order. + sectionList []string + // Actual data is stored here. + sections map[string]*Section + + NameMapper + ValueMapper +} + +// newFile initializes File object with given data sources. +func newFile(dataSources []dataSource, opts LoadOptions) *File { + return &File{ + BlockMode: true, + dataSources: dataSources, + sections: make(map[string]*Section), + sectionList: make([]string, 0, 10), + options: opts, + } +} + +// Empty returns an empty file object. +func Empty() *File { + // Ignore error here, we sure our data is good. + f, _ := Load([]byte("")) + return f +} + +// NewSection creates a new section. +func (f *File) NewSection(name string) (*Section, error) { + if len(name) == 0 { + return nil, errors.New("error creating new section: empty section name") + } else if f.options.Insensitive && name != DEFAULT_SECTION { + name = strings.ToLower(name) + } + + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + if inSlice(name, f.sectionList) { + return f.sections[name], nil + } + + f.sectionList = append(f.sectionList, name) + f.sections[name] = newSection(f, name) + return f.sections[name], nil +} + +// NewRawSection creates a new section with an unparseable body. +func (f *File) NewRawSection(name, body string) (*Section, error) { + section, err := f.NewSection(name) + if err != nil { + return nil, err + } + + section.isRawSection = true + section.rawBody = body + return section, nil +} + +// NewSections creates a list of sections. +func (f *File) NewSections(names ...string) (err error) { + for _, name := range names { + if _, err = f.NewSection(name); err != nil { + return err + } + } + return nil +} + +// GetSection returns section by given name. +func (f *File) GetSection(name string) (*Section, error) { + if len(name) == 0 { + name = DEFAULT_SECTION + } + if f.options.Insensitive { + name = strings.ToLower(name) + } + + if f.BlockMode { + f.lock.RLock() + defer f.lock.RUnlock() + } + + sec := f.sections[name] + if sec == nil { + return nil, fmt.Errorf("section '%s' does not exist", name) + } + return sec, nil +} + +// Section assumes named section exists and returns a zero-value when not. +func (f *File) Section(name string) *Section { + sec, err := f.GetSection(name) + if err != nil { + // Note: It's OK here because the only possible error is empty section name, + // but if it's empty, this piece of code won't be executed. + sec, _ = f.NewSection(name) + return sec + } + return sec +} + +// Section returns list of Section. +func (f *File) Sections() []*Section { + sections := make([]*Section, len(f.sectionList)) + for i := range f.sectionList { + sections[i] = f.Section(f.sectionList[i]) + } + return sections +} + +// ChildSections returns a list of child sections of given section name. +func (f *File) ChildSections(name string) []*Section { + return f.Section(name).ChildSections() +} + +// SectionStrings returns list of section names. +func (f *File) SectionStrings() []string { + list := make([]string, len(f.sectionList)) + copy(list, f.sectionList) + return list +} + +// DeleteSection deletes a section. 
+func (f *File) DeleteSection(name string) { + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + if len(name) == 0 { + name = DEFAULT_SECTION + } + + for i, s := range f.sectionList { + if s == name { + f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...) + delete(f.sections, name) + return + } + } +} + +func (f *File) reload(s dataSource) error { + r, err := s.ReadCloser() + if err != nil { + return err + } + defer r.Close() + + return f.parse(r) +} + +// Reload reloads and parses all data sources. +func (f *File) Reload() (err error) { + for _, s := range f.dataSources { + if err = f.reload(s); err != nil { + // In loose mode, we create an empty default section for nonexistent files. + if os.IsNotExist(err) && f.options.Loose { + f.parse(bytes.NewBuffer(nil)) + continue + } + return err + } + } + return nil +} + +// Append appends one or more data sources and reloads automatically. +func (f *File) Append(source interface{}, others ...interface{}) error { + ds, err := parseDataSource(source) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + for _, s := range others { + ds, err = parseDataSource(s) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + } + return f.Reload() +} + +func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { + equalSign := "=" + if PrettyFormat { + equalSign = " = " + } + + // Use buffer to make sure target is safe until finish encoding. + buf := bytes.NewBuffer(nil) + for i, sname := range f.sectionList { + sec := f.Section(sname) + if len(sec.Comment) > 0 { + if sec.Comment[0] != '#' && sec.Comment[0] != ';' { + sec.Comment = "; " + sec.Comment + } else { + sec.Comment = sec.Comment[:1] + " " + strings.TrimSpace(sec.Comment[1:]) + } + if _, err := buf.WriteString(sec.Comment + LineBreak); err != nil { + return nil, err + } + } + + if i > 0 || DefaultHeader { + if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil { + return nil, err + } + } else { + // Write nothing if default section is empty + if len(sec.keyList) == 0 { + continue + } + } + + if sec.isRawSection { + if _, err := buf.WriteString(sec.rawBody); err != nil { + return nil, err + } + + if PrettySection { + // Put a line between sections + if _, err := buf.WriteString(LineBreak); err != nil { + return nil, err + } + } + continue + } + + // Count and generate alignment length and buffer spaces using the + // longest key. Keys may be modifed if they contain certain characters so + // we need to take that into account in our calculation. 
+ alignLength := 0 + if PrettyFormat { + for _, kname := range sec.keyList { + keyLength := len(kname) + // First case will surround key by ` and second by """ + if strings.ContainsAny(kname, "\"=:") { + keyLength += 2 + } else if strings.Contains(kname, "`") { + keyLength += 6 + } + + if keyLength > alignLength { + alignLength = keyLength + } + } + } + alignSpaces := bytes.Repeat([]byte(" "), alignLength) + + KEY_LIST: + for _, kname := range sec.keyList { + key := sec.Key(kname) + if len(key.Comment) > 0 { + if len(indent) > 0 && sname != DEFAULT_SECTION { + buf.WriteString(indent) + } + if key.Comment[0] != '#' && key.Comment[0] != ';' { + key.Comment = "; " + key.Comment + } else { + key.Comment = key.Comment[:1] + " " + strings.TrimSpace(key.Comment[1:]) + } + if _, err := buf.WriteString(key.Comment + LineBreak); err != nil { + return nil, err + } + } + + if len(indent) > 0 && sname != DEFAULT_SECTION { + buf.WriteString(indent) + } + + switch { + case key.isAutoIncrement: + kname = "-" + case strings.ContainsAny(kname, "\"=:"): + kname = "`" + kname + "`" + case strings.Contains(kname, "`"): + kname = `"""` + kname + `"""` + } + + for _, val := range key.ValueWithShadows() { + if _, err := buf.WriteString(kname); err != nil { + return nil, err + } + + if key.isBooleanType { + if kname != sec.keyList[len(sec.keyList)-1] { + buf.WriteString(LineBreak) + } + continue KEY_LIST + } + + // Write out alignment spaces before "=" sign + if PrettyFormat { + buf.Write(alignSpaces[:alignLength-len(kname)]) + } + + // In case key value contains "\n", "`", "\"", "#" or ";" + if strings.ContainsAny(val, "\n`") { + val = `"""` + val + `"""` + } else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") { + val = "`" + val + "`" + } + if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil { + return nil, err + } + } + } + + if PrettySection { + // Put a line between sections + if _, err := buf.WriteString(LineBreak); err != nil { + return nil, err + } + } + } + + return buf, nil +} + +// WriteToIndent writes content into io.Writer with given indention. +// If PrettyFormat has been set to be true, +// it will align "=" sign with spaces under each section. +func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) { + buf, err := f.writeToBuffer(indent) + if err != nil { + return 0, err + } + return buf.WriteTo(w) +} + +// WriteTo writes file content into io.Writer. +func (f *File) WriteTo(w io.Writer) (int64, error) { + return f.WriteToIndent(w, "") +} + +// SaveToIndent writes content to file system with given value indention. +func (f *File) SaveToIndent(filename, indent string) error { + // Note: Because we are truncating with os.Create, + // so it's safer to save to a temporary file location and rename afte done. + buf, err := f.writeToBuffer(indent) + if err != nil { + return err + } + + return ioutil.WriteFile(filename, buf.Bytes(), 0666) +} + +// SaveTo writes content to file system. +func (f *File) SaveTo(filename string) error { + return f.SaveToIndent(filename, "") +} diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go index 7f3c4d1ed1..508d60c19a 100644 --- a/vendor/github.com/go-ini/ini/ini.go +++ b/vendor/github.com/go-ini/ini/ini.go @@ -17,15 +17,12 @@ package ini import ( "bytes" - "errors" "fmt" "io" "io/ioutil" "os" "regexp" "runtime" - "strings" - "sync" ) const ( @@ -35,7 +32,7 @@ const ( // Maximum allowed depth when recursively substituing variable names. 
_DEPTH_VALUES = 99 - _VERSION = "1.28.2" + _VERSION = "1.30.3" ) // Version returns current package version literal. @@ -92,18 +89,6 @@ func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) { return os.Open(s.name) } -type bytesReadCloser struct { - reader io.Reader -} - -func (rc *bytesReadCloser) Read(p []byte) (n int, err error) { - return rc.reader.Read(p) -} - -func (rc *bytesReadCloser) Close() error { - return nil -} - // sourceData represents an object that contains content in memory. type sourceData struct { data []byte @@ -122,38 +107,6 @@ func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) { return s.reader, nil } -// File represents a combination of a or more INI file(s) in memory. -type File struct { - // Should make things safe, but sometimes doesn't matter. - BlockMode bool - // Make sure data is safe in multiple goroutines. - lock sync.RWMutex - - // Allow combination of multiple data sources. - dataSources []dataSource - // Actual data is stored here. - sections map[string]*Section - - // To keep data in order. - sectionList []string - - options LoadOptions - - NameMapper - ValueMapper -} - -// newFile initializes File object with given data sources. -func newFile(dataSources []dataSource, opts LoadOptions) *File { - return &File{ - BlockMode: true, - dataSources: dataSources, - sections: make(map[string]*Section), - sectionList: make([]string, 0, 10), - options: opts, - } -} - func parseDataSource(source interface{}) (dataSource, error) { switch s := source.(type) { case string: @@ -181,6 +134,8 @@ type LoadOptions struct { AllowBooleanKeys bool // AllowShadows indicates whether to keep track of keys with same name under same section. AllowShadows bool + // UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside value to regular format when value is surrounded by double quotes, e.g. key="a \"value\"" => key=a "value" + UnescapeValueDoubleQuotes bool // Some INI formats allow group blocks that store a block of raw content that doesn't otherwise // conform to key/value pairs. Specify the names of those blocks here. UnparseableSections []string @@ -229,328 +184,3 @@ func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) { func ShadowLoad(source interface{}, others ...interface{}) (*File, error) { return LoadSources(LoadOptions{AllowShadows: true}, source, others...) } - -// Empty returns an empty file object. -func Empty() *File { - // Ignore error here, we sure our data is good. - f, _ := Load([]byte("")) - return f -} - -// NewSection creates a new section. -func (f *File) NewSection(name string) (*Section, error) { - if len(name) == 0 { - return nil, errors.New("error creating new section: empty section name") - } else if f.options.Insensitive && name != DEFAULT_SECTION { - name = strings.ToLower(name) - } - - if f.BlockMode { - f.lock.Lock() - defer f.lock.Unlock() - } - - if inSlice(name, f.sectionList) { - return f.sections[name], nil - } - - f.sectionList = append(f.sectionList, name) - f.sections[name] = newSection(f, name) - return f.sections[name], nil -} - -// NewRawSection creates a new section with an unparseable body. -func (f *File) NewRawSection(name, body string) (*Section, error) { - section, err := f.NewSection(name) - if err != nil { - return nil, err - } - - section.isRawSection = true - section.rawBody = body - return section, nil -} - -// NewSections creates a list of sections. 
-func (f *File) NewSections(names ...string) (err error) { - for _, name := range names { - if _, err = f.NewSection(name); err != nil { - return err - } - } - return nil -} - -// GetSection returns section by given name. -func (f *File) GetSection(name string) (*Section, error) { - if len(name) == 0 { - name = DEFAULT_SECTION - } else if f.options.Insensitive { - name = strings.ToLower(name) - } - - if f.BlockMode { - f.lock.RLock() - defer f.lock.RUnlock() - } - - sec := f.sections[name] - if sec == nil { - return nil, fmt.Errorf("section '%s' does not exist", name) - } - return sec, nil -} - -// Section assumes named section exists and returns a zero-value when not. -func (f *File) Section(name string) *Section { - sec, err := f.GetSection(name) - if err != nil { - // Note: It's OK here because the only possible error is empty section name, - // but if it's empty, this piece of code won't be executed. - sec, _ = f.NewSection(name) - return sec - } - return sec -} - -// Section returns list of Section. -func (f *File) Sections() []*Section { - sections := make([]*Section, len(f.sectionList)) - for i := range f.sectionList { - sections[i] = f.Section(f.sectionList[i]) - } - return sections -} - -// ChildSections returns a list of child sections of given section name. -func (f *File) ChildSections(name string) []*Section { - return f.Section(name).ChildSections() -} - -// SectionStrings returns list of section names. -func (f *File) SectionStrings() []string { - list := make([]string, len(f.sectionList)) - copy(list, f.sectionList) - return list -} - -// DeleteSection deletes a section. -func (f *File) DeleteSection(name string) { - if f.BlockMode { - f.lock.Lock() - defer f.lock.Unlock() - } - - if len(name) == 0 { - name = DEFAULT_SECTION - } - - for i, s := range f.sectionList { - if s == name { - f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...) - delete(f.sections, name) - return - } - } -} - -func (f *File) reload(s dataSource) error { - r, err := s.ReadCloser() - if err != nil { - return err - } - defer r.Close() - - return f.parse(r) -} - -// Reload reloads and parses all data sources. -func (f *File) Reload() (err error) { - for _, s := range f.dataSources { - if err = f.reload(s); err != nil { - // In loose mode, we create an empty default section for nonexistent files. - if os.IsNotExist(err) && f.options.Loose { - f.parse(bytes.NewBuffer(nil)) - continue - } - return err - } - } - return nil -} - -// Append appends one or more data sources and reloads automatically. -func (f *File) Append(source interface{}, others ...interface{}) error { - ds, err := parseDataSource(source) - if err != nil { - return err - } - f.dataSources = append(f.dataSources, ds) - for _, s := range others { - ds, err = parseDataSource(s) - if err != nil { - return err - } - f.dataSources = append(f.dataSources, ds) - } - return f.Reload() -} - -func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { - equalSign := "=" - if PrettyFormat { - equalSign = " = " - } - - // Use buffer to make sure target is safe until finish encoding. 
- buf := bytes.NewBuffer(nil) - for i, sname := range f.sectionList { - sec := f.Section(sname) - if len(sec.Comment) > 0 { - if sec.Comment[0] != '#' && sec.Comment[0] != ';' { - sec.Comment = "; " + sec.Comment - } - if _, err := buf.WriteString(sec.Comment + LineBreak); err != nil { - return nil, err - } - } - - if i > 0 || DefaultHeader { - if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil { - return nil, err - } - } else { - // Write nothing if default section is empty - if len(sec.keyList) == 0 { - continue - } - } - - if sec.isRawSection { - if _, err := buf.WriteString(sec.rawBody); err != nil { - return nil, err - } - continue - } - - // Count and generate alignment length and buffer spaces using the - // longest key. Keys may be modifed if they contain certain characters so - // we need to take that into account in our calculation. - alignLength := 0 - if PrettyFormat { - for _, kname := range sec.keyList { - keyLength := len(kname) - // First case will surround key by ` and second by """ - if strings.ContainsAny(kname, "\"=:") { - keyLength += 2 - } else if strings.Contains(kname, "`") { - keyLength += 6 - } - - if keyLength > alignLength { - alignLength = keyLength - } - } - } - alignSpaces := bytes.Repeat([]byte(" "), alignLength) - - KEY_LIST: - for _, kname := range sec.keyList { - key := sec.Key(kname) - if len(key.Comment) > 0 { - if len(indent) > 0 && sname != DEFAULT_SECTION { - buf.WriteString(indent) - } - if key.Comment[0] != '#' && key.Comment[0] != ';' { - key.Comment = "; " + key.Comment - } - if _, err := buf.WriteString(key.Comment + LineBreak); err != nil { - return nil, err - } - } - - if len(indent) > 0 && sname != DEFAULT_SECTION { - buf.WriteString(indent) - } - - switch { - case key.isAutoIncrement: - kname = "-" - case strings.ContainsAny(kname, "\"=:"): - kname = "`" + kname + "`" - case strings.Contains(kname, "`"): - kname = `"""` + kname + `"""` - } - - for _, val := range key.ValueWithShadows() { - if _, err := buf.WriteString(kname); err != nil { - return nil, err - } - - if key.isBooleanType { - if kname != sec.keyList[len(sec.keyList)-1] { - buf.WriteString(LineBreak) - } - continue KEY_LIST - } - - // Write out alignment spaces before "=" sign - if PrettyFormat { - buf.Write(alignSpaces[:alignLength-len(kname)]) - } - - // In case key value contains "\n", "`", "\"", "#" or ";" - if strings.ContainsAny(val, "\n`") { - val = `"""` + val + `"""` - } else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") { - val = "`" + val + "`" - } - if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil { - return nil, err - } - } - } - - if PrettySection { - // Put a line between sections - if _, err := buf.WriteString(LineBreak); err != nil { - return nil, err - } - } - } - - return buf, nil -} - -// WriteToIndent writes content into io.Writer with given indention. -// If PrettyFormat has been set to be true, -// it will align "=" sign with spaces under each section. -func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) { - buf, err := f.writeToBuffer(indent) - if err != nil { - return 0, err - } - return buf.WriteTo(w) -} - -// WriteTo writes file content into io.Writer. -func (f *File) WriteTo(w io.Writer) (int64, error) { - return f.WriteToIndent(w, "") -} - -// SaveToIndent writes content to file system with given value indention. 
-func (f *File) SaveToIndent(filename, indent string) error { - // Note: Because we are truncating with os.Create, - // so it's safer to save to a temporary file location and rename afte done. - buf, err := f.writeToBuffer(indent) - if err != nil { - return err - } - - return ioutil.WriteFile(filename, buf.Bytes(), 0666) -} - -// SaveTo writes content to file system. -func (f *File) SaveTo(filename string) error { - return f.SaveToIndent(filename, "") -} diff --git a/vendor/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go index 838356af01..ab566c2c15 100644 --- a/vendor/github.com/go-ini/ini/key.go +++ b/vendor/github.com/go-ini/ini/key.go @@ -15,6 +15,7 @@ package ini import ( + "bytes" "errors" "fmt" "strconv" @@ -25,6 +26,7 @@ import ( // Key represents a key under a section. type Key struct { s *Section + Comment string name string value string isAutoIncrement bool @@ -32,8 +34,6 @@ type Key struct { isShadow bool shadows []*Key - - Comment string } // newKey simply return a key object with given values. @@ -444,11 +444,39 @@ func (k *Key) Strings(delim string) []string { return []string{} } - vals := strings.Split(str, delim) - for i := range vals { - // vals[i] = k.transformValue(strings.TrimSpace(vals[i])) - vals[i] = strings.TrimSpace(vals[i]) + runes := []rune(str) + vals := make([]string, 0, 2) + var buf bytes.Buffer + escape := false + idx := 0 + for { + if escape { + escape = false + if runes[idx] != '\\' && !strings.HasPrefix(string(runes[idx:]), delim) { + buf.WriteRune('\\') + } + buf.WriteRune(runes[idx]) + } else { + if runes[idx] == '\\' { + escape = true + } else if strings.HasPrefix(string(runes[idx:]), delim) { + idx += len(delim) - 1 + vals = append(vals, strings.TrimSpace(buf.String())) + buf.Reset() + } else { + buf.WriteRune(runes[idx]) + } + } + idx += 1 + if idx == len(runes) { + break + } } + + if buf.Len() > 0 { + vals = append(vals, strings.TrimSpace(buf.String())) + } + return vals } diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go index 69d5476273..f8ac8026a2 100644 --- a/vendor/github.com/go-ini/ini/parser.go +++ b/vendor/github.com/go-ini/ini/parser.go @@ -193,7 +193,7 @@ func hasSurroundedQuote(in string, quote byte) bool { strings.IndexByte(in[1:], quote) == len(in)-2 } -func (p *parser) readValue(in []byte, ignoreContinuation, ignoreInlineComment bool) (string, error) { +func (p *parser) readValue(in []byte, ignoreContinuation, ignoreInlineComment, unescapeValueDoubleQuotes bool) (string, error) { line := strings.TrimLeftFunc(string(in), unicode.IsSpace) if len(line) == 0 { return "", nil @@ -204,6 +204,8 @@ func (p *parser) readValue(in []byte, ignoreContinuation, ignoreInlineComment bo valQuote = `"""` } else if line[0] == '`' { valQuote = "`" + } else if unescapeValueDoubleQuotes && line[0] == '"' { + valQuote = `"` } if len(valQuote) > 0 { @@ -214,6 +216,9 @@ func (p *parser) readValue(in []byte, ignoreContinuation, ignoreInlineComment bo return p.readMultilines(line, line[startIdx:], valQuote) } + if unescapeValueDoubleQuotes && valQuote == `"` { + return strings.Replace(line[startIdx:pos+startIdx], `\"`, `"`, -1), nil + } return line[startIdx : pos+startIdx], nil } @@ -234,7 +239,7 @@ func (p *parser) readValue(in []byte, ignoreContinuation, ignoreInlineComment bo } } - // Trim single quotes + // Trim single and double quotes if hasSurroundedQuote(line, '\'') || hasSurroundedQuote(line, '"') { line = line[1 : len(line)-1] @@ -250,7 +255,11 @@ func (f *File) parse(reader io.Reader) (err 
error) { } // Ignore error because default section name is never empty string. - section, _ := f.NewSection(DEFAULT_SECTION) + name := DEFAULT_SECTION + if f.options.Insensitive { + name = strings.ToLower(DEFAULT_SECTION) + } + section, _ := f.NewSection(name) var line []byte var inUnparseableSection bool @@ -321,7 +330,10 @@ func (f *File) parse(reader io.Reader) (err error) { if err != nil { // Treat as boolean key when desired, and whole line is key name. if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys { - kname, err := p.readValue(line, f.options.IgnoreContinuation, f.options.IgnoreInlineComment) + kname, err := p.readValue(line, + f.options.IgnoreContinuation, + f.options.IgnoreInlineComment, + f.options.UnescapeValueDoubleQuotes) if err != nil { return err } @@ -344,7 +356,10 @@ func (f *File) parse(reader io.Reader) (err error) { p.count++ } - value, err := p.readValue(line[offset:], f.options.IgnoreContinuation, f.options.IgnoreInlineComment) + value, err := p.readValue(line[offset:], + f.options.IgnoreContinuation, + f.options.IgnoreInlineComment, + f.options.UnescapeValueDoubleQuotes) if err != nil { return err } diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go index 94f7375ed4..d8a4026192 100644 --- a/vendor/github.com/go-ini/ini/section.go +++ b/vendor/github.com/go-ini/ini/section.go @@ -54,6 +54,14 @@ func (s *Section) Body() string { return strings.TrimSpace(s.rawBody) } +// SetBody updates body content only if section is raw. +func (s *Section) SetBody(body string) { + if !s.isRawSection { + return + } + s.rawBody = body +} + // NewKey creates a new key to given section. func (s *Section) NewKey(name, val string) (*Key, error) { if len(name) == 0 { @@ -136,6 +144,7 @@ func (s *Section) HasKey(name string) bool { } // Haskey is a backwards-compatible name for HasKey. +// TODO: delete me in v2 func (s *Section) Haskey(name string) bool { return s.HasKey(name) } diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go index eeb8dabaac..9719dc6985 100644 --- a/vendor/github.com/go-ini/ini/struct.go +++ b/vendor/github.com/go-ini/ini/struct.go @@ -113,7 +113,7 @@ func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowSh default: return fmt.Errorf("unsupported type '[]%s'", sliceOf) } - if isStrict { + if err != nil && isStrict { return err } @@ -166,7 +166,7 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: durationVal, err := key.Duration() // Skip zero value - if err == nil && int(durationVal) > 0 { + if err == nil && int64(durationVal) > 0 { field.Set(reflect.ValueOf(durationVal)) return nil } @@ -450,6 +450,12 @@ func (s *Section) reflectFrom(val reflect.Value) error { // Note: fieldName can never be empty here, ignore error. 
sec, _ = s.f.NewSection(fieldName) } + + // Add comment from comment tag + if len(sec.Comment) == 0 { + sec.Comment = tpField.Tag.Get("comment") + } + if err = sec.reflectFrom(field); err != nil { return fmt.Errorf("error reflecting field (%s): %v", fieldName, err) } @@ -461,6 +467,12 @@ func (s *Section) reflectFrom(val reflect.Value) error { if err != nil { key, _ = s.NewKey(fieldName, "") } + + // Add comment from comment tag + if len(key.Comment) == 0 { + key.Comment = tpField.Tag.Get("comment") + } + if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { return fmt.Errorf("error reflecting field (%s): %v", fieldName, err) } diff --git a/vendor/github.com/go-ldap/ldap/Makefile b/vendor/github.com/go-ldap/ldap/Makefile index f7899f59a7..a9d351c764 100644 --- a/vendor/github.com/go-ldap/ldap/Makefile +++ b/vendor/github.com/go-ldap/ldap/Makefile @@ -7,7 +7,7 @@ IS_OLD_GO := $(shell test $(GO_VERSION) -le 2 && echo true) ifeq ($(IS_OLD_GO),true) RACE_FLAG := else - RACE_FLAG := -race + RACE_FLAG := -race -cpu 1,2,4 endif default: fmt vet lint build quicktest diff --git a/vendor/github.com/go-ldap/ldap/bind.go b/vendor/github.com/go-ldap/ldap/bind.go index 432efa78de..589a9d80b5 100644 --- a/vendor/github.com/go-ldap/ldap/bind.go +++ b/vendor/github.com/go-ldap/ldap/bind.go @@ -1,13 +1,9 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - package ldap import ( "errors" - ber "gopkg.in/asn1-ber.v1" + "gopkg.in/asn1-ber.v1" ) // SimpleBindRequest represents a username/password bind operation diff --git a/vendor/github.com/go-ldap/ldap/compare.go b/vendor/github.com/go-ldap/ldap/compare.go index cc6d2af5e5..82dca33c78 100644 --- a/vendor/github.com/go-ldap/ldap/compare.go +++ b/vendor/github.com/go-ldap/ldap/compare.go @@ -1,7 +1,3 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// // File contains Compare functionality // // https://tools.ietf.org/html/rfc4511 diff --git a/vendor/github.com/go-ldap/ldap/conn.go b/vendor/github.com/go-ldap/ldap/conn.go index e701a9b662..96aab6b8ef 100644 --- a/vendor/github.com/go-ldap/ldap/conn.go +++ b/vendor/github.com/go-ldap/ldap/conn.go @@ -1,7 +1,3 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- package ldap import ( @@ -83,20 +79,18 @@ const ( type Conn struct { conn net.Conn isTLS bool - closeCount uint32 + closing uint32 closeErr atomicValue isStartingTLS bool Debug debugging - chanConfirm chan bool + chanConfirm chan struct{} messageContexts map[int64]*messageContext chanMessage chan *messagePacket chanMessageID chan int64 - wgSender sync.WaitGroup wgClose sync.WaitGroup - once sync.Once outstandingRequests uint messageMutex sync.Mutex - requestTimeout time.Duration + requestTimeout int64 } var _ Client = &Conn{} @@ -143,7 +137,7 @@ func DialTLS(network, addr string, config *tls.Config) (*Conn, error) { func NewConn(conn net.Conn, isTLS bool) *Conn { return &Conn{ conn: conn, - chanConfirm: make(chan bool), + chanConfirm: make(chan struct{}), chanMessageID: make(chan int64), chanMessage: make(chan *messagePacket, 10), messageContexts: map[int64]*messageContext{}, @@ -161,20 +155,20 @@ func (l *Conn) Start() { // isClosing returns whether or not we're currently closing. func (l *Conn) isClosing() bool { - return atomic.LoadUint32(&l.closeCount) > 0 + return atomic.LoadUint32(&l.closing) == 1 } // setClosing sets the closing value to true -func (l *Conn) setClosing() { - atomic.AddUint32(&l.closeCount, 1) +func (l *Conn) setClosing() bool { + return atomic.CompareAndSwapUint32(&l.closing, 0, 1) } // Close closes the connection. func (l *Conn) Close() { - l.once.Do(func() { - l.setClosing() - l.wgSender.Wait() + l.messageMutex.Lock() + defer l.messageMutex.Unlock() + if l.setClosing() { l.Debug.Printf("Sending quit message and waiting for confirmation") l.chanMessage <- &messagePacket{Op: MessageQuit} <-l.chanConfirm @@ -182,27 +176,25 @@ func (l *Conn) Close() { l.Debug.Printf("Closing network connection") if err := l.conn.Close(); err != nil { - log.Print(err) + log.Println(err) } l.wgClose.Done() - }) + } l.wgClose.Wait() } // SetTimeout sets the time after a request is sent that a MessageTimeout triggers func (l *Conn) SetTimeout(timeout time.Duration) { if timeout > 0 { - l.requestTimeout = timeout + atomic.StoreInt64(&l.requestTimeout, int64(timeout)) } } // Returns the next available messageID func (l *Conn) nextMessageID() int64 { - if l.chanMessageID != nil { - if messageID, ok := <-l.chanMessageID; ok { - return messageID - } + if messageID, ok := <-l.chanMessageID; ok { + return messageID } return 0 } @@ -327,12 +319,12 @@ func (l *Conn) finishMessage(msgCtx *messageContext) { } func (l *Conn) sendProcessMessage(message *messagePacket) bool { + l.messageMutex.Lock() + defer l.messageMutex.Unlock() if l.isClosing() { return false } - l.wgSender.Add(1) l.chanMessage <- message - l.wgSender.Done() return true } @@ -352,7 +344,6 @@ func (l *Conn) processMessages() { delete(l.messageContexts, messageID) } close(l.chanMessageID) - l.chanConfirm <- true close(l.chanConfirm) }() @@ -361,11 +352,7 @@ func (l *Conn) processMessages() { select { case l.chanMessageID <- messageID: messageID++ - case message, ok := <-l.chanMessage: - if !ok { - l.Debug.Printf("Shutting down - message channel is closed") - return - } + case message := <-l.chanMessage: switch message.Op { case MessageQuit: l.Debug.Printf("Shutting down - quit message received") @@ -388,14 +375,15 @@ func (l *Conn) processMessages() { l.messageContexts[message.MessageID] = message.Context // Add timeout if defined - if l.requestTimeout > 0 { + requestTimeout := time.Duration(atomic.LoadInt64(&l.requestTimeout)) + if requestTimeout > 0 { go func() { defer func() { if err := recover(); err != nil { log.Printf("ldap: 
recovered panic in RequestTimeout: %v", err) } }() - time.Sleep(l.requestTimeout) + time.Sleep(requestTimeout) timeoutMessage := &messagePacket{ Op: MessageTimeout, MessageID: message.MessageID, diff --git a/vendor/github.com/go-ldap/ldap/control.go b/vendor/github.com/go-ldap/ldap/control.go index 342f325ca6..b7f181f2b4 100644 --- a/vendor/github.com/go-ldap/ldap/control.go +++ b/vendor/github.com/go-ldap/ldap/control.go @@ -1,7 +1,3 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - package ldap import ( diff --git a/vendor/github.com/go-ldap/ldap/debug.go b/vendor/github.com/go-ldap/ldap/debug.go index b8a7ecbff1..7279fc2518 100644 --- a/vendor/github.com/go-ldap/ldap/debug.go +++ b/vendor/github.com/go-ldap/ldap/debug.go @@ -6,7 +6,7 @@ import ( "gopkg.in/asn1-ber.v1" ) -// debbuging type +// debugging type // - has a Printf method to write the debug output type debugging bool diff --git a/vendor/github.com/go-ldap/ldap/dn.go b/vendor/github.com/go-ldap/ldap/dn.go index 857b2ca73f..1ee9a1b922 100644 --- a/vendor/github.com/go-ldap/ldap/dn.go +++ b/vendor/github.com/go-ldap/ldap/dn.go @@ -1,8 +1,4 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// -// File contains DN parsing functionallity +// File contains DN parsing functionality // // https://tools.ietf.org/html/rfc4514 // @@ -52,7 +48,7 @@ import ( "fmt" "strings" - ber "gopkg.in/asn1-ber.v1" + "gopkg.in/asn1-ber.v1" ) // AttributeTypeAndValue represents an attributeTypeAndValue from https://tools.ietf.org/html/rfc4514 diff --git a/vendor/github.com/go-ldap/ldap/filter.go b/vendor/github.com/go-ldap/ldap/filter.go index 3858a2865c..4cc4207bec 100644 --- a/vendor/github.com/go-ldap/ldap/filter.go +++ b/vendor/github.com/go-ldap/ldap/filter.go @@ -1,7 +1,3 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - package ldap import ( diff --git a/vendor/github.com/go-ldap/ldap/ldap.go b/vendor/github.com/go-ldap/ldap/ldap.go index d27e639d0f..fe774b5405 100644 --- a/vendor/github.com/go-ldap/ldap/ldap.go +++ b/vendor/github.com/go-ldap/ldap/ldap.go @@ -1,7 +1,3 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - package ldap import ( @@ -9,7 +5,7 @@ import ( "io/ioutil" "os" - ber "gopkg.in/asn1-ber.v1" + "gopkg.in/asn1-ber.v1" ) // LDAP Application Codes diff --git a/vendor/github.com/go-ldap/ldap/modify.go b/vendor/github.com/go-ldap/ldap/modify.go index e4ab6cefc7..2e0353b847 100644 --- a/vendor/github.com/go-ldap/ldap/modify.go +++ b/vendor/github.com/go-ldap/ldap/modify.go @@ -1,7 +1,3 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
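The conn.go hunk above reworks shutdown and timeout handling: the closing flag is flipped with a compare-and-swap, `chanConfirm` becomes a `chan struct{}`, and the request timeout is stored atomically so `SetTimeout` is race-free. A short usage sketch of the affected client surface, assuming the package's existing `Dial` and `Bind` helpers; the address and credentials are placeholders:

```go
package main

import (
	"log"
	"time"

	"github.com/go-ldap/ldap"
)

func main() {
	// Address is illustrative; Dial returns the *Conn whose Close and
	// SetTimeout paths are changed in the hunk above.
	l, err := ldap.Dial("tcp", "ldap.example.com:389")
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()

	// Per the hunk, the timeout is now stored with atomic.StoreInt64.
	l.SetTimeout(10 * time.Second)

	if err := l.Bind("cn=admin,dc=example,dc=com", "password"); err != nil {
		log.Fatal(err)
	}
}
```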
-// // File contains Modify functionality // // https://tools.ietf.org/html/rfc4511 diff --git a/vendor/github.com/go-ldap/ldap/passwdmodify.go b/vendor/github.com/go-ldap/ldap/passwdmodify.go index 26110ccf4a..7d8246fd18 100644 --- a/vendor/github.com/go-ldap/ldap/passwdmodify.go +++ b/vendor/github.com/go-ldap/ldap/passwdmodify.go @@ -135,10 +135,10 @@ func (l *Conn) PasswordModify(passwordModifyRequest *PasswordModifyRequest) (*Pa extendedResponse := packet.Children[1] for _, child := range extendedResponse.Children { if child.Tag == 11 { - passwordModifyReponseValue := ber.DecodePacket(child.Data.Bytes()) - if len(passwordModifyReponseValue.Children) == 1 { - if passwordModifyReponseValue.Children[0].Tag == 0 { - result.GeneratedPassword = ber.DecodeString(passwordModifyReponseValue.Children[0].Data.Bytes()) + passwordModifyResponseValue := ber.DecodePacket(child.Data.Bytes()) + if len(passwordModifyResponseValue.Children) == 1 { + if passwordModifyResponseValue.Children[0].Tag == 0 { + result.GeneratedPassword = ber.DecodeString(passwordModifyResponseValue.Children[0].Data.Bytes()) } } } diff --git a/vendor/github.com/go-ldap/ldap/search.go b/vendor/github.com/go-ldap/ldap/search.go index 2a99894c94..97ae95bf79 100644 --- a/vendor/github.com/go-ldap/ldap/search.go +++ b/vendor/github.com/go-ldap/ldap/search.go @@ -1,7 +1,3 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// // File contains Search functionality // // https://tools.ietf.org/html/rfc4511 diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go index b4429a21c8..7af80691fb 100644 --- a/vendor/github.com/go-openapi/spec/expander.go +++ b/vendor/github.com/go-openapi/spec/expander.go @@ -352,14 +352,12 @@ func normalizeFileRef(ref *Ref, relativeBase string) *Ref { } func (r *schemaLoader) resolveRef(currentRef, ref *Ref, node, target interface{}) error { - tgt := reflect.ValueOf(target) if tgt.Kind() != reflect.Ptr { return fmt.Errorf("resolve ref: target needs to be a pointer") } oldRef := currentRef - if currentRef != nil { debugLog("resolve ref current %s new %s", currentRef.String(), ref.String()) nextRef := nextRef(node, ref, currentRef.GetPointer()) @@ -467,8 +465,6 @@ func (r *schemaLoader) resolveRef(currentRef, ref *Ref, node, target interface{} return err } - r.currentRef = currentRef - return nil } @@ -645,14 +641,18 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (* return resolver.root.(*Schema), nil } - // t is the new expanded schema var t *Schema - + var basePath string + b, _ := json.Marshal(target) + debugLog("Target is: %s", string(b)) for target.Ref.String() != "" { if swag.ContainsStringsCI(parentRefs, target.Ref.String()) { return &target, nil } - + basePath = target.Ref.RemoteURI() + debugLog("\n\n\n\n\nbasePath: %s", basePath) + b, _ := json.Marshal(target) + debugLog("calling Resolve with target: %s", string(b)) if err := resolver.Resolve(&target.Ref, &t); shouldStopOnError(err, resolver.options) { return &target, err } @@ -666,7 +666,13 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (* target = *t } } - + if target.Ref.String() == "" { + b, _ := json.Marshal(target) + debugLog("before: %s", string(b)) + modifyRefs(&target, basePath) + b, _ = json.Marshal(target) + debugLog("after: %s", string(b)) + } t, err := expandItems(target, parentRefs, resolver) if 
shouldStopOnError(err, resolver.options) { return &target, err @@ -675,6 +681,8 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (* target = *t } + resolver.reset() + for i := range target.AllOf { t, err := expandSchema(target.AllOf[i], parentRefs, resolver) if shouldStopOnError(err, resolver.options) { diff --git a/vendor/github.com/go-openapi/spec/items.go b/vendor/github.com/go-openapi/spec/items.go index 46944fb699..07ac88e669 100644 --- a/vendor/github.com/go-openapi/spec/items.go +++ b/vendor/github.com/go-openapi/spec/items.go @@ -178,9 +178,14 @@ func (i *Items) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &simpleSchema); err != nil { return err } + var vendorExtensible VendorExtensible + if err := json.Unmarshal(data, &vendorExtensible); err != nil { + return err + } i.Refable = ref i.CommonValidations = validations i.SimpleSchema = simpleSchema + i.VendorExtensible = vendorExtensible return nil } @@ -198,7 +203,11 @@ func (i Items) MarshalJSON() ([]byte, error) { if err != nil { return nil, err } - return swag.ConcatJSON(b3, b1, b2), nil + b4, err := json.Marshal(i.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b4, b3, b1, b2), nil } // JSONLookup look up a value by the json property name diff --git a/vendor/github.com/go-openapi/spec/refmodifier.go b/vendor/github.com/go-openapi/spec/refmodifier.go new file mode 100644 index 0000000000..8482608ea7 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/refmodifier.go @@ -0,0 +1,82 @@ +// Copyright 2017 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package spec + +import ( + "fmt" +) + +func modifyItemsRefs(target *Schema, basePath string) { + if target.Items != nil { + if target.Items.Schema != nil { + modifyRefs(target.Items.Schema, basePath) + } + for i := range target.Items.Schemas { + s := target.Items.Schemas[i] + modifyRefs(&s, basePath) + target.Items.Schemas[i] = s + } + } +} + +func modifyRefs(target *Schema, basePath string) { + if target.Ref.String() != "" { + if target.Ref.RemoteURI() == basePath { + return + } + newURL := fmt.Sprintf("%s%s", basePath, target.Ref.String()) + target.Ref, _ = NewRef(newURL) + } + + modifyItemsRefs(target, basePath) + for i := range target.AllOf { + modifyRefs(&target.AllOf[i], basePath) + } + for i := range target.AnyOf { + modifyRefs(&target.AnyOf[i], basePath) + } + for i := range target.OneOf { + modifyRefs(&target.OneOf[i], basePath) + } + if target.Not != nil { + modifyRefs(target.Not, basePath) + } + for k := range target.Properties { + s := target.Properties[k] + modifyRefs(&s, basePath) + target.Properties[k] = s + } + if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil { + modifyRefs(target.AdditionalProperties.Schema, basePath) + } + for k := range target.PatternProperties { + s := target.PatternProperties[k] + modifyRefs(&s, basePath) + target.PatternProperties[k] = s + } + for k := range target.Dependencies { + if target.Dependencies[k].Schema != nil { + modifyRefs(target.Dependencies[k].Schema, basePath) + } + } + if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil { + modifyRefs(target.AdditionalItems.Schema, basePath) + } + for k := range target.Definitions { + s := target.Definitions[k] + modifyRefs(&s, basePath) + target.Definitions[k] = s + } +} diff --git a/vendor/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/go-sql-driver/mysql/AUTHORS index 646159940d..ac36be9a71 100644 --- a/vendor/github.com/go-sql-driver/mysql/AUTHORS +++ b/vendor/github.com/go-sql-driver/mysql/AUTHORS @@ -34,6 +34,7 @@ INADA Naoki Jacek Szwec James Harr Jeff Hodges +Jeffrey Charles Jian Zhen Joshua Prunier Julien Lefevre diff --git a/vendor/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/go-sql-driver/mysql/README.md index 925a69cf1d..d24aaa0f0c 100644 --- a/vendor/github.com/go-sql-driver/mysql/README.md +++ b/vendor/github.com/go-sql-driver/mysql/README.md @@ -16,10 +16,11 @@ A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) pac * [Parameters](#parameters) * [Examples](#examples) * [Connection pool and timeouts](#connection-pool-and-timeouts) + * [context.Context Support](#contextcontext-support) + * [ColumnType Support](#columntype-support) * [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support) * [time.Time support](#timetime-support) * [Unicode support](#unicode-support) - * [context.Context Support](#contextcontext-support) * [Testing / Development](#testing--development) * [License](#license) @@ -47,7 +48,7 @@ A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) pac ## Installation Simple install the package to your [$GOPATH](https://github.com/golang/go/wiki/GOPATH "GOPATH") with the [go tool](https://golang.org/cmd/go/ "go command") from shell: ```bash -$ go get github.com/go-sql-driver/mysql +$ go get -u github.com/go-sql-driver/mysql ``` Make sure [Git is installed](https://git-scm.com/downloads) on your machine and in your system's `PATH`. 
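The README hunk above only changes the `go get` line, but as context for the surrounding documentation changes, a minimal program exercising the documented usage might look like the sketch below; the DSN values are placeholders, not taken from the patch:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql" // blank import registers the "mysql" driver
)

func main() {
	// Connection details are illustrative; parseTime is one of the DSN
	// parameters documented in the README hunks that follow.
	db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/dbname?parseTime=true")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
	log.Println("connected")
}
```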
@@ -101,7 +102,8 @@ See [net.Dial](https://golang.org/pkg/net/#Dial) for more information which netw In general you should use an Unix domain socket if available and TCP otherwise for best performance. #### Address -For TCP and UDP networks, addresses have the form `host:port`. +For TCP and UDP networks, addresses have the form `host[:port]`. +If `port` is omitted, the default port will be used. If `host` is a literal IPv6 address, it must be enclosed in square brackets. The functions [net.JoinHostPort](https://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](https://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form. @@ -231,10 +233,10 @@ Please keep in mind, that param values must be [url.QueryEscape](https://golang. ##### `maxAllowedPacket` ``` Type: decimal number -Default: 0 +Default: 4194304 ``` -Max packet size allowed in bytes. Use `maxAllowedPacket=0` to automatically fetch the `max_allowed_packet` variable from server. +Max packet size allowed in bytes. The default value is 4 MiB and should be adjusted to match the server settings. `maxAllowedPacket=0` can be used to automatically fetch the `max_allowed_packet` variable from server *on every connection*. ##### `multiStatements` @@ -277,7 +279,7 @@ Default: false ``` -`rejectreadOnly=true` causes the driver to reject read-only connections. This +`rejectReadOnly=true` causes the driver to reject read-only connections. This is for a possible race condition during an automatic failover, where the mysql client gets connected to a read-only replica after the failover. @@ -292,20 +294,11 @@ If you are not relying on read-only transactions to reject writes that aren't supposed to happen, setting this on some MySQL providers (such as AWS Aurora) is safer for failovers. +Note that ERROR 1290 can be returned for a `read-only` server and this option will +cause a retry for that error. However the same error number is used for some +other cases. You should ensure your application will never cause an ERROR 1290 +except for `read-only` mode when enabling this option. -##### `strict` - -``` -Type: bool -Valid Values: true, false -Default: false -``` - -`strict=true` enables a driver-side strict mode in which MySQL warnings are treated as errors. This mode should not be used in production as it may lead to data corruption in certain situations. - -A server-side strict mode, which is safe for production use, can be set via the [`sql_mode`](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html) system variable. - -By default MySQL also treats notes as warnings. Use [`sql_notes=false`](http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_sql_notes) to ignore notes. ##### `timeout` @@ -316,6 +309,7 @@ Default: OS default Timeout for establishing connections, aka dial timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*. + ##### `tls` ``` @@ -326,6 +320,7 @@ Default: false `tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side). Use a custom value registered with [`mysql.RegisterTLSConfig`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig). + ##### `writeTimeout` ``` @@ -344,9 +339,9 @@ Any other parameters are interpreted as system variables: * `=%27%27`: `SET =''` Rules: -* The values for string variables must be quoted with ' +* The values for string variables must be quoted with `'`. 
* The values must also be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed! - (which implies values of string variables must be wrapped with `%27`) + (which implies values of string variables must be wrapped with `%27`). Examples: * `autocommit=1`: `SET autocommit=1` @@ -411,6 +406,13 @@ user:password@/ ### Connection pool and timeouts The connection pool is managed by Go's database/sql package. For details on how to configure the size of the pool and how long connections stay in the pool see `*DB.SetMaxOpenConns`, `*DB.SetMaxIdleConns`, and `*DB.SetConnMaxLifetime` in the [database/sql documentation](https://golang.org/pkg/database/sql/). The read, write, and dial timeouts for each individual connection are configured with the DSN parameters [`readTimeout`](#readtimeout), [`writeTimeout`](#writetimeout), and [`timeout`](#timeout), respectively. +## `ColumnType` Support +This driver supports the [`ColumnType` interface](https://golang.org/pkg/database/sql/#ColumnType) introduced in Go 1.8, with the exception of [`ColumnType.Length()`](https://golang.org/pkg/database/sql/#ColumnType.Length), which is currently not supported. + +## `context.Context` Support +Go 1.8 added `database/sql` support for `context.Context`. This driver supports query timeouts and cancellation via contexts. +See [context support in the database/sql package](https://golang.org/doc/go1.8#database_sql) for more details. + ### `LOAD DATA LOCAL INFILE` support For this feature you need direct access to the package. Therefore you must change the import path (no `_`): @@ -426,7 +428,7 @@ See the [godoc of Go-MySQL-Driver](https://godoc.org/github.com/go-sql-driver/my ### `time.Time` support -The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte` which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your programm. +The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte` which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your program. However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical opposite in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](https://golang.org/pkg/time/#Location) with the `loc` DSN parameter. @@ -444,10 +446,6 @@ Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAM See http://dev.mysql.com/doc/refman/5.7/en/charset-unicode.html for more details on MySQL's Unicode support. -## `context.Context` Support -Go 1.8 added `database/sql` support for `context.Context`. This driver supports query timeouts and cancellation via contexts. -See [context support in the database/sql package](https://golang.org/doc/go1.8#database_sql) for more details. - ## Testing / Development To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details. @@ -466,13 +464,13 @@ Mozilla summarizes the license scope as follows: That means: - * You can **use** the **unchanged** source code both in private and commercially - * When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. 
GPL 3.0 or Apache License 2.0) - * You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged** + * You can **use** the **unchanged** source code both in private and commercially. + * When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0). + * You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged**. Please read the [MPL 2.0 FAQ](https://www.mozilla.org/en-US/MPL/2.0/FAQ/) if you have further questions regarding the license. -You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE) +You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE). ![Go Gopher and MySQL Dolphin](https://raw.github.com/wiki/go-sql-driver/mysql/go-mysql-driver_m.jpg "Golang Gopher transporting the MySQL Dolphin in a wheelbarrow") diff --git a/vendor/github.com/go-sql-driver/mysql/connection.go b/vendor/github.com/go-sql-driver/mysql/connection.go index 948a595618..e57061412b 100644 --- a/vendor/github.com/go-sql-driver/mysql/connection.go +++ b/vendor/github.com/go-sql-driver/mysql/connection.go @@ -40,7 +40,6 @@ type mysqlConn struct { status statusFlag sequence uint8 parseTime bool - strict bool // for context support (Go 1.8+) watching bool @@ -404,6 +403,7 @@ func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error) return nil, err } } + // Columns rows.rs.columns, err = mc.readColumns(resLen) return rows, err diff --git a/vendor/github.com/go-sql-driver/mysql/const.go b/vendor/github.com/go-sql-driver/mysql/const.go index 88cfff3fd8..4a19ca5235 100644 --- a/vendor/github.com/go-sql-driver/mysql/const.go +++ b/vendor/github.com/go-sql-driver/mysql/const.go @@ -9,7 +9,8 @@ package mysql const ( - minProtocolVersion byte = 10 + defaultMaxAllowedPacket = 4 << 20 // 4 MiB + minProtocolVersion = 10 maxPacketSize = 1<<24 - 1 timeFormat = "2006-01-02 15:04:05.999999" ) @@ -87,8 +88,10 @@ const ( ) // https://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnType +type fieldType byte + const ( - fieldTypeDecimal byte = iota + fieldTypeDecimal fieldType = iota fieldTypeTiny fieldTypeShort fieldTypeLong @@ -107,7 +110,7 @@ const ( fieldTypeBit ) const ( - fieldTypeJSON byte = iota + 0xf5 + fieldTypeJSON fieldType = iota + 0xf5 fieldTypeNewDecimal fieldTypeEnum fieldTypeSet diff --git a/vendor/github.com/go-sql-driver/mysql/driver.go b/vendor/github.com/go-sql-driver/mysql/driver.go index c341b66808..d42ce7a3de 100644 --- a/vendor/github.com/go-sql-driver/mysql/driver.go +++ b/vendor/github.com/go-sql-driver/mysql/driver.go @@ -64,7 +64,6 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) { return nil, err } mc.parseTime = mc.cfg.ParseTime - mc.strict = mc.cfg.Strict // Connect to Server if dial, ok := dials[mc.cfg.Net]; ok { diff --git a/vendor/github.com/go-sql-driver/mysql/dsn.go b/vendor/github.com/go-sql-driver/mysql/dsn.go index 9cf4bb94a5..3ade963ee2 100644 --- a/vendor/github.com/go-sql-driver/mysql/dsn.go +++ b/vendor/github.com/go-sql-driver/mysql/dsn.go @@ -28,7 +28,9 @@ var ( errInvalidDSNUnsafeCollation = errors.New("invalid DSN: interpolateParams can not be used with unsafe collations") ) -// Config is a configuration parsed from a DSN string +// Config is a configuration parsed from a DSN 
string. +// If a new Config is created instead of being parsed from a DSN string, +// the NewConfig function should be used, which sets default values. type Config struct { User string // Username Passwd string // Password (requires User) @@ -55,7 +57,44 @@ type Config struct { MultiStatements bool // Allow multiple statements in one query ParseTime bool // Parse time values to time.Time RejectReadOnly bool // Reject read-only connections - Strict bool // Return warnings as errors +} + +// NewConfig creates a new Config and sets default values. +func NewConfig() *Config { + return &Config{ + Collation: defaultCollation, + Loc: time.UTC, + MaxAllowedPacket: defaultMaxAllowedPacket, + AllowNativePasswords: true, + } +} + +func (cfg *Config) normalize() error { + if cfg.InterpolateParams && unsafeCollations[cfg.Collation] { + return errInvalidDSNUnsafeCollation + } + + // Set default network if empty + if cfg.Net == "" { + cfg.Net = "tcp" + } + + // Set default address if empty + if cfg.Addr == "" { + switch cfg.Net { + case "tcp": + cfg.Addr = "127.0.0.1:3306" + case "unix": + cfg.Addr = "/tmp/mysql.sock" + default: + return errors.New("default addr for network '" + cfg.Net + "' unknown") + } + + } else if cfg.Net == "tcp" { + cfg.Addr = ensureHavePort(cfg.Addr) + } + + return nil } // FormatDSN formats the given Config into a DSN string which can be passed to @@ -206,15 +245,6 @@ func (cfg *Config) FormatDSN() string { } } - if cfg.Strict { - if hasParam { - buf.WriteString("&strict=true") - } else { - hasParam = true - buf.WriteString("?strict=true") - } - } - if cfg.Timeout > 0 { if hasParam { buf.WriteString("&timeout=") @@ -245,7 +275,7 @@ func (cfg *Config) FormatDSN() string { buf.WriteString(cfg.WriteTimeout.String()) } - if cfg.MaxAllowedPacket > 0 { + if cfg.MaxAllowedPacket != defaultMaxAllowedPacket { if hasParam { buf.WriteString("&maxAllowedPacket=") } else { @@ -283,11 +313,7 @@ func (cfg *Config) FormatDSN() string { // ParseDSN parses the DSN string to a Config func ParseDSN(dsn string) (cfg *Config, err error) { // New config with some default values - cfg = &Config{ - Loc: time.UTC, - Collation: defaultCollation, - AllowNativePasswords: true, - } + cfg = NewConfig() // [user[:password]@][net[(addr)]]/dbname[?param1=value1¶mN=valueN] // Find the last '/' (since the password or the net addr might contain a '/') @@ -355,28 +381,9 @@ func ParseDSN(dsn string) (cfg *Config, err error) { return nil, errInvalidDSNNoSlash } - if cfg.InterpolateParams && unsafeCollations[cfg.Collation] { - return nil, errInvalidDSNUnsafeCollation + if err = cfg.normalize(); err != nil { + return nil, err } - - // Set default network if empty - if cfg.Net == "" { - cfg.Net = "tcp" - } - - // Set default address if empty - if cfg.Addr == "" { - switch cfg.Net { - case "tcp": - cfg.Addr = "127.0.0.1:3306" - case "unix": - cfg.Addr = "/tmp/mysql.sock" - default: - return nil, errors.New("default addr for network '" + cfg.Net + "' unknown") - } - - } - return } @@ -499,11 +506,7 @@ func parseDSNParams(cfg *Config, params string) (err error) { // Strict mode case "strict": - var isBool bool - cfg.Strict, isBool = readBool(value) - if !isBool { - return errors.New("invalid bool value: " + value) - } + panic("strict mode has been removed. 
See https://github.com/go-sql-driver/mysql/wiki/strict-mode") // Dial Timeout case "timeout": @@ -575,3 +578,10 @@ func parseDSNParams(cfg *Config, params string) (err error) { return } + +func ensureHavePort(addr string) string { + if _, _, err := net.SplitHostPort(addr); err != nil { + return net.JoinHostPort(addr, "3306") + } + return addr +} diff --git a/vendor/github.com/go-sql-driver/mysql/errors.go b/vendor/github.com/go-sql-driver/mysql/errors.go index d0d0d2e118..760782ff2f 100644 --- a/vendor/github.com/go-sql-driver/mysql/errors.go +++ b/vendor/github.com/go-sql-driver/mysql/errors.go @@ -9,10 +9,8 @@ package mysql import ( - "database/sql/driver" "errors" "fmt" - "io" "log" "os" ) @@ -65,74 +63,3 @@ type MySQLError struct { func (me *MySQLError) Error() string { return fmt.Sprintf("Error %d: %s", me.Number, me.Message) } - -// MySQLWarnings is an error type which represents a group of one or more MySQL -// warnings -type MySQLWarnings []MySQLWarning - -func (mws MySQLWarnings) Error() string { - var msg string - for i, warning := range mws { - if i > 0 { - msg += "\r\n" - } - msg += fmt.Sprintf( - "%s %s: %s", - warning.Level, - warning.Code, - warning.Message, - ) - } - return msg -} - -// MySQLWarning is an error type which represents a single MySQL warning. -// Warnings are returned in groups only. See MySQLWarnings -type MySQLWarning struct { - Level string - Code string - Message string -} - -func (mc *mysqlConn) getWarnings() (err error) { - rows, err := mc.Query("SHOW WARNINGS", nil) - if err != nil { - return - } - - var warnings = MySQLWarnings{} - var values = make([]driver.Value, 3) - - for { - err = rows.Next(values) - switch err { - case nil: - warning := MySQLWarning{} - - if raw, ok := values[0].([]byte); ok { - warning.Level = string(raw) - } else { - warning.Level = fmt.Sprintf("%s", values[0]) - } - if raw, ok := values[1].([]byte); ok { - warning.Code = string(raw) - } else { - warning.Code = fmt.Sprintf("%s", values[1]) - } - if raw, ok := values[2].([]byte); ok { - warning.Message = string(raw) - } else { - warning.Message = fmt.Sprintf("%s", values[0]) - } - - warnings = append(warnings, warning) - - case io.EOF: - return warnings - - default: - rows.Close() - return - } - } -} diff --git a/vendor/github.com/go-sql-driver/mysql/fields.go b/vendor/github.com/go-sql-driver/mysql/fields.go new file mode 100644 index 0000000000..cded986d2a --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/fields.go @@ -0,0 +1,140 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
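The dsn.go hunk above introduces `NewConfig`, which pre-fills the new defaults (UTC location, 4 MiB `MaxAllowedPacket`, `AllowNativePasswords`), and moves address/port defaulting into `normalize`. A small sketch of building a DSN programmatically with it; `DBName` is assumed from the full `Config` struct rather than shown in this hunk, and the values are placeholders:

```go
package main

import (
	"fmt"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// NewConfig sets the defaults added in this hunk, so callers no longer
	// hand-build Config{}.
	cfg := mysql.NewConfig()
	cfg.User = "app"       // placeholder values
	cfg.Passwd = "secret"
	cfg.Net = "tcp"
	cfg.Addr = "127.0.0.1" // ParseDSN/normalize fills in the default :3306
	cfg.DBName = "appdb"   // assumed field from the full Config struct

	// The result can be passed to sql.Open("mysql", ...) as in the earlier sketch.
	fmt.Println(cfg.FormatDSN())
}
```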
+ +package mysql + +import ( + "database/sql" + "reflect" +) + +var typeDatabaseName = map[fieldType]string{ + fieldTypeBit: "BIT", + fieldTypeBLOB: "BLOB", + fieldTypeDate: "DATE", + fieldTypeDateTime: "DATETIME", + fieldTypeDecimal: "DECIMAL", + fieldTypeDouble: "DOUBLE", + fieldTypeEnum: "ENUM", + fieldTypeFloat: "FLOAT", + fieldTypeGeometry: "GEOMETRY", + fieldTypeInt24: "MEDIUMINT", + fieldTypeJSON: "JSON", + fieldTypeLong: "INT", + fieldTypeLongBLOB: "LONGBLOB", + fieldTypeLongLong: "BIGINT", + fieldTypeMediumBLOB: "MEDIUMBLOB", + fieldTypeNewDate: "DATE", + fieldTypeNewDecimal: "DECIMAL", + fieldTypeNULL: "NULL", + fieldTypeSet: "SET", + fieldTypeShort: "SMALLINT", + fieldTypeString: "CHAR", + fieldTypeTime: "TIME", + fieldTypeTimestamp: "TIMESTAMP", + fieldTypeTiny: "TINYINT", + fieldTypeTinyBLOB: "TINYBLOB", + fieldTypeVarChar: "VARCHAR", + fieldTypeVarString: "VARCHAR", + fieldTypeYear: "YEAR", +} + +var ( + scanTypeFloat32 = reflect.TypeOf(float32(0)) + scanTypeFloat64 = reflect.TypeOf(float64(0)) + scanTypeInt8 = reflect.TypeOf(int8(0)) + scanTypeInt16 = reflect.TypeOf(int16(0)) + scanTypeInt32 = reflect.TypeOf(int32(0)) + scanTypeInt64 = reflect.TypeOf(int64(0)) + scanTypeNullFloat = reflect.TypeOf(sql.NullFloat64{}) + scanTypeNullInt = reflect.TypeOf(sql.NullInt64{}) + scanTypeNullTime = reflect.TypeOf(NullTime{}) + scanTypeUint8 = reflect.TypeOf(uint8(0)) + scanTypeUint16 = reflect.TypeOf(uint16(0)) + scanTypeUint32 = reflect.TypeOf(uint32(0)) + scanTypeUint64 = reflect.TypeOf(uint64(0)) + scanTypeRawBytes = reflect.TypeOf(sql.RawBytes{}) + scanTypeUnknown = reflect.TypeOf(new(interface{})) +) + +type mysqlField struct { + tableName string + name string + length uint32 + flags fieldFlag + fieldType fieldType + decimals byte +} + +func (mf *mysqlField) scanType() reflect.Type { + switch mf.fieldType { + case fieldTypeTiny: + if mf.flags&flagNotNULL != 0 { + if mf.flags&flagUnsigned != 0 { + return scanTypeUint8 + } + return scanTypeInt8 + } + return scanTypeNullInt + + case fieldTypeShort, fieldTypeYear: + if mf.flags&flagNotNULL != 0 { + if mf.flags&flagUnsigned != 0 { + return scanTypeUint16 + } + return scanTypeInt16 + } + return scanTypeNullInt + + case fieldTypeInt24, fieldTypeLong: + if mf.flags&flagNotNULL != 0 { + if mf.flags&flagUnsigned != 0 { + return scanTypeUint32 + } + return scanTypeInt32 + } + return scanTypeNullInt + + case fieldTypeLongLong: + if mf.flags&flagNotNULL != 0 { + if mf.flags&flagUnsigned != 0 { + return scanTypeUint64 + } + return scanTypeInt64 + } + return scanTypeNullInt + + case fieldTypeFloat: + if mf.flags&flagNotNULL != 0 { + return scanTypeFloat32 + } + return scanTypeNullFloat + + case fieldTypeDouble: + if mf.flags&flagNotNULL != 0 { + return scanTypeFloat64 + } + return scanTypeNullFloat + + case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar, + fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB, + fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB, + fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON, + fieldTypeTime: + return scanTypeRawBytes + + case fieldTypeDate, fieldTypeNewDate, + fieldTypeTimestamp, fieldTypeDateTime: + // NullTime is always returned for more consistent behavior as it can + // handle both cases of parseTime regardless if the field is nullable. 
+ return scanTypeNullTime + + default: + return scanTypeUnknown + } +} diff --git a/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/go-sql-driver/mysql/packets.go index 79648d5725..f63d25072c 100644 --- a/vendor/github.com/go-sql-driver/mysql/packets.go +++ b/vendor/github.com/go-sql-driver/mysql/packets.go @@ -571,7 +571,8 @@ func (mc *mysqlConn) handleErrorPacket(data []byte) error { errno := binary.LittleEndian.Uint16(data[1:3]) // 1792: ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION - if errno == 1792 && mc.cfg.RejectReadOnly { + // 1290: ER_OPTION_PREVENTS_STATEMENT (returned by Aurora during failover) + if (errno == 1792 || errno == 1290) && mc.cfg.RejectReadOnly { // Oops; we are connected to a read-only connection, and won't be able // to issue any write statements. Since RejectReadOnly is configured, // we throw away this connection hoping this one would have write @@ -624,14 +625,7 @@ func (mc *mysqlConn) handleOkPacket(data []byte) error { } // warning count [2 bytes] - if !mc.strict { - return nil - } - pos := 1 + n + m + 2 - if binary.LittleEndian.Uint16(data[pos:pos+2]) > 0 { - return mc.getWarnings() - } return nil } @@ -706,11 +700,14 @@ func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) { // Filler [uint8] // Charset [charset, collation uint8] + pos += n + 1 + 2 + // Length [uint32] - pos += n + 1 + 2 + 4 + columns[i].length = binary.LittleEndian.Uint32(data[pos : pos+4]) + pos += 4 // Field type [uint8] - columns[i].fieldType = data[pos] + columns[i].fieldType = fieldType(data[pos]) pos++ // Flags [uint16] @@ -843,14 +840,7 @@ func (stmt *mysqlStmt) readPrepareResultPacket() (uint16, error) { // Reserved [8 bit] // Warning count [16 bit uint] - if !stmt.mc.strict { - return columnCount, nil - } - // Check for warnings count > 0, only available in MySQL > 4.1 - if len(data) >= 12 && binary.LittleEndian.Uint16(data[10:12]) > 0 { - return columnCount, stmt.mc.getWarnings() - } return columnCount, nil } return 0, err @@ -994,7 +984,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { // build NULL-bitmap if arg == nil { nullMask[i/8] |= 1 << (uint(i) & 7) - paramTypes[i+i] = fieldTypeNULL + paramTypes[i+i] = byte(fieldTypeNULL) paramTypes[i+i+1] = 0x00 continue } @@ -1002,7 +992,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { // cache types and values switch v := arg.(type) { case int64: - paramTypes[i+i] = fieldTypeLongLong + paramTypes[i+i] = byte(fieldTypeLongLong) paramTypes[i+i+1] = 0x00 if cap(paramValues)-len(paramValues)-8 >= 0 { @@ -1018,7 +1008,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { } case float64: - paramTypes[i+i] = fieldTypeDouble + paramTypes[i+i] = byte(fieldTypeDouble) paramTypes[i+i+1] = 0x00 if cap(paramValues)-len(paramValues)-8 >= 0 { @@ -1034,7 +1024,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { } case bool: - paramTypes[i+i] = fieldTypeTiny + paramTypes[i+i] = byte(fieldTypeTiny) paramTypes[i+i+1] = 0x00 if v { @@ -1046,7 +1036,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { case []byte: // Common case (non-nil value) first if v != nil { - paramTypes[i+i] = fieldTypeString + paramTypes[i+i] = byte(fieldTypeString) paramTypes[i+i+1] = 0x00 if len(v) < mc.maxAllowedPacket-pos-len(paramValues)-(len(args)-(i+1))*64 { @@ -1064,11 +1054,11 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { // Handle []byte(nil) as a NULL value nullMask[i/8] |= 1 << (uint(i) & 7) - 
paramTypes[i+i] = fieldTypeNULL + paramTypes[i+i] = byte(fieldTypeNULL) paramTypes[i+i+1] = 0x00 case string: - paramTypes[i+i] = fieldTypeString + paramTypes[i+i] = byte(fieldTypeString) paramTypes[i+i+1] = 0x00 if len(v) < mc.maxAllowedPacket-pos-len(paramValues)-(len(args)-(i+1))*64 { @@ -1083,7 +1073,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { } case time.Time: - paramTypes[i+i] = fieldTypeString + paramTypes[i+i] = byte(fieldTypeString) paramTypes[i+i+1] = 0x00 var a [64]byte @@ -1157,10 +1147,11 @@ func (rows *binaryRows) readRow(dest []driver.Value) error { } return io.EOF } + mc := rows.mc rows.mc = nil // Error otherwise - return rows.mc.handleErrorPacket(data) + return mc.handleErrorPacket(data) } // NULL-bitmap, [(column-count + 7 + 2) / 8 bytes] diff --git a/vendor/github.com/go-sql-driver/mysql/rows.go b/vendor/github.com/go-sql-driver/mysql/rows.go index c7f5ee26cb..18f41693ef 100644 --- a/vendor/github.com/go-sql-driver/mysql/rows.go +++ b/vendor/github.com/go-sql-driver/mysql/rows.go @@ -11,16 +11,10 @@ package mysql import ( "database/sql/driver" "io" + "math" + "reflect" ) -type mysqlField struct { - tableName string - name string - flags fieldFlag - fieldType byte - decimals byte -} - type resultSet struct { columns []mysqlField columnNames []string @@ -65,6 +59,47 @@ func (rows *mysqlRows) Columns() []string { return columns } +func (rows *mysqlRows) ColumnTypeDatabaseTypeName(i int) string { + if name, ok := typeDatabaseName[rows.rs.columns[i].fieldType]; ok { + return name + } + return "" +} + +// func (rows *mysqlRows) ColumnTypeLength(i int) (length int64, ok bool) { +// return int64(rows.rs.columns[i].length), true +// } + +func (rows *mysqlRows) ColumnTypeNullable(i int) (nullable, ok bool) { + return rows.rs.columns[i].flags&flagNotNULL == 0, true +} + +func (rows *mysqlRows) ColumnTypePrecisionScale(i int) (int64, int64, bool) { + column := rows.rs.columns[i] + decimals := int64(column.decimals) + + switch column.fieldType { + case fieldTypeDecimal, fieldTypeNewDecimal: + if decimals > 0 { + return int64(column.length) - 2, decimals, true + } + return int64(column.length) - 1, decimals, true + case fieldTypeTimestamp, fieldTypeDateTime, fieldTypeTime: + return decimals, decimals, true + case fieldTypeFloat, fieldTypeDouble: + if decimals == 0x1f { + return math.MaxInt64, math.MaxInt64, true + } + return math.MaxInt64, decimals, true + } + + return 0, 0, false +} + +func (rows *mysqlRows) ColumnTypeScanType(i int) reflect.Type { + return rows.rs.columns[i].scanType() +} + func (rows *mysqlRows) Close() (err error) { if f := rows.finish; f != nil { f() diff --git a/vendor/github.com/gocql/gocql/AUTHORS b/vendor/github.com/gocql/gocql/AUTHORS index 3557f60ca6..30d66d0550 100644 --- a/vendor/github.com/gocql/gocql/AUTHORS +++ b/vendor/github.com/gocql/gocql/AUTHORS @@ -93,3 +93,7 @@ Yasser Abdolmaleki Krishnanand Thommandra Blake Atkinson Dharmendra Parsaila +Nayef Ghattas +Michał Matczuk +Ben Krebsbach +Vivian Mathews diff --git a/vendor/github.com/gocql/gocql/README.md b/vendor/github.com/gocql/gocql/README.md index 2c91575142..bc07eeca1a 100644 --- a/vendor/github.com/gocql/gocql/README.md +++ b/vendor/github.com/gocql/gocql/README.md @@ -19,8 +19,8 @@ The following matrix shows the versions of Go and Cassandra that are tested with Go/Cassandra | 2.1.x | 2.2.x | 3.0.x -------------| -------| ------| --------- -1.6 | yes | yes | yes -1.7 | yes | yes | yes +1.8 | yes | yes | yes +1.9 | yes | yes | yes Gocql has been tested in 
production against many different versions of Cassandra. Due to limits in our CI setup we only test against the latest 3 major releases, which coincide with the official support from the Apache project. diff --git a/vendor/github.com/gocql/gocql/cluster.go b/vendor/github.com/gocql/gocql/cluster.go index 86406f2bff..f011cefcb6 100644 --- a/vendor/github.com/gocql/gocql/cluster.go +++ b/vendor/github.com/gocql/gocql/cluster.go @@ -168,6 +168,10 @@ func (cfg *ClusterConfig) translateAddressPort(addr net.IP, port int) (net.IP, i return newAddr, newPort } +func (cfg *ClusterConfig) filterHost(host *HostInfo) bool { + return !(cfg.HostFilter == nil || cfg.HostFilter.Accept(host)) +} + var ( ErrNoHosts = errors.New("no hosts provided") ErrNoConnectionsStarted = errors.New("no connections were made when creating the session") diff --git a/vendor/github.com/gocql/gocql/conn.go b/vendor/github.com/gocql/gocql/conn.go index c80c4d92b7..0d927cd66c 100644 --- a/vendor/github.com/gocql/gocql/conn.go +++ b/vendor/github.com/gocql/gocql/conn.go @@ -141,8 +141,6 @@ type Conn struct { version uint8 currentKeyspace string - host *HostInfo - session *Session closed int32 @@ -152,14 +150,12 @@ type Conn struct { } // Connect establishes a connection to a Cassandra node. -func Connect(host *HostInfo, cfg *ConnConfig, errorHandler ConnErrorHandler, session *Session) (*Conn, error) { +func (s *Session) dial(ip net.IP, port int, cfg *ConnConfig, errorHandler ConnErrorHandler) (*Conn, error) { // TODO(zariel): remove these - if host == nil { - panic("host is nil") - } else if len(host.ConnectAddress()) == 0 { - panic(fmt.Sprintf("host missing connect ip address: %v", host)) - } else if host.Port() == 0 { - panic(fmt.Sprintf("host missing port: %v", host)) + if len(ip) == 0 || ip.IsUnspecified() { + panic(fmt.Sprintf("host missing connect ip address: %v", ip)) + } else if port == 0 { + panic(fmt.Sprintf("host missing port: %v", port)) } var ( @@ -172,9 +168,7 @@ func Connect(host *HostInfo, cfg *ConnConfig, errorHandler ConnErrorHandler, ses } // TODO(zariel): handle ipv6 zone - translatedPeer, translatedPort := session.cfg.translateAddressPort(host.ConnectAddress(), host.Port()) - addr := (&net.TCPAddr{IP: translatedPeer, Port: translatedPort}).String() - //addr := (&net.TCPAddr{IP: host.Peer(), Port: host.Port()}).String() + addr := (&net.TCPAddr{IP: ip, Port: port}).String() if cfg.tlsConfig != nil { // the TLS config is safe to be reused by connections but it must not @@ -200,9 +194,8 @@ func Connect(host *HostInfo, cfg *ConnConfig, errorHandler ConnErrorHandler, ses compressor: cfg.Compressor, auth: cfg.Authenticator, quit: make(chan struct{}), - session: session, + session: s, streams: streams.New(cfg.ProtoVersion), - host: host, } if cfg.Keepalive > 0 { @@ -405,13 +398,20 @@ func (c *Conn) closeWithError(err error) { // if error was nil then unblock the quit channel close(c.quit) - c.conn.Close() + cerr := c.close() if err != nil { c.errorHandler.HandleError(c, err, true) + } else if cerr != nil { + // TODO(zariel): is it a good idea to do this? + c.errorHandler.HandleError(c, cerr, true) } } +func (c *Conn) close() error { + return c.conn.Close() +} + func (c *Conn) Close() { c.closeWithError(nil) } @@ -420,15 +420,9 @@ func (c *Conn) Close() { // to execute any queries. This method runs as long as the connection is // open and is therefore usually called in a separate goroutine. 
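The go-sql-driver/mysql hunks above make the driver treat Aurora's ER_OPTION_PREVENTS_STATEMENT (errno 1290) the same as errno 1792 when RejectReadOnly is set, and implement the database/sql column-metadata hooks (ColumnTypeDatabaseTypeName, ColumnTypeNullable, ColumnTypePrecisionScale, ColumnTypeScanType). A minimal sketch of how a caller sees both; the DSN, the host name, and the users table are invented for illustration:

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// rejectReadOnly=true makes the driver discard connections that return
	// errno 1792 or, with the change above, errno 1290, so a retried write
	// can land on a writable node after an Aurora failover.
	db, err := sql.Open("mysql", "user:pass@tcp(aurora-cluster:3306)/app?rejectReadOnly=true")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	rows, err := db.Query("SELECT id, name, balance FROM users LIMIT 1")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	// database/sql surfaces the new ColumnType* driver hooks here.
	cols, err := rows.ColumnTypes()
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range cols {
		nullable, _ := c.Nullable()
		prec, scale, _ := c.DecimalSize()
		fmt.Println(c.Name(), c.DatabaseTypeName(), nullable, c.ScanType(), prec, scale)
	}
}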
func (c *Conn) serve() { - var ( - err error - ) - - for { + var err error + for err == nil { err = c.recv() - if err != nil { - break - } } c.closeWithError(err) @@ -887,8 +881,9 @@ func (c *Conn) executeQuery(qry *Query) *Iter { if len(x.meta.pagingState) > 0 && !qry.disableAutoPage { iter.next = &nextIter{ - qry: *qry, - pos: int((1 - qry.prefetch) * float64(x.numRows)), + qry: *qry, + pos: int((1 - qry.prefetch) * float64(x.numRows)), + conn: c, } iter.next.qry.pageState = copyBytes(x.meta.pagingState) @@ -1100,7 +1095,7 @@ func (c *Conn) query(statement string, values ...interface{}) (iter *Iter) { func (c *Conn) awaitSchemaAgreement() (err error) { const ( - peerSchemas = "SELECT schema_version FROM system.peers" + peerSchemas = "SELECT schema_version, peer FROM system.peers" localSchemas = "SELECT schema_version FROM system.local WHERE key='local'" ) @@ -1113,9 +1108,10 @@ func (c *Conn) awaitSchemaAgreement() (err error) { versions = make(map[string]struct{}) var schemaVersion string - for iter.Scan(&schemaVersion) { + var peer string + for iter.Scan(&schemaVersion, &peer) { if schemaVersion == "" { - Logger.Println("skipping peer entry with empty schema_version") + Logger.Printf("skipping peer entry with empty schema_version: peer=%q", peer) continue } @@ -1158,6 +1154,23 @@ func (c *Conn) awaitSchemaAgreement() (err error) { return fmt.Errorf("gocql: cluster schema versions not consistent: %+v", schemas) } +const localHostInfo = "SELECT * FROM system.local WHERE key='local'" + +func (c *Conn) localHostInfo() (*HostInfo, error) { + row, err := c.query(localHostInfo).rowMap() + if err != nil { + return nil, err + } + + // TODO(zariel): avoid doing this here + host, err := c.session.hostInfoFromMap(row) + if err != nil { + return nil, err + } + + return c.session.ring.addOrUpdate(host), nil +} + var ( ErrQueryArgLength = errors.New("gocql: query argument length mismatch") ErrTimeoutNoResponse = errors.New("gocql: no response received from cassandra within timeout period") diff --git a/vendor/github.com/gocql/gocql/control.go b/vendor/github.com/gocql/gocql/control.go index 1f7424137d..3aa9d61ce6 100644 --- a/vendor/github.com/gocql/gocql/control.go +++ b/vendor/github.com/gocql/gocql/control.go @@ -7,6 +7,7 @@ import ( "fmt" "math/rand" "net" + "os" "regexp" "strconv" "sync" @@ -31,13 +32,15 @@ func init() { // Ensure that the atomic variable is aligned to a 64bit boundary // so that atomic operations can be applied on 32bit architectures. 
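In the executeQuery paging hunk above, the prefetched nextIter now carries the conn that served the current page, so the follow-up page can be attempted on the same connection before falling back to the normal execution path. Application code drives this through the usual paging API; a small sketch, assuming an existing session and a hypothetical events table:

package example

import (
	"log"

	"github.com/gocql/gocql"
)

// pageThroughEvents iterates a hypothetical events table; gocql fetches
// subsequent pages transparently as the iterator advances past the
// prefetch threshold of the current page.
func pageThroughEvents(session *gocql.Session) {
	iter := session.Query(`SELECT id, payload FROM events`).PageSize(500).Iter()

	var id gocql.UUID
	var payload string
	for iter.Scan(&id, &payload) {
		// process the row here
	}
	if err := iter.Close(); err != nil {
		log.Fatal(err)
	}
}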
type controlConn struct { + started int32 + reconnecting int32 + session *Session conn atomic.Value retry RetryPolicy - started int32 - quit chan struct{} + quit chan struct{} } func createControlConn(session *Session) *controlConn { @@ -47,7 +50,7 @@ func createControlConn(session *Session) *controlConn { retry: &SimpleRetryPolicy{NumRetries: 3}, } - control.conn.Store((*Conn)(nil)) + control.conn.Store((*connHost)(nil)) return control } @@ -58,12 +61,16 @@ func (c *controlConn) heartBeat() { } sleepTime := 1 * time.Second + timer := time.NewTimer(sleepTime) + defer timer.Stop() for { + timer.Reset(sleepTime) + select { case <-c.quit: return - case <-time.After(sleepTime): + case <-timer.C: } resp, err := c.writeFrame(&writeOptionsFrame{}) @@ -86,12 +93,11 @@ func (c *controlConn) heartBeat() { // try to connect a bit faster sleepTime = 1 * time.Second c.reconnect(true) - // time.Sleep(5 * time.Second) continue } } -var hostLookupPreferV4 = false +var hostLookupPreferV4 = os.Getenv("GOCQL_HOST_LOOKUP_PREFER_V4") == "true" func hostInfo(addr string, defaultPort int) (*HostInfo, error) { var port int @@ -197,14 +203,20 @@ func (c *controlConn) discoverProtocol(hosts []*HostInfo) (int, error) { handler := connErrorHandlerFn(func(c *Conn, err error, closed bool) { // we should never get here, but if we do it means we connected to a // host successfully which means our attempted protocol version worked + if !closed { + c.Close() + } }) var err error for _, host := range hosts { var conn *Conn - conn, err = Connect(host, &connCfg, handler, c.session) - if err == nil { + conn, err = c.session.dial(host.ConnectAddress(), host.Port(), &connCfg, handler) + if conn != nil { conn.Close() + } + + if err == nil { return connCfg.ProtoVersion, nil } @@ -239,35 +251,31 @@ func (c *controlConn) connect(hosts []*HostInfo) error { return nil } +type connHost struct { + conn *Conn + host *HostInfo +} + func (c *controlConn) setupConn(conn *Conn) error { if err := c.registerEvents(conn); err != nil { conn.Close() return err } - c.conn.Store(conn) - - if v, ok := conn.conn.RemoteAddr().(*net.TCPAddr); ok { - c.session.handleNodeUp(copyBytes(v.IP), v.Port, false) - return nil - } - - host, portstr, err := net.SplitHostPort(conn.conn.RemoteAddr().String()) + // TODO(zariel): do we need to fetch host info everytime + // the control conn connects? Surely we have it cached? 
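The heartbeat loop above swaps time.After for a single reused time.Timer (and hostLookupPreferV4 becomes switchable via the GOCQL_HOST_LOOKUP_PREFER_V4 environment variable). Reusing one timer avoids allocating a fresh timer on every iteration; a generic sketch of the same pattern, not tied to gocql:

package example

import "time"

// heartbeat calls fn every interval until quit is closed. The single Timer
// is reset only after its channel has been drained, which is the safe way
// to reuse it; time.After inside the loop would allocate a new timer on
// every pass.
func heartbeat(interval time.Duration, quit <-chan struct{}, fn func()) {
	timer := time.NewTimer(interval)
	defer timer.Stop()

	for {
		select {
		case <-quit:
			return
		case <-timer.C:
		}

		fn()
		timer.Reset(interval)
	}
}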
+ host, err := conn.localHostInfo() if err != nil { return err } - port, err := strconv.Atoi(portstr) - if err != nil { - return err + ch := &connHost{ + conn: conn, + host: host, } - ip := net.ParseIP(host) - if ip == nil { - return fmt.Errorf("invalid remote addr: addr=%v host=%q", conn.conn.RemoteAddr(), host) - } - - c.session.handleNodeUp(ip, port, false) + c.conn.Store(ch) + c.session.handleNodeUp(host.ConnectAddress(), host.Port(), false) return nil } @@ -308,14 +316,18 @@ func (c *controlConn) registerEvents(conn *Conn) error { } func (c *controlConn) reconnect(refreshring bool) { + if !atomic.CompareAndSwapInt32(&c.reconnecting, 0, 1) { + return + } + defer atomic.StoreInt32(&c.reconnecting, 0) // TODO: simplify this function, use session.ring to get hosts instead of the // connection pool var host *HostInfo - oldConn := c.conn.Load().(*Conn) - if oldConn != nil { - host = oldConn.host - oldConn.Close() + ch := c.getConn() + if ch != nil { + host = ch.host + ch.conn.Close() } var newConn *Conn @@ -364,21 +376,25 @@ func (c *controlConn) HandleError(conn *Conn, err error, closed bool) { return } - oldConn := c.conn.Load().(*Conn) - if oldConn != conn { + oldConn := c.getConn() + if oldConn.conn != conn { return } c.reconnect(true) } +func (c *controlConn) getConn() *connHost { + return c.conn.Load().(*connHost) +} + func (c *controlConn) writeFrame(w frameWriter) (frame, error) { - conn := c.conn.Load().(*Conn) - if conn == nil { + ch := c.getConn() + if ch == nil { return nil, errNoControl } - framer, err := conn.exec(context.Background(), w, nil) + framer, err := ch.conn.exec(context.Background(), w, nil) if err != nil { return nil, err } @@ -386,13 +402,13 @@ func (c *controlConn) writeFrame(w frameWriter) (frame, error) { return framer.parseFrame() } -func (c *controlConn) withConn(fn func(*Conn) *Iter) *Iter { +func (c *controlConn) withConnHost(fn func(*connHost) *Iter) *Iter { const maxConnectAttempts = 5 connectAttempts := 0 for i := 0; i < maxConnectAttempts; i++ { - conn := c.conn.Load().(*Conn) - if conn == nil { + ch := c.getConn() + if ch == nil { if connectAttempts > maxConnectAttempts { break } @@ -403,12 +419,18 @@ func (c *controlConn) withConn(fn func(*Conn) *Iter) *Iter { continue } - return fn(conn) + return fn(ch) } return &Iter{err: errNoControl} } +func (c *controlConn) withConn(fn func(*Conn) *Iter) *Iter { + return c.withConnHost(func(ch *connHost) *Iter { + return fn(ch.conn) + }) +} + // query will return nil if the connection is closed or nil func (c *controlConn) query(statement string, values ...interface{}) (iter *Iter) { q := c.session.Query(statement, values...).Consistency(One).RoutingKey([]byte{}).Trace(nil) @@ -437,21 +459,14 @@ func (c *controlConn) awaitSchemaAgreement() error { }).err } -func (c *controlConn) GetHostInfo() *HostInfo { - conn := c.conn.Load().(*Conn) - if conn == nil { - return nil - } - return conn.host -} - func (c *controlConn) close() { if atomic.CompareAndSwapInt32(&c.started, 1, -1) { c.quit <- struct{}{} } - conn := c.conn.Load().(*Conn) - if conn != nil { - conn.Close() + + ch := c.getConn() + if ch != nil { + ch.conn.Close() } } diff --git a/vendor/github.com/gocql/gocql/events.go b/vendor/github.com/gocql/gocql/events.go index 78daa76b41..e6d28a19b7 100644 --- a/vendor/github.com/gocql/gocql/events.go +++ b/vendor/github.com/gocql/gocql/events.go @@ -173,16 +173,30 @@ func (s *Session) handleNodeEvent(frames []frame) { } } -func (s *Session) handleNewNode(ip net.IP, port int, waitForBinary bool) { - // Get host info 
and apply any filters to the host - hostInfo, err := s.hostSource.GetHostInfo(ip, port) - if err != nil { - Logger.Printf("gocql: events: unable to fetch host info for (%s:%d): %v\n", ip, port, err) +func (s *Session) addNewNode(host *HostInfo) { + if s.cfg.filterHost(host) { return } - // If hostInfo is nil, this host was filtered out by cfg.HostFilter - if hostInfo == nil { + host.setState(NodeUp) + s.pool.addHost(host) + s.policy.AddHost(host) +} + +func (s *Session) handleNewNode(ip net.IP, port int, waitForBinary bool) { + if gocqlDebug { + Logger.Printf("gocql: Session.handleNewNode: %s:%d\n", ip.String(), port) + } + + ip, port = s.cfg.translateAddressPort(ip, port) + + // Get host info and apply any filters to the host + hostInfo, err := s.hostSource.getHostInfo(ip, port) + if err != nil { + Logger.Printf("gocql: events: unable to fetch host info for (%s:%d): %v\n", ip, port, err) + return + } else if hostInfo == nil { + // If hostInfo is nil, this host was filtered out by cfg.HostFilter return } @@ -191,20 +205,23 @@ func (s *Session) handleNewNode(ip net.IP, port int, waitForBinary bool) { } // should this handle token moving? - if existing, ok := s.ring.addHostIfMissing(hostInfo); ok { - existing.update(hostInfo) - hostInfo = existing - } + hostInfo = s.ring.addOrUpdate(hostInfo) + + s.addNewNode(hostInfo) - s.pool.addHost(hostInfo) - s.policy.AddHost(hostInfo) - hostInfo.setState(NodeUp) if s.control != nil && !s.cfg.IgnorePeerAddr { + // TODO(zariel): debounce ring refresh s.hostSource.refreshRing() } } func (s *Session) handleRemovedNode(ip net.IP, port int) { + if gocqlDebug { + Logger.Printf("gocql: Session.handleRemovedNode: %s:%d\n", ip.String(), port) + } + + ip, port = s.cfg.translateAddressPort(ip, port) + // we remove all nodes but only add ones which pass the filter host := s.ring.getHost(ip) if host == nil { @@ -225,34 +242,30 @@ func (s *Session) handleRemovedNode(ip net.IP, port int) { } } -func (s *Session) handleNodeUp(ip net.IP, port int, waitForBinary bool) { +func (s *Session) handleNodeUp(eventIp net.IP, eventPort int, waitForBinary bool) { if gocqlDebug { - Logger.Printf("gocql: Session.handleNodeUp: %s:%d\n", ip.String(), port) + Logger.Printf("gocql: Session.handleNodeUp: %s:%d\n", eventIp.String(), eventPort) } + ip, _ := s.cfg.translateAddressPort(eventIp, eventPort) + host := s.ring.getHost(ip) - if host != nil { - // If we receive a node up event and user has asked us to ignore the peer address use - // the address provide by the event instead the address provide by peer the table. 
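addNewNode and the event handlers above now run discovered hosts through cfg.filterHost, so hosts rejected by the filter never reach the pool or the selection policy. The filter itself is supplied by the application; a minimal sketch using the stock WhiteListHostFilter (a datacenter-based filter such as DataCentreHostFilter is wired up the same way), with made-up addresses:

package example

import "github.com/gocql/gocql"

// newFilteredCluster builds a ClusterConfig whose HostFilter only admits
// the listed addresses; hosts discovered via topology events that fail the
// filter are ignored, as in the event-handling hunks above.
func newFilteredCluster() *gocql.ClusterConfig {
	cluster := gocql.NewCluster("10.0.0.1", "10.0.0.2")
	cluster.HostFilter = gocql.WhiteListHostFilter("10.0.0.1", "10.0.0.2")
	return cluster
}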
- if s.cfg.IgnorePeerAddr && !host.ConnectAddress().Equal(ip) { - host.SetConnectAddress(ip) - } - - if s.cfg.HostFilter != nil && !s.cfg.HostFilter.Accept(host) { - return - } - - if t := host.Version().nodeUpDelay(); t > 0 && waitForBinary { - time.Sleep(t) - } - - s.pool.hostUp(host) - s.policy.HostUp(host) - host.setState(NodeUp) + if host == nil { + // TODO(zariel): avoid the need to translate twice in this + // case + s.handleNewNode(eventIp, eventPort, waitForBinary) return } - s.handleNewNode(ip, port, waitForBinary) + if s.cfg.HostFilter != nil && !s.cfg.HostFilter.Accept(host) { + return + } + + if t := host.Version().nodeUpDelay(); t > 0 && waitForBinary { + time.Sleep(t) + } + + s.addNewNode(host) } func (s *Session) handleNodeDown(ip net.IP, port int) { diff --git a/vendor/github.com/gocql/gocql/frame.go b/vendor/github.com/gocql/gocql/frame.go index 897fb722b6..a8a6b2f3f8 100644 --- a/vendor/github.com/gocql/gocql/frame.go +++ b/vendor/github.com/gocql/gocql/frame.go @@ -196,45 +196,61 @@ func (c Consistency) String() string { } } -func ParseConsistency(s string) Consistency { - switch strings.ToUpper(s) { +func (c Consistency) MarshalText() (text []byte, err error) { + return []byte(c.String()), nil +} + +func (c *Consistency) UnmarshalText(text []byte) error { + switch string(text) { case "ANY": - return Any + *c = Any case "ONE": - return One + *c = One case "TWO": - return Two + *c = Two case "THREE": - return Three + *c = Three case "QUORUM": - return Quorum + *c = Quorum case "ALL": - return All + *c = All case "LOCAL_QUORUM": - return LocalQuorum + *c = LocalQuorum case "EACH_QUORUM": - return EachQuorum + *c = EachQuorum case "LOCAL_ONE": - return LocalOne + *c = LocalOne default: - panic("invalid consistency: " + s) + return fmt.Errorf("invalid consistency %q", string(text)) } + + return nil +} + +func ParseConsistency(s string) Consistency { + var c Consistency + if err := c.UnmarshalText([]byte(strings.ToUpper(s))); err != nil { + panic(err) + } + return c } // ParseConsistencyWrapper wraps gocql.ParseConsistency to provide an err // return instead of a panic func ParseConsistencyWrapper(s string) (consistency Consistency, err error) { - defer func() { - if r := recover(); r != nil { - var ok bool - err, ok = r.(error) - if !ok { - err = fmt.Errorf("ParseConsistencyWrapper: %v", r) - } - } - }() - consistency = ParseConsistency(s) - return consistency, nil + err = consistency.UnmarshalText([]byte(strings.ToUpper(s))) + return +} + +// MustParseConsistency is the same as ParseConsistency except it returns +// an error (never). It is kept here since breaking changes are not good. +// DEPRECATED: use ParseConsistency if you want a panic on parse error. +func MustParseConsistency(s string) (Consistency, error) { + c, err := ParseConsistencyWrapper(s) + if err != nil { + panic(err) + } + return c, nil } type SerialConsistency uint16 @@ -255,6 +271,23 @@ func (s SerialConsistency) String() string { } } +func (s SerialConsistency) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *SerialConsistency) UnmarshalText(text []byte) error { + switch string(text) { + case "SERIAL": + *s = Serial + case "LOCAL_SERIAL": + *s = LocalSerial + default: + return fmt.Errorf("invalid consistency %q", string(text)) + } + + return nil +} + const ( apacheCassandraTypePrefix = "org.apache.cassandra.db.marshal." 
) diff --git a/vendor/github.com/gocql/gocql/helpers.go b/vendor/github.com/gocql/gocql/helpers.go index cb23dad9d5..944517e289 100644 --- a/vendor/github.com/gocql/gocql/helpers.go +++ b/vendor/github.com/gocql/gocql/helpers.go @@ -205,30 +205,43 @@ func (iter *Iter) RowData() (RowData, error) { return RowData{}, iter.err } - columns := make([]string, 0) - values := make([]interface{}, 0) + columns := make([]string, 0, len(iter.Columns())) + values := make([]interface{}, 0, len(iter.Columns())) for _, column := range iter.Columns() { - - switch c := column.TypeInfo.(type) { - case TupleTypeInfo: + if c, ok := column.TypeInfo.(TupleTypeInfo); !ok { + val := column.TypeInfo.New() + columns = append(columns, column.Name) + values = append(values, val) + } else { for i, elem := range c.Elems { columns = append(columns, TupleColumnName(column.Name, i)) values = append(values, elem.New()) } - default: - val := column.TypeInfo.New() - columns = append(columns, column.Name) - values = append(values, val) } } + rowData := RowData{ Columns: columns, Values: values, } + return rowData, nil } +// TODO(zariel): is it worth exporting this? +func (iter *Iter) rowMap() (map[string]interface{}, error) { + if iter.err != nil { + return nil, iter.err + } + + rowData, _ := iter.RowData() + iter.Scan(rowData.Values...) + m := make(map[string]interface{}, len(rowData.Columns)) + rowData.rowMap(m) + return m, nil +} + // SliceMap is a helper function to make the API easier to use // returns the data from the query in the form of []map[string]interface{} func (iter *Iter) SliceMap() ([]map[string]interface{}, error) { @@ -240,7 +253,7 @@ func (iter *Iter) SliceMap() ([]map[string]interface{}, error) { rowData, _ := iter.RowData() dataToReturn := make([]map[string]interface{}, 0) for iter.Scan(rowData.Values...) { - m := make(map[string]interface{}) + m := make(map[string]interface{}, len(rowData.Columns)) rowData.rowMap(m) dataToReturn = append(dataToReturn, m) } diff --git a/vendor/github.com/gocql/gocql/host_source.go b/vendor/github.com/gocql/gocql/host_source.go index bda6e0a29d..8422a0b50f 100644 --- a/vendor/github.com/gocql/gocql/host_source.go +++ b/vendor/github.com/gocql/gocql/host_source.go @@ -183,6 +183,7 @@ func (h *HostInfo) ConnectAddress() net.IP { } func (h *HostInfo) SetConnectAddress(address net.IP) *HostInfo { + // TODO(zariel): should this not be exported? 
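The frame.go hunk above gives Consistency (and SerialConsistency) MarshalText/UnmarshalText implementations and rebuilds ParseConsistencyWrapper on top of them, returning an error instead of recovering from a panic. That lets consistency levels round-trip through text-based configuration; a sketch, where the queryConfig struct and the JSON input are invented for illustration:

package example

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/gocql/gocql"
)

type queryConfig struct {
	// Decoded through Consistency.UnmarshalText, since the field type
	// implements encoding.TextUnmarshaler and the JSON value is a string.
	Consistency gocql.Consistency `json:"consistency"`
}

func parseConsistencyExample() {
	var cfg queryConfig
	if err := json.Unmarshal([]byte(`{"consistency":"LOCAL_QUORUM"}`), &cfg); err != nil {
		log.Fatal(err)
	}

	// ParseConsistencyWrapper now reports bad input as an error rather than
	// recovering from a panic inside ParseConsistency.
	c, err := gocql.ParseConsistencyWrapper("quorum")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Consistency, c)
}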
h.mu.Lock() defer h.mu.Unlock() h.connectAddress = address @@ -338,10 +339,24 @@ func (h *HostInfo) update(from *HostInfo) { h.mu.Lock() defer h.mu.Unlock() - h.tokens = from.tokens - h.version = from.version - h.hostId = from.hostId + h.peer = from.peer + h.broadcastAddress = from.broadcastAddress + h.listenAddress = from.listenAddress + h.rpcAddress = from.rpcAddress + h.preferredIP = from.preferredIP + h.connectAddress = from.connectAddress + h.port = from.port h.dataCenter = from.dataCenter + h.rack = from.rack + h.hostId = from.hostId + h.workload = from.workload + h.graph = from.graph + h.dseVersion = from.dseVersion + h.partitioner = from.partitioner + h.clusterName = from.clusterName + h.version = from.version + h.state = from.state + h.tokens = from.tokens } func (h *HostInfo) IsUp() bool { @@ -366,7 +381,6 @@ type ringDescriber struct { session *Session mu sync.Mutex prevHosts []*HostInfo - localHost *HostInfo prevPartitioner string } @@ -388,13 +402,13 @@ func checkSystemSchema(control *controlConn) (bool, error) { // Given a map that represents a row from either system.local or system.peers // return as much information as we can in *HostInfo -func (r *ringDescriber) hostInfoFromMap(row map[string]interface{}) (*HostInfo, error) { +func (s *Session) hostInfoFromMap(row map[string]interface{}) (*HostInfo, error) { const assertErrorMsg = "Assertion failed for %s" var ok bool // Default to our connected port if the cluster doesn't have port information host := HostInfo{ - port: r.session.cfg.Port, + port: s.cfg.Port, } for key, value := range row { @@ -486,86 +500,51 @@ func (r *ringDescriber) hostInfoFromMap(row map[string]interface{}) (*HostInfo, // Not sure what the port field will be called until the JIRA issue is complete } + ip, port := s.cfg.translateAddressPort(host.ConnectAddress(), host.port) + host.connectAddress = ip + host.port = port + return &host, nil } -// Ask the control node for it's local host information -func (r *ringDescriber) GetLocalHostInfo() (*HostInfo, error) { - it := r.session.control.query("SELECT * FROM system.local WHERE key='local'") - if it == nil { - return nil, errors.New("Attempted to query 'system.local' on a closed control connection") - } - host, err := r.extractHostInfo(it) - if err != nil { - return nil, err - } - - if host.invalidConnectAddr() { - host.SetConnectAddress(r.session.control.GetHostInfo().ConnectAddress()) - } - - return host, nil -} - -// Given an ip address and port, return a peer that matched the ip address -func (r *ringDescriber) GetPeerHostInfo(ip net.IP, port int) (*HostInfo, error) { - it := r.session.control.query("SELECT * FROM system.peers WHERE peer=?", ip) - if it == nil { - return nil, errors.New("Attempted to query 'system.peers' on a closed control connection") - } - return r.extractHostInfo(it) -} - -func (r *ringDescriber) extractHostInfo(it *Iter) (*HostInfo, error) { - row := make(map[string]interface{}) - - // expect only 1 row - it.MapScan(row) - if err := it.Close(); err != nil { - return nil, err - } - - // extract all available info about the host - return r.hostInfoFromMap(row) -} - // Ask the control node for host info on all it's known peers -func (r *ringDescriber) GetClusterPeerInfo() ([]*HostInfo, error) { +func (r *ringDescriber) getClusterPeerInfo() ([]*HostInfo, error) { var hosts []*HostInfo + iter := r.session.control.withConnHost(func(ch *connHost) *Iter { + hosts = append(hosts, ch.host) + return ch.conn.query("SELECT * FROM system.peers") + }) - // Ask the node for a list of it's peers 
- it := r.session.control.query("SELECT * FROM system.peers") - if it == nil { - return nil, errors.New("Attempted to query 'system.peers' on a closed connection") + if iter == nil { + return nil, errNoControl } - for { - row := make(map[string]interface{}) - if !it.MapScan(row) { - break - } + rows, err := iter.SliceMap() + if err != nil { + // TODO(zariel): make typed error + return nil, fmt.Errorf("unable to fetch peer host info: %s", err) + } + + for _, row := range rows { // extract all available info about the peer - host, err := r.hostInfoFromMap(row) + host, err := r.session.hostInfoFromMap(row) if err != nil { return nil, err - } - - // If it's not a valid peer - if !r.IsValidPeer(host) { - Logger.Printf("Found invalid peer '%+v' "+ + } else if !isValidPeer(host) { + // If it's not a valid peer + Logger.Printf("Found invalid peer '%s' "+ "Likely due to a gossip or snitch issue, this host will be ignored", host) continue } + hosts = append(hosts, host) } - if it.err != nil { - return nil, fmt.Errorf("while scanning 'system.peers' table: %s", it.err) - } + return hosts, nil } // Return true if the host is a valid peer -func (r *ringDescriber) IsValidPeer(host *HostInfo) bool { +func isValidPeer(host *HostInfo) bool { return !(len(host.RPCAddress()) == 0 || host.hostId == "" || host.dataCenter == "" || @@ -578,84 +557,58 @@ func (r *ringDescriber) GetHosts() ([]*HostInfo, string, error) { r.mu.Lock() defer r.mu.Unlock() - // Update the localHost info with data from the connected host - localHost, err := r.GetLocalHostInfo() - if err != nil { - return r.prevHosts, r.prevPartitioner, err - } else if localHost.invalidConnectAddr() { - panic(fmt.Sprintf("unable to get localhost connect address: %v", localHost)) - } - - // Update our list of hosts by querying the cluster - hosts, err := r.GetClusterPeerInfo() + hosts, err := r.getClusterPeerInfo() if err != nil { return r.prevHosts, r.prevPartitioner, err } - hosts = append(hosts, localHost) - - // Filter the hosts if filter is provided - filteredHosts := hosts - if r.session.cfg.HostFilter != nil { - filteredHosts = filteredHosts[:0] - for _, host := range hosts { - if r.session.cfg.HostFilter.Accept(host) { - filteredHosts = append(filteredHosts, host) - } - } + var partitioner string + if len(hosts) > 0 { + partitioner = hosts[0].Partitioner() } - r.prevHosts = filteredHosts - r.prevPartitioner = localHost.partitioner - r.localHost = localHost - - return filteredHosts, localHost.partitioner, nil + return hosts, partitioner, nil } // Given an ip/port return HostInfo for the specified ip/port -func (r *ringDescriber) GetHostInfo(ip net.IP, port int) (*HostInfo, error) { - // TODO(thrawn01): Is IgnorePeerAddr still useful now that we have DisableInitialHostLookup? - // TODO(thrawn01): should we also check for DisableInitialHostLookup and return if true? 
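getClusterPeerInfo above now drains system.peers through Iter.SliceMap instead of a hand-rolled MapScan loop. The same helper is available to applications; a sketch that lists peers from an existing session, assuming the standard system.peers column layout:

package example

import (
	"fmt"
	"log"

	"github.com/gocql/gocql"
)

// listPeers mirrors what the ring describer does above: pull system.peers
// as generic rows and inspect them, here via the public SliceMap helper.
func listPeers(session *gocql.Session) {
	rows, err := session.Query(
		`SELECT peer, data_center, rack, host_id FROM system.peers`,
	).Iter().SliceMap()
	if err != nil {
		log.Fatal(err)
	}
	for _, row := range rows {
		fmt.Printf("peer=%v dc=%v rack=%v host_id=%v\n",
			row["peer"], row["data_center"], row["rack"], row["host_id"])
	}
}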
+func (r *ringDescriber) getHostInfo(ip net.IP, port int) (*HostInfo, error) { + var host *HostInfo + iter := r.session.control.withConnHost(func(ch *connHost) *Iter { + if ch.host.ConnectAddress().Equal(ip) { + host = ch.host + return nil + } - // Ignore the port and connect address and use the address/port we already have - if r.session.control == nil || r.session.cfg.IgnorePeerAddr { - return &HostInfo{connectAddress: ip, port: port}, nil + return ch.conn.query("SELECT * FROM system.peers") + }) + + if iter != nil { + rows, err := iter.SliceMap() + if err != nil { + return nil, err + } + + for _, row := range rows { + h, err := r.session.hostInfoFromMap(row) + if err != nil { + return nil, err + } + + if host.ConnectAddress().Equal(ip) { + host = h + break + } + } + + if host == nil { + return nil, errors.New("host not found in peers table") + } } - // Attempt to get the host info for our control connection - controlHost := r.session.control.GetHostInfo() - if controlHost == nil { - return nil, errors.New("invalid control connection") - } - - var ( - host *HostInfo - err error - ) - - // If we are asking about the same node our control connection has a connection too - if controlHost.ConnectAddress().Equal(ip) { - host, err = r.GetLocalHostInfo() - } else { - host, err = r.GetPeerHostInfo(ip, port) - } - - // No host was found matching this ip/port - if err != nil { - return nil, err - } - - if controlHost.ConnectAddress().Equal(ip) { - // Always respect the provided control node address and disregard the ip address - // the cassandra node provides. We do this as we are already connected and have a - // known valid ip address. This insulates gocql from client connection issues stemming - // from node misconfiguration. For instance when a node is run from a container, by - // default the node will report its ip address as 127.0.0.1 which is typically invalid. - host.SetConnectAddress(ip) - } - - if host.invalidConnectAddr() { - return nil, fmt.Errorf("host ConnectAddress invalid: %v", host) + if host == nil { + return nil, errors.New("unable to fetch host info: invalid control connection") + } else if host.invalidConnectAddr() { + return nil, fmt.Errorf("host ConnectAddress invalid ip=%v: %v", ip, host) } return host, nil @@ -675,6 +628,10 @@ func (r *ringDescriber) refreshRing() error { // TODO: move this to session for _, h := range hosts { + if filter := r.session.cfg.HostFilter; filter != nil && !filter.Accept(h) { + continue + } + if host, ok := r.session.ring.addHostIfMissing(h); !ok { r.session.pool.addHost(h) r.session.policy.AddHost(h) diff --git a/vendor/github.com/gocql/gocql/marshal.go b/vendor/github.com/gocql/gocql/marshal.go index a33212053c..0383fed7cb 100644 --- a/vendor/github.com/gocql/gocql/marshal.go +++ b/vendor/github.com/gocql/gocql/marshal.go @@ -1667,6 +1667,16 @@ func marshalTuple(info TypeInfo, value interface{}) ([]byte, error) { return nil, marshalErrorf("cannot marshal %T into %s", value, tuple) } +func readBytes(p []byte) ([]byte, []byte) { + // TODO: really should use a framer + size := readInt(p) + p = p[4:] + if size < 0 { + return nil, p + } + return p[:size], p[size:] +} + // currently only support unmarshal into a list of values, this makes it possible // to support tuples without changing the query API. In the future this can be extend // to allow unmarshalling into custom tuple types. 
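readBytes above factors out the CQL [bytes] framing used by the tuple and UDT unmarshallers: a signed 32-bit big-endian length, then that many bytes, with a negative length meaning null. A standalone sketch of the same framing (no bounds checking, matching the vendored helper):

package example

import "encoding/binary"

// readBytes splits one CQL [bytes] value off the front of p: a big-endian
// int32 length followed by that many bytes, where a negative length means
// null. It returns the value (nil for null) and the remaining input.
func readBytes(p []byte) (val, rest []byte) {
	size := int32(binary.BigEndian.Uint32(p))
	p = p[4:]
	if size < 0 {
		return nil, p
	}
	return p[:size], p[size:]
}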
@@ -1680,14 +1690,13 @@ func unmarshalTuple(info TypeInfo, data []byte, value interface{}) error { case []interface{}: for i, elem := range tuple.Elems { // each element inside data is a [bytes] - size := readInt(data) - data = data[4:] + var p []byte + p, data = readBytes(data) - err := Unmarshal(elem, data[:size], v[i]) + err := Unmarshal(elem, p, v[i]) if err != nil { return err } - data = data[size:] } return nil @@ -1864,18 +1873,11 @@ func unmarshalUDT(info TypeInfo, data []byte, value interface{}) error { if len(data) == 0 { return nil } - size := readInt(data[:4]) - data = data[4:] - var err error - if size < 0 { - err = v.UnmarshalUDT(e.Name, e.Type, nil) - } else { - err = v.UnmarshalUDT(e.Name, e.Type, data[:size]) - data = data[size:] - } + var p []byte + p, data = readBytes(data) - if err != nil { + if err := v.UnmarshalUDT(e.Name, e.Type, p); err != nil { return err } } @@ -1905,20 +1907,13 @@ func unmarshalUDT(info TypeInfo, data []byte, value interface{}) error { if len(data) == 0 { return nil } - size := readInt(data[:4]) - data = data[4:] val := reflect.New(goType(e.Type)) - var err error - if size < 0 { - err = Unmarshal(e.Type, nil, val.Interface()) - } else { - err = Unmarshal(e.Type, data[:size], val.Interface()) - data = data[size:] - } + var p []byte + p, data = readBytes(data) - if err != nil { + if err := Unmarshal(e.Type, p, val.Interface()); err != nil { return err } @@ -1958,30 +1953,26 @@ func unmarshalUDT(info TypeInfo, data []byte, value interface{}) error { return nil } - size := readInt(data[:4]) - data = data[4:] + var p []byte + p, data = readBytes(data) - if size >= 0 { - f, ok := fields[e.Name] - if !ok { - f = k.FieldByName(e.Name) - if f == emptyValue { - // skip fields which exist in the UDT but not in - // the struct passed in - data = data[size:] // Skip over this data to go to next - continue - } + f, ok := fields[e.Name] + if !ok { + f = k.FieldByName(e.Name) + if f == emptyValue { + // skip fields which exist in the UDT but not in + // the struct passed in + continue } + } - if !f.IsValid() || !f.CanAddr() { - return unmarshalErrorf("cannot unmarshal %s into %T: field %v is not valid", info, value, e.Name) - } + if !f.IsValid() || !f.CanAddr() { + return unmarshalErrorf("cannot unmarshal %s into %T: field %v is not valid", info, value, e.Name) + } - fk := f.Addr().Interface() - if err := Unmarshal(e.Type, data[:size], fk); err != nil { - return err - } - data = data[size:] + fk := f.Addr().Interface() + if err := Unmarshal(e.Type, p, fk); err != nil { + return err } } diff --git a/vendor/github.com/gocql/gocql/policies.go b/vendor/github.com/gocql/gocql/policies.go index b53bd45fbe..0001db1181 100644 --- a/vendor/github.com/gocql/gocql/policies.go +++ b/vendor/github.com/gocql/gocql/policies.go @@ -187,6 +187,9 @@ func (e *ExponentialBackoffRetryPolicy) napTime(attempts int) time.Duration { napDuration := minFloat * math.Pow(2, float64(attempts-1)) // add some jitter napDuration += rand.Float64()*minFloat - (minFloat / 2) + if napDuration > float64(e.Max) { + return time.Duration(e.Max) + } return time.Duration(napDuration) } @@ -294,6 +297,9 @@ type tokenAwareHostPolicy struct { } func (t *tokenAwareHostPolicy) SetPartitioner(partitioner string) { + t.mu.Lock() + defer t.mu.Unlock() + if t.partitioner != partitioner { t.fallback.SetPartitioner(partitioner) t.partitioner = partitioner @@ -303,6 +309,9 @@ func (t *tokenAwareHostPolicy) SetPartitioner(partitioner string) { } func (t *tokenAwareHostPolicy) AddHost(host *HostInfo) { + t.mu.Lock() + 
defer t.mu.Unlock() + t.hosts.add(host) t.fallback.AddHost(host) @@ -310,6 +319,9 @@ func (t *tokenAwareHostPolicy) AddHost(host *HostInfo) { } func (t *tokenAwareHostPolicy) RemoveHost(host *HostInfo) { + t.mu.Lock() + defer t.mu.Unlock() + t.hosts.remove(host.ConnectAddress()) t.fallback.RemoveHost(host) @@ -325,9 +337,6 @@ func (t *tokenAwareHostPolicy) HostDown(host *HostInfo) { } func (t *tokenAwareHostPolicy) resetTokenRing() { - t.mu.Lock() - defer t.mu.Unlock() - if t.partitioner == "" { // partitioner not yet set return @@ -537,43 +546,36 @@ func (host selectedHostPoolHost) Mark(err error) { } type dcAwareRR struct { - local string - + local string + pos uint32 mu sync.RWMutex - localHosts map[string]*HostInfo - remoteHosts map[string]*HostInfo + localHosts cowHostList + remoteHosts cowHostList } -// DCAwareRoundRobinPolicy is a host selection policies which will priorities and +// DCAwareRoundRobinPolicy is a host selection policies which will prioritize and // return hosts which are in the local datacentre before returning hosts in all // other datercentres func DCAwareRoundRobinPolicy(localDC string) HostSelectionPolicy { return &dcAwareRR{ - local: localDC, - localHosts: make(map[string]*HostInfo), - remoteHosts: make(map[string]*HostInfo), + local: localDC, } } func (d *dcAwareRR) AddHost(host *HostInfo) { - d.mu.Lock() - if host.DataCenter() == d.local { - d.localHosts[host.HostID()] = host + d.localHosts.add(host) } else { - d.remoteHosts[host.HostID()] = host + d.remoteHosts.add(host) } - - d.mu.Unlock() } func (d *dcAwareRR) RemoveHost(host *HostInfo) { - d.mu.Lock() - - delete(d.localHosts, host.HostID()) - delete(d.remoteHosts, host.HostID()) - - d.mu.Unlock() + if host.DataCenter() == d.local { + d.localHosts.remove(host.ConnectAddress()) + } else { + d.remoteHosts.remove(host.ConnectAddress()) + } } func (d *dcAwareRR) HostUp(host *HostInfo) { @@ -587,29 +589,28 @@ func (d *dcAwareRR) HostDown(host *HostInfo) { func (d *dcAwareRR) SetPartitioner(p string) {} func (d *dcAwareRR) Pick(q ExecutableQuery) NextHost { - d.mu.RLock() - - // TODO: this is O(len(hosts)) and requires calculating a full query plan for - // every query. On the other hand it is stupidly simply and provides random host - // order prefering local dcs over remote ones. 
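The policies.go changes above cap ExponentialBackoffRetryPolicy's sleep at Max, add locking to the token-aware policy, and rebuild the DC-aware round-robin policy around copy-on-write host lists with an atomic position counter. A sketch of how these policies are typically wired into a cluster; the addresses and datacenter name are placeholders:

package example

import (
	"time"

	"github.com/gocql/gocql"
)

// newCluster wires up the policies touched above: DC-aware round-robin,
// wrapped in token awareness, plus an exponential backoff retry policy
// whose sleep is now capped at Max.
func newCluster() *gocql.ClusterConfig {
	cluster := gocql.NewCluster("10.0.0.1")
	cluster.PoolConfig.HostSelectionPolicy =
		gocql.TokenAwareHostPolicy(gocql.DCAwareRoundRobinPolicy("dc1"))
	cluster.RetryPolicy = &gocql.ExponentialBackoffRetryPolicy{
		NumRetries: 3,
		Min:        100 * time.Millisecond,
		Max:        2 * time.Second,
	}
	return cluster
}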
- hosts := make([]*HostInfo, 0, len(d.localHosts)+len(d.remoteHosts)) - for _, host := range d.localHosts { - hosts = append(hosts, host) - } - for _, host := range d.remoteHosts { - hosts = append(hosts, host) - } - - d.mu.RUnlock() - + var i int return func() SelectedHost { + var hosts []*HostInfo + localHosts := d.localHosts.get() + remoteHosts := d.remoteHosts.get() + if len(localHosts) != 0 { + hosts = localHosts + } else { + hosts = remoteHosts + } if len(hosts) == 0 { return nil } - host := hosts[0] - hosts = hosts[1:] - + // always increment pos to evenly distribute traffic in case of + // failures + pos := atomic.AddUint32(&d.pos, 1) - 1 + if i >= len(localHosts)+len(remoteHosts) { + return nil + } + host := hosts[(pos)%uint32(len(hosts))] + i++ return (*selectedHost)(host) } } diff --git a/vendor/github.com/gocql/gocql/query_executor.go b/vendor/github.com/gocql/gocql/query_executor.go index 019beff280..4f98730161 100644 --- a/vendor/github.com/gocql/gocql/query_executor.go +++ b/vendor/github.com/gocql/gocql/query_executor.go @@ -17,6 +17,15 @@ type queryExecutor struct { policy HostSelectionPolicy } +func (q *queryExecutor) attemptQuery(qry ExecutableQuery, conn *Conn) *Iter { + start := time.Now() + iter := qry.execute(conn) + + qry.attempt(time.Since(start)) + + return iter +} + func (q *queryExecutor) executeQuery(qry ExecutableQuery) (*Iter, error) { rt := qry.retryPolicy() hostIter := q.policy.Pick(qry) @@ -38,10 +47,7 @@ func (q *queryExecutor) executeQuery(qry ExecutableQuery) (*Iter, error) { continue } - start := time.Now() - iter = qry.execute(conn) - - qry.attempt(time.Since(start)) + iter = q.attemptQuery(qry, conn) // Update host hostResponse.Mark(iter.err) diff --git a/vendor/github.com/gocql/gocql/ring.go b/vendor/github.com/gocql/gocql/ring.go index d5a8ad0052..7ca6764538 100644 --- a/vendor/github.com/gocql/gocql/ring.go +++ b/vendor/github.com/gocql/gocql/ring.go @@ -64,6 +64,8 @@ func (r *ring) currentHosts() map[string]*HostInfo { } func (r *ring) addHost(host *HostInfo) bool { + // TODO(zariel): key all host info by HostID instead of + // ip addresses if host.invalidConnectAddr() { panic(fmt.Sprintf("invalid host: %v", host)) } diff --git a/vendor/github.com/gocql/gocql/session.go b/vendor/github.com/gocql/gocql/session.go index 74a0a2e746..0ff6742910 100644 --- a/vendor/github.com/gocql/gocql/session.go +++ b/vendor/github.com/gocql/gocql/session.go @@ -161,6 +161,18 @@ func (s *Session) init() error { return err } + allHosts := hosts + hosts = hosts[:0] + hostMap := make(map[string]*HostInfo, len(allHosts)) + for _, host := range allHosts { + if !s.cfg.filterHost(host) { + hosts = append(hosts, host) + hostMap[host.ConnectAddress().String()] = host + } + } + + s.ring.endpoints = hosts + if !s.cfg.disableControlConn { s.control = createControlConn(s) if s.cfg.ProtoVersion == 0 { @@ -182,17 +194,20 @@ func (s *Session) init() error { if !s.cfg.DisableInitialHostLookup { var partitioner string - hosts, partitioner, err = s.hostSource.GetHosts() + newHosts, partitioner, err := s.hostSource.GetHosts() if err != nil { return err } s.policy.SetPartitioner(partitioner) + for _, host := range newHosts { + hostMap[host.ConnectAddress().String()] = host + } } } - for _, host := range hosts { + for _, host := range hostMap { host = s.ring.addOrUpdate(host) - s.handleNodeUp(host.ConnectAddress(), host.Port(), false) + s.addNewNode(host) } // TODO(zariel): we probably dont need this any more as we verify that we @@ -210,7 +225,8 @@ func (s *Session) init() error { 
newer, _ := checkSystemSchema(s.control) s.useSystemSchema = newer } else { - s.useSystemSchema = hosts[0].Version().Major >= 3 + host := s.ring.rrHost() + s.useSystemSchema = host.Version().Major >= 3 } if s.pool.Size() == 0 { @@ -639,7 +655,7 @@ func (s *Session) MapExecuteBatchCAS(batch *Batch, dest map[string]interface{}) } func (s *Session) connect(host *HostInfo, errorHandler ConnErrorHandler) (*Conn, error) { - return Connect(host, s.connCfg, errorHandler, s) + return s.dial(host.ConnectAddress(), host.Port(), s.connCfg, errorHandler) } // Query represents a CQL statement that can be executed. @@ -1052,8 +1068,21 @@ func (iter *Iter) Columns() []ColumnInfo { } type Scanner interface { + // Next advances the row pointer to point at the next row, the row is valid until + // the next call of Next. It returns true if there is a row which is available to be + // scanned into with Scan. + // Next must be called before every call to Scan. Next() bool + + // Scan copies the current row's columns into dest. If the length of dest does not equal + // the number of columns returned in the row an error is returned. If an error is encountered + // when unmarshalling a column into the value in dest an error is returned and the row is invalidated + // until the next call to Next. + // Next must be called before calling Scan, if it is not an error is returned. Scan(...interface{}) error + + // Err returns the if there was one during iteration that resulted in iteration being unable to complete. + // Err will also release resources held by the iterator, the Scanner should not used after being called. Err() error } @@ -1062,10 +1091,6 @@ type iterScanner struct { cols [][]byte } -// Next advances the row pointer to point at the next row, the row is valid until -// the next call of Next. It returns true if there is a row which is available to be -// scanned into with Scan. -// Next must be called before every call to Scan. func (is *iterScanner) Next() bool { iter := is.iter if iter.err != nil { @@ -1119,11 +1144,6 @@ func scanColumn(p []byte, col ColumnInfo, dest []interface{}) (int, error) { } } -// Scan copies the current row's columns into dest. If the length of dest does not equal -// the number of columns returned in the row an error is returned. If an error is encountered -// when unmarshalling a column into the value in dest an error is returned and the row is invalidated -// until the next call to Next. -// Next must be called before calling Scan, if it is not an error is returned. func (is *iterScanner) Scan(dest ...interface{}) error { if is.cols == nil { return errors.New("gocql: Scan called without calling Next") @@ -1154,8 +1174,6 @@ func (is *iterScanner) Scan(dest ...interface{}) error { return err } -// Err returns the if there was one during iteration that resulted in iteration being unable to complete. -// Err will also release resources held by the iterator and should not used after being called. 
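The Scanner interface above now carries its contract in the interface documentation: Next before every Scan, and Err both to surface iteration errors and to release the iterator. A short sketch against a hypothetical users table:

package example

import (
	"log"

	"github.com/gocql/gocql"
)

// scanUsers follows the documented Scanner contract: Next before every
// Scan, then Err to finish the iteration and release the iterator.
func scanUsers(session *gocql.Session) {
	scanner := session.Query(`SELECT id, name FROM users`).Iter().Scanner()

	for scanner.Next() {
		var id gocql.UUID
		var name string
		if err := scanner.Scan(&id, &name); err != nil {
			log.Fatal(err)
		}
		log.Println(id, name)
	}
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
}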
func (is *iterScanner) Err() error { iter := is.iter is.iter = nil @@ -1299,11 +1317,17 @@ type nextIter struct { pos int once sync.Once next *Iter + conn *Conn } func (n *nextIter) fetch() *Iter { n.once.Do(func() { - n.next = n.qry.session.executeQuery(&n.qry) + iter := n.qry.session.executor.attemptQuery(&n.qry, n.conn) + if iter != nil && iter.err == nil { + n.next = iter + } else { + n.next = n.qry.session.executeQuery(&n.qry) + } }) return n.next } diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go index 1d92cb272a..c6a91bcab9 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go @@ -11,6 +11,7 @@ It has these top-level messages: FileDescriptorSet FileDescriptorProto DescriptorProto + ExtensionRangeOptions FieldDescriptorProto OneofDescriptorProto EnumDescriptorProto @@ -137,7 +138,7 @@ func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { *x = FieldDescriptorProto_Type(value) return nil } -func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{3, 0} } +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{4, 0} } type FieldDescriptorProto_Label int32 @@ -176,7 +177,7 @@ func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { return nil } func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { - return fileDescriptor0, []int{3, 1} + return fileDescriptor0, []int{4, 1} } // Generated classes can be optimized for speed or code size. @@ -216,7 +217,7 @@ func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { *x = FileOptions_OptimizeMode(value) return nil } -func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{9, 0} } +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{10, 0} } type FieldOptions_CType int32 @@ -254,7 +255,7 @@ func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { *x = FieldOptions_CType(value) return nil } -func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{11, 0} } +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{12, 0} } type FieldOptions_JSType int32 @@ -294,7 +295,7 @@ func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { *x = FieldOptions_JSType(value) return nil } -func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{11, 1} } +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{12, 1} } // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, // or neither? 
HTTP based RPC implementation may choose GET verb for safe @@ -335,7 +336,7 @@ func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { return nil } func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { - return fileDescriptor0, []int{16, 0} + return fileDescriptor0, []int{17, 0} } // The protocol compiler can output a FileDescriptorSet containing the .proto @@ -567,9 +568,10 @@ func (m *DescriptorProto) GetReservedName() []string { } type DescriptorProto_ExtensionRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - XXX_unrecognized []byte `json:"-"` + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } @@ -593,6 +595,13 @@ func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { return 0 } +func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if m != nil { + return m.Options + } + return nil +} + // Range of reserved tag numbers. Reserved tag numbers may not be used by // fields or extension ranges in the same message. Reserved ranges may // not overlap. @@ -623,6 +632,33 @@ func (m *DescriptorProto_ReservedRange) GetEnd() int32 { return 0 } +type ExtensionRangeOptions struct { + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } +func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } +func (*ExtensionRangeOptions) ProtoMessage() {} +func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ExtensionRangeOptions +} + +func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + // Describes a field within a message. 
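The regenerated descriptor.pb.go above adds the ExtensionRangeOptions message and an Options field on DescriptorProto_ExtensionRange; most of the remaining diffs in this file are the mechanical index shifts that follow from inserting a new top-level message. A small sketch constructing the new field with the generated types; the range values are arbitrary:

package example

import (
	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/protoc-gen-go/descriptor"
)

// extensionRange builds a DescriptorProto_ExtensionRange using the Options
// field added by the regenerated descriptor above.
func extensionRange() *descriptor.DescriptorProto_ExtensionRange {
	return &descriptor.DescriptorProto_ExtensionRange{
		Start:   proto.Int32(1000),
		End:     proto.Int32(2000),
		Options: &descriptor.ExtensionRangeOptions{},
	}
}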
type FieldDescriptorProto struct { Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` @@ -661,7 +697,7 @@ type FieldDescriptorProto struct { func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } func (*FieldDescriptorProto) ProtoMessage() {} -func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } func (m *FieldDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -743,7 +779,7 @@ type OneofDescriptorProto struct { func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } func (*OneofDescriptorProto) ProtoMessage() {} -func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } func (m *OneofDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -770,7 +806,7 @@ type EnumDescriptorProto struct { func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } func (*EnumDescriptorProto) ProtoMessage() {} -func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } func (m *EnumDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -804,7 +840,7 @@ type EnumValueDescriptorProto struct { func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } func (*EnumValueDescriptorProto) ProtoMessage() {} -func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } func (m *EnumValueDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -838,7 +874,7 @@ type ServiceDescriptorProto struct { func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } func (*ServiceDescriptorProto) ProtoMessage() {} -func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } func (m *ServiceDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -879,7 +915,7 @@ type MethodDescriptorProto struct { func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } func (*MethodDescriptorProto) ProtoMessage() {} -func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } const Default_MethodDescriptorProto_ClientStreaming bool = false const Default_MethodDescriptorProto_ServerStreaming bool = false @@ -974,7 +1010,7 @@ type FileOptions struct { CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" 
json:"cc_generic_services,omitempty"` JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` - PhpGenericServices *bool `protobuf:"varint,19,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` + PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` // Is this file deprecated? // Depending on the target platform, this can emit Deprecated annotations // for everything in the file, or it will be completely ignored; in the very @@ -1009,7 +1045,7 @@ type FileOptions struct { func (m *FileOptions) Reset() { *m = FileOptions{} } func (m *FileOptions) String() string { return proto.CompactTextString(m) } func (*FileOptions) ProtoMessage() {} -func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } +func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } var extRange_FileOptions = []proto.ExtensionRange{ {1000, 536870911}, @@ -1222,7 +1258,7 @@ type MessageOptions struct { func (m *MessageOptions) Reset() { *m = MessageOptions{} } func (m *MessageOptions) String() string { return proto.CompactTextString(m) } func (*MessageOptions) ProtoMessage() {} -func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } +func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } var extRange_MessageOptions = []proto.ExtensionRange{ {1000, 536870911}, @@ -1285,13 +1321,15 @@ type FieldOptions struct { Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` // The jstype option determines the JavaScript type used for values of the // field. The option is permitted only for 64 bit integral and fixed types - // (int64, uint64, sint64, fixed64, sfixed64). By default these types are - // represented as JavaScript strings. This avoids loss of precision that can - // happen when a large value is converted to a floating point JavaScript - // numbers. Specifying JS_NUMBER for the jstype causes the generated - // JavaScript code to use the JavaScript "number" type instead of strings. - // This option is an enum to permit additional types to be added, - // e.g. goog.math.Integer. + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` // Should this field be parsed lazily? Lazy applies only to message-type // fields. 
It means that when the outer message is initially parsed, the @@ -1338,7 +1376,7 @@ type FieldOptions struct { func (m *FieldOptions) Reset() { *m = FieldOptions{} } func (m *FieldOptions) String() string { return proto.CompactTextString(m) } func (*FieldOptions) ProtoMessage() {} -func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } +func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } var extRange_FieldOptions = []proto.ExtensionRange{ {1000, 536870911}, @@ -1413,7 +1451,7 @@ type OneofOptions struct { func (m *OneofOptions) Reset() { *m = OneofOptions{} } func (m *OneofOptions) String() string { return proto.CompactTextString(m) } func (*OneofOptions) ProtoMessage() {} -func (*OneofOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } +func (*OneofOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } var extRange_OneofOptions = []proto.ExtensionRange{ {1000, 536870911}, @@ -1448,7 +1486,7 @@ type EnumOptions struct { func (m *EnumOptions) Reset() { *m = EnumOptions{} } func (m *EnumOptions) String() string { return proto.CompactTextString(m) } func (*EnumOptions) ProtoMessage() {} -func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } +func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } var extRange_EnumOptions = []proto.ExtensionRange{ {1000, 536870911}, @@ -1496,7 +1534,7 @@ type EnumValueOptions struct { func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } func (*EnumValueOptions) ProtoMessage() {} -func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } +func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } var extRange_EnumValueOptions = []proto.ExtensionRange{ {1000, 536870911}, @@ -1537,7 +1575,7 @@ type ServiceOptions struct { func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } func (*ServiceOptions) ProtoMessage() {} -func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } +func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } var extRange_ServiceOptions = []proto.ExtensionRange{ {1000, 536870911}, @@ -1579,7 +1617,7 @@ type MethodOptions struct { func (m *MethodOptions) Reset() { *m = MethodOptions{} } func (m *MethodOptions) String() string { return proto.CompactTextString(m) } func (*MethodOptions) ProtoMessage() {} -func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } +func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } var extRange_MethodOptions = []proto.ExtensionRange{ {1000, 536870911}, @@ -1635,7 +1673,7 @@ type UninterpretedOption struct { func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } func (*UninterpretedOption) ProtoMessage() {} -func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } +func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { if m != nil { @@ -1701,7 +1739,7 @@ func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOptio 
func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } func (*UninterpretedOption_NamePart) ProtoMessage() {} func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{17, 0} + return fileDescriptor0, []int{18, 0} } func (m *UninterpretedOption_NamePart) GetNamePart() string { @@ -1771,7 +1809,7 @@ type SourceCodeInfo struct { func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } func (*SourceCodeInfo) ProtoMessage() {} -func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { if m != nil { @@ -1867,7 +1905,7 @@ type SourceCodeInfo_Location struct { func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } func (*SourceCodeInfo_Location) ProtoMessage() {} -func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18, 0} } +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19, 0} } func (m *SourceCodeInfo_Location) GetPath() []int32 { if m != nil { @@ -1917,7 +1955,7 @@ type GeneratedCodeInfo struct { func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } func (*GeneratedCodeInfo) ProtoMessage() {} -func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { if m != nil { @@ -1946,7 +1984,7 @@ func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_ func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{19, 0} + return fileDescriptor0, []int{20, 0} } func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { @@ -1983,6 +2021,7 @@ func init() { proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") + proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") @@ -2014,161 +2053,163 @@ func init() { func init() { proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 2490 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x8e, 0xdb, 0xc6, - 0x15, 0x8e, 0x7e, 0x57, 0x3a, 0xd2, 0x6a, 0x67, 0x67, 0x37, 0x36, 0xbd, 0xf9, 0xf1, 0x5a, 0xf9, - 0xf1, 0x3a, 0x69, 0xb4, 0xc1, 0xc6, 0x76, 0x9c, 0x4d, 0xe1, 0x42, 0x2b, 0xd1, 0x1b, 
0xb9, 0x5a, - 0x49, 0xa5, 0xb4, 0x8d, 0x9d, 0x1b, 0x62, 0x96, 0x1c, 0x49, 0xb4, 0x29, 0x92, 0x21, 0x29, 0xdb, - 0x9b, 0x2b, 0x03, 0xbd, 0x2a, 0xd0, 0x07, 0x28, 0x8a, 0xa2, 0x17, 0xb9, 0x09, 0xd0, 0x07, 0x28, - 0xd0, 0xbb, 0x3e, 0x41, 0x81, 0xbc, 0x41, 0x51, 0x14, 0x68, 0xdf, 0xa0, 0xb7, 0xc5, 0xcc, 0x90, - 0x14, 0xa9, 0x1f, 0x7b, 0x1b, 0xc0, 0xc9, 0x95, 0x34, 0xdf, 0xf9, 0xce, 0x99, 0x33, 0x67, 0xce, - 0xcc, 0x9c, 0x19, 0xc2, 0xee, 0xc8, 0xb6, 0x47, 0x26, 0xdd, 0x77, 0x5c, 0xdb, 0xb7, 0xcf, 0xa6, - 0xc3, 0x7d, 0x9d, 0x7a, 0x9a, 0x6b, 0x38, 0xbe, 0xed, 0xd6, 0x38, 0x86, 0x37, 0x04, 0xa3, 0x16, - 0x32, 0xaa, 0x27, 0xb0, 0x79, 0xcf, 0x30, 0x69, 0x33, 0x22, 0xf6, 0xa9, 0x8f, 0xef, 0x40, 0x76, - 0x68, 0x98, 0x54, 0x4a, 0xed, 0x66, 0xf6, 0x4a, 0x07, 0xef, 0xd6, 0xe6, 0x94, 0x6a, 0x49, 0x8d, - 0x1e, 0x83, 0x15, 0xae, 0x51, 0xfd, 0x57, 0x16, 0xb6, 0x96, 0x48, 0x31, 0x86, 0xac, 0x45, 0x26, - 0xcc, 0x62, 0x6a, 0xaf, 0xa8, 0xf0, 0xff, 0x58, 0x82, 0x35, 0x87, 0x68, 0x8f, 0xc9, 0x88, 0x4a, - 0x69, 0x0e, 0x87, 0x4d, 0xfc, 0x36, 0x80, 0x4e, 0x1d, 0x6a, 0xe9, 0xd4, 0xd2, 0xce, 0xa5, 0xcc, - 0x6e, 0x66, 0xaf, 0xa8, 0xc4, 0x10, 0xfc, 0x21, 0x6c, 0x3a, 0xd3, 0x33, 0xd3, 0xd0, 0xd4, 0x18, - 0x0d, 0x76, 0x33, 0x7b, 0x39, 0x05, 0x09, 0x41, 0x73, 0x46, 0xbe, 0x0e, 0x1b, 0x4f, 0x29, 0x79, - 0x1c, 0xa7, 0x96, 0x38, 0xb5, 0xc2, 0xe0, 0x18, 0xb1, 0x01, 0xe5, 0x09, 0xf5, 0x3c, 0x32, 0xa2, - 0xaa, 0x7f, 0xee, 0x50, 0x29, 0xcb, 0x47, 0xbf, 0xbb, 0x30, 0xfa, 0xf9, 0x91, 0x97, 0x02, 0xad, - 0xc1, 0xb9, 0x43, 0x71, 0x1d, 0x8a, 0xd4, 0x9a, 0x4e, 0x84, 0x85, 0xdc, 0x8a, 0xf8, 0xc9, 0xd6, - 0x74, 0x32, 0x6f, 0xa5, 0xc0, 0xd4, 0x02, 0x13, 0x6b, 0x1e, 0x75, 0x9f, 0x18, 0x1a, 0x95, 0xf2, - 0xdc, 0xc0, 0xf5, 0x05, 0x03, 0x7d, 0x21, 0x9f, 0xb7, 0x11, 0xea, 0xe1, 0x06, 0x14, 0xe9, 0x33, - 0x9f, 0x5a, 0x9e, 0x61, 0x5b, 0xd2, 0x1a, 0x37, 0xf2, 0xde, 0x92, 0x59, 0xa4, 0xa6, 0x3e, 0x6f, - 0x62, 0xa6, 0x87, 0x6f, 0xc3, 0x9a, 0xed, 0xf8, 0x86, 0x6d, 0x79, 0x52, 0x61, 0x37, 0xb5, 0x57, - 0x3a, 0x78, 0x73, 0x69, 0x22, 0x74, 0x05, 0x47, 0x09, 0xc9, 0xb8, 0x05, 0xc8, 0xb3, 0xa7, 0xae, - 0x46, 0x55, 0xcd, 0xd6, 0xa9, 0x6a, 0x58, 0x43, 0x5b, 0x2a, 0x72, 0x03, 0x57, 0x17, 0x07, 0xc2, - 0x89, 0x0d, 0x5b, 0xa7, 0x2d, 0x6b, 0x68, 0x2b, 0x15, 0x2f, 0xd1, 0xc6, 0x97, 0x20, 0xef, 0x9d, - 0x5b, 0x3e, 0x79, 0x26, 0x95, 0x79, 0x86, 0x04, 0xad, 0xea, 0x7f, 0x73, 0xb0, 0x71, 0x91, 0x14, - 0xfb, 0x1c, 0x72, 0x43, 0x36, 0x4a, 0x29, 0xfd, 0xff, 0xc4, 0x40, 0xe8, 0x24, 0x83, 0x98, 0xff, - 0x81, 0x41, 0xac, 0x43, 0xc9, 0xa2, 0x9e, 0x4f, 0x75, 0x91, 0x11, 0x99, 0x0b, 0xe6, 0x14, 0x08, - 0xa5, 0xc5, 0x94, 0xca, 0xfe, 0xa0, 0x94, 0x7a, 0x00, 0x1b, 0x91, 0x4b, 0xaa, 0x4b, 0xac, 0x51, - 0x98, 0x9b, 0xfb, 0x2f, 0xf3, 0xa4, 0x26, 0x87, 0x7a, 0x0a, 0x53, 0x53, 0x2a, 0x34, 0xd1, 0xc6, - 0x4d, 0x00, 0xdb, 0xa2, 0xf6, 0x50, 0xd5, 0xa9, 0x66, 0x4a, 0x85, 0x15, 0x51, 0xea, 0x32, 0xca, - 0x42, 0x94, 0x6c, 0x81, 0x6a, 0x26, 0xfe, 0x6c, 0x96, 0x6a, 0x6b, 0x2b, 0x32, 0xe5, 0x44, 0x2c, - 0xb2, 0x85, 0x6c, 0x3b, 0x85, 0x8a, 0x4b, 0x59, 0xde, 0x53, 0x3d, 0x18, 0x59, 0x91, 0x3b, 0x51, - 0x7b, 0xe9, 0xc8, 0x94, 0x40, 0x4d, 0x0c, 0x6c, 0xdd, 0x8d, 0x37, 0xf1, 0x3b, 0x10, 0x01, 0x2a, - 0x4f, 0x2b, 0xe0, 0xbb, 0x50, 0x39, 0x04, 0x3b, 0x64, 0x42, 0x77, 0xee, 0x40, 0x25, 0x19, 0x1e, - 0xbc, 0x0d, 0x39, 0xcf, 0x27, 0xae, 0xcf, 0xb3, 0x30, 0xa7, 0x88, 0x06, 0x46, 0x90, 0xa1, 0x96, - 0xce, 0x77, 0xb9, 0x9c, 0xc2, 0xfe, 0xee, 0x7c, 0x0a, 0xeb, 0x89, 0xee, 0x2f, 0xaa, 0x58, 0xfd, - 0x7d, 0x1e, 0xb6, 0x97, 0xe5, 0xdc, 0xd2, 0xf4, 0xbf, 0x04, 0x79, 0x6b, 0x3a, 0x39, 0xa3, 0xae, - 0x94, 0xe1, 
0x16, 0x82, 0x16, 0xae, 0x43, 0xce, 0x24, 0x67, 0xd4, 0x94, 0xb2, 0xbb, 0xa9, 0xbd, - 0xca, 0xc1, 0x87, 0x17, 0xca, 0xea, 0x5a, 0x9b, 0xa9, 0x28, 0x42, 0x13, 0xdf, 0x85, 0x6c, 0xb0, - 0xc5, 0x31, 0x0b, 0x1f, 0x5c, 0xcc, 0x02, 0xcb, 0x45, 0x85, 0xeb, 0xe1, 0x37, 0xa0, 0xc8, 0x7e, - 0x45, 0x6c, 0xf3, 0xdc, 0xe7, 0x02, 0x03, 0x58, 0x5c, 0xf1, 0x0e, 0x14, 0x78, 0x9a, 0xe9, 0x34, - 0x3c, 0x1a, 0xa2, 0x36, 0x9b, 0x18, 0x9d, 0x0e, 0xc9, 0xd4, 0xf4, 0xd5, 0x27, 0xc4, 0x9c, 0x52, - 0x9e, 0x30, 0x45, 0xa5, 0x1c, 0x80, 0xbf, 0x66, 0x18, 0xbe, 0x0a, 0x25, 0x91, 0x95, 0x86, 0xa5, - 0xd3, 0x67, 0x7c, 0xf7, 0xc9, 0x29, 0x22, 0x51, 0x5b, 0x0c, 0x61, 0xdd, 0x3f, 0xf2, 0x6c, 0x2b, - 0x9c, 0x5a, 0xde, 0x05, 0x03, 0x78, 0xf7, 0x9f, 0xce, 0x6f, 0x7c, 0x6f, 0x2d, 0x1f, 0xde, 0x7c, - 0x2e, 0x56, 0xff, 0x92, 0x86, 0x2c, 0x5f, 0x6f, 0x1b, 0x50, 0x1a, 0x3c, 0xec, 0xc9, 0x6a, 0xb3, - 0x7b, 0x7a, 0xd4, 0x96, 0x51, 0x0a, 0x57, 0x00, 0x38, 0x70, 0xaf, 0xdd, 0xad, 0x0f, 0x50, 0x3a, - 0x6a, 0xb7, 0x3a, 0x83, 0xdb, 0x37, 0x51, 0x26, 0x52, 0x38, 0x15, 0x40, 0x36, 0x4e, 0xf8, 0xe4, - 0x00, 0xe5, 0x30, 0x82, 0xb2, 0x30, 0xd0, 0x7a, 0x20, 0x37, 0x6f, 0xdf, 0x44, 0xf9, 0x24, 0xf2, - 0xc9, 0x01, 0x5a, 0xc3, 0xeb, 0x50, 0xe4, 0xc8, 0x51, 0xb7, 0xdb, 0x46, 0x85, 0xc8, 0x66, 0x7f, - 0xa0, 0xb4, 0x3a, 0xc7, 0xa8, 0x18, 0xd9, 0x3c, 0x56, 0xba, 0xa7, 0x3d, 0x04, 0x91, 0x85, 0x13, - 0xb9, 0xdf, 0xaf, 0x1f, 0xcb, 0xa8, 0x14, 0x31, 0x8e, 0x1e, 0x0e, 0xe4, 0x3e, 0x2a, 0x27, 0xdc, - 0xfa, 0xe4, 0x00, 0xad, 0x47, 0x5d, 0xc8, 0x9d, 0xd3, 0x13, 0x54, 0xc1, 0x9b, 0xb0, 0x2e, 0xba, - 0x08, 0x9d, 0xd8, 0x98, 0x83, 0x6e, 0xdf, 0x44, 0x68, 0xe6, 0x88, 0xb0, 0xb2, 0x99, 0x00, 0x6e, - 0xdf, 0x44, 0xb8, 0xda, 0x80, 0x1c, 0xcf, 0x2e, 0x8c, 0xa1, 0xd2, 0xae, 0x1f, 0xc9, 0x6d, 0xb5, - 0xdb, 0x1b, 0xb4, 0xba, 0x9d, 0x7a, 0x1b, 0xa5, 0x66, 0x98, 0x22, 0xff, 0xea, 0xb4, 0xa5, 0xc8, - 0x4d, 0x94, 0x8e, 0x63, 0x3d, 0xb9, 0x3e, 0x90, 0x9b, 0x28, 0x53, 0xd5, 0x60, 0x7b, 0xd9, 0x3e, - 0xb3, 0x74, 0x65, 0xc4, 0xa6, 0x38, 0xbd, 0x62, 0x8a, 0xb9, 0xad, 0x85, 0x29, 0xfe, 0x36, 0x05, - 0x5b, 0x4b, 0xf6, 0xda, 0xa5, 0x9d, 0xfc, 0x02, 0x72, 0x22, 0x45, 0xc5, 0xe9, 0x73, 0x63, 0xe9, - 0xa6, 0xcd, 0x13, 0x76, 0xe1, 0x04, 0xe2, 0x7a, 0xf1, 0x13, 0x38, 0xb3, 0xe2, 0x04, 0x66, 0x26, - 0x16, 0x9c, 0xfc, 0x4d, 0x0a, 0xa4, 0x55, 0xb6, 0x5f, 0xb2, 0x51, 0xa4, 0x13, 0x1b, 0xc5, 0xe7, - 0xf3, 0x0e, 0x5c, 0x5b, 0x3d, 0x86, 0x05, 0x2f, 0xbe, 0x4b, 0xc1, 0xa5, 0xe5, 0x85, 0xca, 0x52, - 0x1f, 0xee, 0x42, 0x7e, 0x42, 0xfd, 0xb1, 0x1d, 0x1e, 0xd6, 0xef, 0x2f, 0x39, 0x02, 0x98, 0x78, - 0x3e, 0x56, 0x81, 0x56, 0xfc, 0x0c, 0xc9, 0xac, 0xaa, 0x36, 0x84, 0x37, 0x0b, 0x9e, 0xfe, 0x36, - 0x0d, 0xaf, 0x2f, 0x35, 0xbe, 0xd4, 0xd1, 0xb7, 0x00, 0x0c, 0xcb, 0x99, 0xfa, 0xe2, 0x40, 0x16, + // 2519 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x6e, 0x1b, 0xc7, + 0x15, 0x0e, 0x7f, 0x45, 0x1e, 0x52, 0xd4, 0x68, 0xa4, 0xd8, 0x6b, 0xe5, 0xc7, 0x32, 0xf3, 0x63, + 0xd9, 0x69, 0xa8, 0x40, 0xb1, 0x1d, 0x47, 0x29, 0xd2, 0x52, 0xe4, 0x5a, 0xa1, 0x4a, 0x91, 0xec, + 0x92, 0x6a, 0x7e, 0x6e, 0x16, 0xa3, 0xdd, 0x21, 0xb9, 0xf6, 0x72, 0x77, 0xb3, 0xbb, 0xb4, 0xad, + 0xa0, 0x17, 0x06, 0x7a, 0x55, 0xa0, 0x0f, 0x50, 0x14, 0x45, 0x2f, 0x72, 0x13, 0xa0, 0x0f, 0x50, + 0x20, 0x77, 0x7d, 0x82, 0x02, 0x79, 0x83, 0xa2, 0x28, 0xd0, 0x3e, 0x46, 0x31, 0x33, 0xbb, 0xcb, + 0x5d, 0xfe, 0xc4, 0x6a, 0x80, 0x38, 0x57, 0xe4, 0x7c, 0xe7, 0x3b, 0x67, 0xce, 0x9c, 0x39, 0x33, + 0x73, 0x66, 0x16, 0x76, 0x47, 0xb6, 0x3d, 0x32, 0xe9, 0xbe, 0xe3, 0xda, 0xbe, 0x7d, 
0x3e, 0x1d, + 0xee, 0xeb, 0xd4, 0xd3, 0x5c, 0xc3, 0xf1, 0x6d, 0xb7, 0xc6, 0x31, 0xbc, 0x21, 0x18, 0xb5, 0x90, + 0x51, 0x3d, 0x85, 0xcd, 0x07, 0x86, 0x49, 0x9b, 0x11, 0xb1, 0x4f, 0x7d, 0x7c, 0x1f, 0xb2, 0x43, + 0xc3, 0xa4, 0x52, 0x6a, 0x37, 0xb3, 0x57, 0x3a, 0x78, 0xb3, 0x36, 0xa7, 0x54, 0x4b, 0x6a, 0xf4, + 0x18, 0xac, 0x70, 0x8d, 0xea, 0xbf, 0xb3, 0xb0, 0xb5, 0x44, 0x8a, 0x31, 0x64, 0x2d, 0x32, 0x61, + 0x16, 0x53, 0x7b, 0x45, 0x85, 0xff, 0xc7, 0x12, 0xac, 0x39, 0x44, 0x7b, 0x44, 0x46, 0x54, 0x4a, + 0x73, 0x38, 0x6c, 0xe2, 0xd7, 0x01, 0x74, 0xea, 0x50, 0x4b, 0xa7, 0x96, 0x76, 0x21, 0x65, 0x76, + 0x33, 0x7b, 0x45, 0x25, 0x86, 0xe0, 0x77, 0x60, 0xd3, 0x99, 0x9e, 0x9b, 0x86, 0xa6, 0xc6, 0x68, + 0xb0, 0x9b, 0xd9, 0xcb, 0x29, 0x48, 0x08, 0x9a, 0x33, 0xf2, 0x4d, 0xd8, 0x78, 0x42, 0xc9, 0xa3, + 0x38, 0xb5, 0xc4, 0xa9, 0x15, 0x06, 0xc7, 0x88, 0x0d, 0x28, 0x4f, 0xa8, 0xe7, 0x91, 0x11, 0x55, + 0xfd, 0x0b, 0x87, 0x4a, 0x59, 0x3e, 0xfa, 0xdd, 0x85, 0xd1, 0xcf, 0x8f, 0xbc, 0x14, 0x68, 0x0d, + 0x2e, 0x1c, 0x8a, 0xeb, 0x50, 0xa4, 0xd6, 0x74, 0x22, 0x2c, 0xe4, 0x56, 0xc4, 0x4f, 0xb6, 0xa6, + 0x93, 0x79, 0x2b, 0x05, 0xa6, 0x16, 0x98, 0x58, 0xf3, 0xa8, 0xfb, 0xd8, 0xd0, 0xa8, 0x94, 0xe7, + 0x06, 0x6e, 0x2e, 0x18, 0xe8, 0x0b, 0xf9, 0xbc, 0x8d, 0x50, 0x0f, 0x37, 0xa0, 0x48, 0x9f, 0xfa, + 0xd4, 0xf2, 0x0c, 0xdb, 0x92, 0xd6, 0xb8, 0x91, 0xb7, 0x96, 0xcc, 0x22, 0x35, 0xf5, 0x79, 0x13, + 0x33, 0x3d, 0x7c, 0x0f, 0xd6, 0x6c, 0xc7, 0x37, 0x6c, 0xcb, 0x93, 0x0a, 0xbb, 0xa9, 0xbd, 0xd2, + 0xc1, 0xab, 0x4b, 0x13, 0xa1, 0x2b, 0x38, 0x4a, 0x48, 0xc6, 0x2d, 0x40, 0x9e, 0x3d, 0x75, 0x35, + 0xaa, 0x6a, 0xb6, 0x4e, 0x55, 0xc3, 0x1a, 0xda, 0x52, 0x91, 0x1b, 0xb8, 0xbe, 0x38, 0x10, 0x4e, + 0x6c, 0xd8, 0x3a, 0x6d, 0x59, 0x43, 0x5b, 0xa9, 0x78, 0x89, 0x36, 0xbe, 0x02, 0x79, 0xef, 0xc2, + 0xf2, 0xc9, 0x53, 0xa9, 0xcc, 0x33, 0x24, 0x68, 0x55, 0xbf, 0xcd, 0xc3, 0xc6, 0x65, 0x52, 0xec, + 0x23, 0xc8, 0x0d, 0xd9, 0x28, 0xa5, 0xf4, 0xff, 0x13, 0x03, 0xa1, 0x93, 0x0c, 0x62, 0xfe, 0x07, + 0x06, 0xb1, 0x0e, 0x25, 0x8b, 0x7a, 0x3e, 0xd5, 0x45, 0x46, 0x64, 0x2e, 0x99, 0x53, 0x20, 0x94, + 0x16, 0x53, 0x2a, 0xfb, 0x83, 0x52, 0xea, 0x33, 0xd8, 0x88, 0x5c, 0x52, 0x5d, 0x62, 0x8d, 0xc2, + 0xdc, 0xdc, 0x7f, 0x9e, 0x27, 0x35, 0x39, 0xd4, 0x53, 0x98, 0x9a, 0x52, 0xa1, 0x89, 0x36, 0x6e, + 0x02, 0xd8, 0x16, 0xb5, 0x87, 0xaa, 0x4e, 0x35, 0x53, 0x2a, 0xac, 0x88, 0x52, 0x97, 0x51, 0x16, + 0xa2, 0x64, 0x0b, 0x54, 0x33, 0xf1, 0x87, 0xb3, 0x54, 0x5b, 0x5b, 0x91, 0x29, 0xa7, 0x62, 0x91, + 0x2d, 0x64, 0xdb, 0x19, 0x54, 0x5c, 0xca, 0xf2, 0x9e, 0xea, 0xc1, 0xc8, 0x8a, 0xdc, 0x89, 0xda, + 0x73, 0x47, 0xa6, 0x04, 0x6a, 0x62, 0x60, 0xeb, 0x6e, 0xbc, 0x89, 0xdf, 0x80, 0x08, 0x50, 0x79, + 0x5a, 0x01, 0xdf, 0x85, 0xca, 0x21, 0xd8, 0x21, 0x13, 0xba, 0xf3, 0x15, 0x54, 0x92, 0xe1, 0xc1, + 0xdb, 0x90, 0xf3, 0x7c, 0xe2, 0xfa, 0x3c, 0x0b, 0x73, 0x8a, 0x68, 0x60, 0x04, 0x19, 0x6a, 0xe9, + 0x7c, 0x97, 0xcb, 0x29, 0xec, 0x2f, 0xfe, 0xe5, 0x6c, 0xc0, 0x19, 0x3e, 0xe0, 0xb7, 0x17, 0x67, + 0x34, 0x61, 0x79, 0x7e, 0xdc, 0x3b, 0x1f, 0xc0, 0x7a, 0x62, 0x00, 0x97, 0xed, 0xba, 0xfa, 0x5b, + 0x78, 0x79, 0xa9, 0x69, 0xfc, 0x19, 0x6c, 0x4f, 0x2d, 0xc3, 0xf2, 0xa9, 0xeb, 0xb8, 0x94, 0x65, + 0xac, 0xe8, 0x4a, 0xfa, 0xcf, 0xda, 0x8a, 0x9c, 0x3b, 0x8b, 0xb3, 0x85, 0x15, 0x65, 0x6b, 0xba, + 0x08, 0xde, 0x2e, 0x16, 0xfe, 0xbb, 0x86, 0x9e, 0x3d, 0x7b, 0xf6, 0x2c, 0x5d, 0xfd, 0x63, 0x1e, + 0xb6, 0x97, 0xad, 0x99, 0xa5, 0xcb, 0xf7, 0x0a, 0xe4, 0xad, 0xe9, 0xe4, 0x9c, 0xba, 0x3c, 0x48, + 0x39, 0x25, 0x68, 0xe1, 0x3a, 0xe4, 0x4c, 0x72, 0x4e, 0x4d, 0x29, 0xbb, 0x9b, 0xda, 0xab, 0x1c, + 0xbc, 0x73, 
0xa9, 0x55, 0x59, 0x6b, 0x33, 0x15, 0x45, 0x68, 0xe2, 0x8f, 0x21, 0x1b, 0x6c, 0xd1, + 0xcc, 0xc2, 0xed, 0xcb, 0x59, 0x60, 0x6b, 0x49, 0xe1, 0x7a, 0xf8, 0x15, 0x28, 0xb2, 0x5f, 0x91, + 0x1b, 0x79, 0xee, 0x73, 0x81, 0x01, 0x2c, 0x2f, 0xf0, 0x0e, 0x14, 0xf8, 0x32, 0xd1, 0x69, 0x78, + 0xb4, 0x45, 0x6d, 0x96, 0x58, 0x3a, 0x1d, 0x92, 0xa9, 0xe9, 0xab, 0x8f, 0x89, 0x39, 0xa5, 0x3c, + 0xe1, 0x8b, 0x4a, 0x39, 0x00, 0x7f, 0xc3, 0x30, 0x7c, 0x1d, 0x4a, 0x62, 0x55, 0x19, 0x96, 0x4e, + 0x9f, 0xf2, 0xdd, 0x33, 0xa7, 0x88, 0x85, 0xd6, 0x62, 0x08, 0xeb, 0xfe, 0xa1, 0x67, 0x5b, 0x61, + 0x6a, 0xf2, 0x2e, 0x18, 0xc0, 0xbb, 0xff, 0x60, 0x7e, 0xe3, 0x7e, 0x6d, 0xf9, 0xf0, 0xe6, 0x73, + 0xaa, 0xfa, 0xb7, 0x34, 0x64, 0xf9, 0x7e, 0xb1, 0x01, 0xa5, 0xc1, 0xe7, 0x3d, 0x59, 0x6d, 0x76, + 0xcf, 0x8e, 0xda, 0x32, 0x4a, 0xe1, 0x0a, 0x00, 0x07, 0x1e, 0xb4, 0xbb, 0xf5, 0x01, 0x4a, 0x47, + 0xed, 0x56, 0x67, 0x70, 0xef, 0x0e, 0xca, 0x44, 0x0a, 0x67, 0x02, 0xc8, 0xc6, 0x09, 0xef, 0x1f, + 0xa0, 0x1c, 0x46, 0x50, 0x16, 0x06, 0x5a, 0x9f, 0xc9, 0xcd, 0x7b, 0x77, 0x50, 0x3e, 0x89, 0xbc, + 0x7f, 0x80, 0xd6, 0xf0, 0x3a, 0x14, 0x39, 0x72, 0xd4, 0xed, 0xb6, 0x51, 0x21, 0xb2, 0xd9, 0x1f, + 0x28, 0xad, 0xce, 0x31, 0x2a, 0x46, 0x36, 0x8f, 0x95, 0xee, 0x59, 0x0f, 0x41, 0x64, 0xe1, 0x54, + 0xee, 0xf7, 0xeb, 0xc7, 0x32, 0x2a, 0x45, 0x8c, 0xa3, 0xcf, 0x07, 0x72, 0x1f, 0x95, 0x13, 0x6e, + 0xbd, 0x7f, 0x80, 0xd6, 0xa3, 0x2e, 0xe4, 0xce, 0xd9, 0x29, 0xaa, 0xe0, 0x4d, 0x58, 0x17, 0x5d, + 0x84, 0x4e, 0x6c, 0xcc, 0x41, 0xf7, 0xee, 0x20, 0x34, 0x73, 0x44, 0x58, 0xd9, 0x4c, 0x00, 0xf7, + 0xee, 0x20, 0x5c, 0x6d, 0x40, 0x8e, 0x67, 0x17, 0xc6, 0x50, 0x69, 0xd7, 0x8f, 0xe4, 0xb6, 0xda, + 0xed, 0x0d, 0x5a, 0xdd, 0x4e, 0xbd, 0x8d, 0x52, 0x33, 0x4c, 0x91, 0x7f, 0x7d, 0xd6, 0x52, 0xe4, + 0x26, 0x4a, 0xc7, 0xb1, 0x9e, 0x5c, 0x1f, 0xc8, 0x4d, 0x94, 0xa9, 0x6a, 0xb0, 0xbd, 0x6c, 0x9f, + 0x5c, 0xba, 0x32, 0x62, 0x53, 0x9c, 0x5e, 0x31, 0xc5, 0xdc, 0xd6, 0xc2, 0x14, 0x7f, 0x9d, 0x82, + 0xad, 0x25, 0x67, 0xc5, 0xd2, 0x4e, 0x7e, 0x01, 0x39, 0x91, 0xa2, 0xe2, 0xf4, 0xbc, 0xb5, 0xf4, + 0xd0, 0xe1, 0x09, 0xbb, 0x70, 0x82, 0x72, 0xbd, 0x78, 0x05, 0x91, 0x59, 0x51, 0x41, 0x30, 0x13, + 0x0b, 0x4e, 0xfe, 0x2e, 0x05, 0xd2, 0x2a, 0xdb, 0xcf, 0xd9, 0x28, 0xd2, 0x89, 0x8d, 0xe2, 0xa3, + 0x79, 0x07, 0x6e, 0xac, 0x1e, 0xc3, 0x82, 0x17, 0xdf, 0xa4, 0xe0, 0xca, 0xf2, 0x42, 0x6b, 0xa9, + 0x0f, 0x1f, 0x43, 0x7e, 0x42, 0xfd, 0xb1, 0x1d, 0x16, 0x1b, 0x6f, 0x2f, 0x39, 0xc2, 0x98, 0x78, + 0x3e, 0x56, 0x81, 0x56, 0xfc, 0x0c, 0xcc, 0xac, 0xaa, 0x96, 0x84, 0x37, 0x0b, 0x9e, 0xfe, 0x3e, + 0x0d, 0x2f, 0x2f, 0x35, 0xbe, 0xd4, 0xd1, 0xd7, 0x00, 0x0c, 0xcb, 0x99, 0xfa, 0xa2, 0xa0, 0x10, 0xfb, 0x53, 0x91, 0x23, 0x7c, 0xed, 0xb3, 0xbd, 0x67, 0xea, 0x47, 0xf2, 0x0c, 0x97, 0x83, 0x80, - 0x38, 0xe1, 0xce, 0xcc, 0xd1, 0x2c, 0x77, 0xf4, 0xed, 0x15, 0x23, 0x5d, 0x38, 0xeb, 0x3e, 0x06, + 0x38, 0xe1, 0xfe, 0xcc, 0xd1, 0x2c, 0x77, 0xf4, 0xf5, 0x15, 0x23, 0x5d, 0x38, 0xab, 0xdf, 0x03, 0xa4, 0x99, 0x06, 0xb5, 0x7c, 0xd5, 0xf3, 0x5d, 0x4a, 0x26, 0x86, 0x35, 0xe2, 0x1b, 0x70, 0xe1, - 0x30, 0x37, 0x24, 0xa6, 0x47, 0x95, 0x0d, 0x21, 0xee, 0x87, 0x52, 0xa6, 0xc1, 0x4f, 0x19, 0x37, - 0xa6, 0x91, 0x4f, 0x68, 0x08, 0x71, 0xa4, 0x51, 0xfd, 0x6b, 0x01, 0x4a, 0xb1, 0xb2, 0x0e, 0x5f, - 0x83, 0xf2, 0x23, 0xf2, 0x84, 0xa8, 0x61, 0xa9, 0x2e, 0x22, 0x51, 0x62, 0x58, 0x2f, 0x28, 0xd7, - 0x3f, 0x86, 0x6d, 0x4e, 0xb1, 0xa7, 0x3e, 0x75, 0x55, 0xcd, 0x24, 0x9e, 0xc7, 0x83, 0x56, 0xe0, - 0x54, 0xcc, 0x64, 0x5d, 0x26, 0x6a, 0x84, 0x12, 0x7c, 0x0b, 0xb6, 0xb8, 0xc6, 0x64, 0x6a, 0xfa, - 0x86, 0x63, 0x52, 0x95, 0x5d, 0x1e, 0x3c, 
0xbe, 0x11, 0x47, 0x9e, 0x6d, 0x32, 0xc6, 0x49, 0x40, - 0x60, 0x1e, 0x79, 0xb8, 0x09, 0x6f, 0x71, 0xb5, 0x11, 0xb5, 0xa8, 0x4b, 0x7c, 0xaa, 0xd2, 0xaf, - 0xa7, 0xc4, 0xf4, 0x54, 0x62, 0xe9, 0xea, 0x98, 0x78, 0x63, 0x69, 0x9b, 0x19, 0x38, 0x4a, 0x4b, - 0x29, 0xe5, 0x0a, 0x23, 0x1e, 0x07, 0x3c, 0x99, 0xd3, 0xea, 0x96, 0xfe, 0x05, 0xf1, 0xc6, 0xf8, - 0x10, 0x2e, 0x71, 0x2b, 0x9e, 0xef, 0x1a, 0xd6, 0x48, 0xd5, 0xc6, 0x54, 0x7b, 0xac, 0x4e, 0xfd, - 0xe1, 0x1d, 0xe9, 0x8d, 0x78, 0xff, 0xdc, 0xc3, 0x3e, 0xe7, 0x34, 0x18, 0xe5, 0xd4, 0x1f, 0xde, - 0xc1, 0x7d, 0x28, 0xb3, 0xc9, 0x98, 0x18, 0xdf, 0x50, 0x75, 0x68, 0xbb, 0xfc, 0x64, 0xa9, 0x2c, - 0x59, 0xd9, 0xb1, 0x08, 0xd6, 0xba, 0x81, 0xc2, 0x89, 0xad, 0xd3, 0xc3, 0x5c, 0xbf, 0x27, 0xcb, - 0x4d, 0xa5, 0x14, 0x5a, 0xb9, 0x67, 0xbb, 0x2c, 0xa1, 0x46, 0x76, 0x14, 0xe0, 0x92, 0x48, 0xa8, - 0x91, 0x1d, 0x86, 0xf7, 0x16, 0x6c, 0x69, 0x9a, 0x18, 0xb3, 0xa1, 0xa9, 0x41, 0x89, 0xef, 0x49, - 0x28, 0x11, 0x2c, 0x4d, 0x3b, 0x16, 0x84, 0x20, 0xc7, 0x3d, 0xfc, 0x19, 0xbc, 0x3e, 0x0b, 0x56, - 0x5c, 0x71, 0x73, 0x61, 0x94, 0xf3, 0xaa, 0xb7, 0x60, 0xcb, 0x39, 0x5f, 0x54, 0xc4, 0x89, 0x1e, - 0x9d, 0xf3, 0x79, 0xb5, 0x4f, 0x61, 0xdb, 0x19, 0x3b, 0x8b, 0x7a, 0x5b, 0x71, 0x3d, 0xec, 0x8c, - 0x9d, 0x79, 0xc5, 0xf7, 0xf8, 0x7d, 0xcf, 0xa5, 0x1a, 0xf1, 0xa9, 0x2e, 0x5d, 0x8e, 0xd3, 0x63, - 0x02, 0xbc, 0x0f, 0x48, 0xd3, 0x54, 0x6a, 0x91, 0x33, 0x93, 0xaa, 0xc4, 0xa5, 0x16, 0xf1, 0xa4, - 0xab, 0x71, 0x72, 0x45, 0xd3, 0x64, 0x2e, 0xad, 0x73, 0x21, 0xfe, 0x00, 0x36, 0xed, 0xb3, 0x47, - 0x9a, 0x48, 0x49, 0xd5, 0x71, 0xe9, 0xd0, 0x78, 0x26, 0xbd, 0xcb, 0xe3, 0xbb, 0xc1, 0x04, 0x3c, - 0x21, 0x7b, 0x1c, 0xc6, 0x37, 0x00, 0x69, 0xde, 0x98, 0xb8, 0x0e, 0xaf, 0x09, 0x3c, 0x87, 0x68, - 0x54, 0x7a, 0x4f, 0x50, 0x05, 0xde, 0x09, 0x61, 0xb6, 0x24, 0xbc, 0xa7, 0xc6, 0xd0, 0x0f, 0x2d, - 0x5e, 0x17, 0x4b, 0x82, 0x63, 0x81, 0xb5, 0x3d, 0x40, 0x2c, 0x14, 0x89, 0x8e, 0xf7, 0x38, 0xad, - 0xe2, 0x8c, 0x9d, 0x78, 0xbf, 0xef, 0xc0, 0x3a, 0x63, 0xce, 0x3a, 0xbd, 0x21, 0xea, 0x19, 0x67, - 0x1c, 0xeb, 0xf1, 0x01, 0x6c, 0x4f, 0x2d, 0xc3, 0xf2, 0xa9, 0xeb, 0xb8, 0x94, 0x5d, 0x26, 0xc4, - 0x8e, 0x20, 0xfd, 0x7b, 0x6d, 0xc5, 0x75, 0xe0, 0x34, 0xce, 0x16, 0x89, 0xa8, 0x6c, 0x4d, 0x17, - 0xc1, 0xea, 0x21, 0x94, 0xe3, 0xf9, 0x89, 0x8b, 0x20, 0x32, 0x14, 0xa5, 0xd8, 0x59, 0xdf, 0xe8, - 0x36, 0xd9, 0x29, 0xfd, 0x95, 0x8c, 0xd2, 0xac, 0x5a, 0x68, 0xb7, 0x06, 0xb2, 0xaa, 0x9c, 0x76, - 0x06, 0xad, 0x13, 0x19, 0x65, 0x3e, 0x28, 0x16, 0xfe, 0xb3, 0x86, 0x9e, 0x3f, 0x7f, 0xfe, 0x3c, - 0x7d, 0x3f, 0x5b, 0x78, 0x1f, 0x5d, 0xaf, 0x7e, 0x9f, 0x86, 0x4a, 0xb2, 0x4e, 0xc7, 0x3f, 0x87, - 0xcb, 0xe1, 0xa5, 0xda, 0xa3, 0xbe, 0xfa, 0xd4, 0x70, 0xf9, 0xc2, 0x99, 0x10, 0x51, 0xe9, 0x46, - 0x53, 0xb7, 0x1d, 0xb0, 0xfa, 0xd4, 0xff, 0xd2, 0x70, 0xd9, 0xb2, 0x98, 0x10, 0x1f, 0xb7, 0xe1, - 0xaa, 0x65, 0xab, 0x9e, 0x4f, 0x2c, 0x9d, 0xb8, 0xba, 0x3a, 0x7b, 0xce, 0x50, 0x89, 0xa6, 0x51, - 0xcf, 0xb3, 0xc5, 0x81, 0x15, 0x59, 0x79, 0xd3, 0xb2, 0xfb, 0x01, 0x79, 0xb6, 0x93, 0xd7, 0x03, - 0xea, 0x5c, 0x9a, 0x65, 0x56, 0xa5, 0xd9, 0x1b, 0x50, 0x9c, 0x10, 0x47, 0xa5, 0x96, 0xef, 0x9e, - 0xf3, 0xea, 0xb2, 0xa0, 0x14, 0x26, 0xc4, 0x91, 0x59, 0xfb, 0xd5, 0xcd, 0x44, 0x32, 0x9a, 0x05, - 0x54, 0xbc, 0x9f, 0x2d, 0x14, 0x11, 0x54, 0xff, 0x99, 0x81, 0x72, 0xbc, 0xda, 0x64, 0xc5, 0xbb, - 0xc6, 0x4f, 0x96, 0x14, 0xdf, 0x7b, 0xde, 0x79, 0x61, 0x6d, 0x5a, 0x6b, 0xb0, 0x23, 0xe7, 0x30, - 0x2f, 0x6a, 0x40, 0x45, 0x68, 0xb2, 0xe3, 0x9e, 0xed, 0x36, 0x54, 0xdc, 0x2c, 0x0a, 0x4a, 0xd0, - 0xc2, 0xc7, 0x90, 0x7f, 0xe4, 0x71, 0xdb, 0x79, 0x6e, 0xfb, 0xdd, 
0x17, 0xdb, 0xbe, 0xdf, 0xe7, - 0xc6, 0x8b, 0xf7, 0xfb, 0x6a, 0xa7, 0xab, 0x9c, 0xd4, 0xdb, 0x4a, 0xa0, 0x8e, 0xaf, 0x40, 0xd6, - 0x24, 0xdf, 0x9c, 0x27, 0x0f, 0x27, 0x0e, 0x5d, 0x74, 0x12, 0xae, 0x40, 0xf6, 0x29, 0x25, 0x8f, - 0x93, 0x47, 0x02, 0x87, 0x5e, 0xe1, 0x62, 0xd8, 0x87, 0x1c, 0x8f, 0x17, 0x06, 0x08, 0x22, 0x86, - 0x5e, 0xc3, 0x05, 0xc8, 0x36, 0xba, 0x0a, 0x5b, 0x10, 0x08, 0xca, 0x02, 0x55, 0x7b, 0x2d, 0xb9, - 0x21, 0xa3, 0x74, 0xf5, 0x16, 0xe4, 0x45, 0x10, 0xd8, 0x62, 0x89, 0xc2, 0x80, 0x5e, 0x0b, 0x9a, - 0x81, 0x8d, 0x54, 0x28, 0x3d, 0x3d, 0x39, 0x92, 0x15, 0x94, 0x4e, 0x4e, 0x75, 0x16, 0xe5, 0xaa, - 0x1e, 0x94, 0xe3, 0xe5, 0xe6, 0x8f, 0x92, 0x65, 0xd5, 0xbf, 0xa5, 0xa0, 0x14, 0x2b, 0x1f, 0x59, - 0xe1, 0x42, 0x4c, 0xd3, 0x7e, 0xaa, 0x12, 0xd3, 0x20, 0x5e, 0x90, 0x1a, 0xc0, 0xa1, 0x3a, 0x43, - 0x2e, 0x3a, 0x75, 0x3f, 0xd2, 0x12, 0xc9, 0xa1, 0x7c, 0xf5, 0x4f, 0x29, 0x40, 0xf3, 0x05, 0xe8, - 0x9c, 0x9b, 0xa9, 0x9f, 0xd2, 0xcd, 0xea, 0x1f, 0x53, 0x50, 0x49, 0x56, 0x9d, 0x73, 0xee, 0x5d, - 0xfb, 0x49, 0xdd, 0xfb, 0x47, 0x1a, 0xd6, 0x13, 0xb5, 0xe6, 0x45, 0xbd, 0xfb, 0x1a, 0x36, 0x0d, - 0x9d, 0x4e, 0x1c, 0xdb, 0xa7, 0x96, 0x76, 0xae, 0x9a, 0xf4, 0x09, 0x35, 0xa5, 0x2a, 0xdf, 0x34, - 0xf6, 0x5f, 0x5c, 0xcd, 0xd6, 0x5a, 0x33, 0xbd, 0x36, 0x53, 0x3b, 0xdc, 0x6a, 0x35, 0xe5, 0x93, - 0x5e, 0x77, 0x20, 0x77, 0x1a, 0x0f, 0xd5, 0xd3, 0xce, 0x2f, 0x3b, 0xdd, 0x2f, 0x3b, 0x0a, 0x32, - 0xe6, 0x68, 0xaf, 0x70, 0xd9, 0xf7, 0x00, 0xcd, 0x3b, 0x85, 0x2f, 0xc3, 0x32, 0xb7, 0xd0, 0x6b, - 0x78, 0x0b, 0x36, 0x3a, 0x5d, 0xb5, 0xdf, 0x6a, 0xca, 0xaa, 0x7c, 0xef, 0x9e, 0xdc, 0x18, 0xf4, - 0xc5, 0xf5, 0x3e, 0x62, 0x0f, 0x12, 0x0b, 0xbc, 0xfa, 0x87, 0x0c, 0x6c, 0x2d, 0xf1, 0x04, 0xd7, - 0x83, 0x9b, 0x85, 0xb8, 0xec, 0x7c, 0x74, 0x11, 0xef, 0x6b, 0xac, 0x20, 0xe8, 0x11, 0xd7, 0x0f, - 0x2e, 0x22, 0x37, 0x80, 0x45, 0xc9, 0xf2, 0x8d, 0xa1, 0x41, 0xdd, 0xe0, 0x35, 0x44, 0x5c, 0x37, - 0x36, 0x66, 0xb8, 0x78, 0x10, 0xf9, 0x19, 0x60, 0xc7, 0xf6, 0x0c, 0xdf, 0x78, 0x42, 0x55, 0xc3, - 0x0a, 0x9f, 0x4e, 0xd8, 0xf5, 0x23, 0xab, 0xa0, 0x50, 0xd2, 0xb2, 0xfc, 0x88, 0x6d, 0xd1, 0x11, - 0x99, 0x63, 0xb3, 0xcd, 0x3c, 0xa3, 0xa0, 0x50, 0x12, 0xb1, 0xaf, 0x41, 0x59, 0xb7, 0xa7, 0xac, - 0x26, 0x13, 0x3c, 0x76, 0x76, 0xa4, 0x94, 0x92, 0xc0, 0x22, 0x4a, 0x50, 0x6d, 0xcf, 0xde, 0x6c, - 0xca, 0x4a, 0x49, 0x60, 0x82, 0x72, 0x1d, 0x36, 0xc8, 0x68, 0xe4, 0x32, 0xe3, 0xa1, 0x21, 0x71, - 0x7f, 0xa8, 0x44, 0x30, 0x27, 0xee, 0xdc, 0x87, 0x42, 0x18, 0x07, 0x76, 0x54, 0xb3, 0x48, 0xa8, - 0x8e, 0x78, 0x39, 0x4b, 0xef, 0x15, 0x95, 0x82, 0x15, 0x0a, 0xaf, 0x41, 0xd9, 0xf0, 0xd4, 0xd9, - 0x13, 0x6e, 0x7a, 0x37, 0xbd, 0x57, 0x50, 0x4a, 0x86, 0x17, 0xbd, 0xd9, 0x55, 0xbf, 0x4b, 0x43, - 0x25, 0xf9, 0x04, 0x8d, 0x9b, 0x50, 0x30, 0x6d, 0x8d, 0xf0, 0xd4, 0x12, 0xdf, 0x3f, 0xf6, 0x5e, - 0xf2, 0x6a, 0x5d, 0x6b, 0x07, 0x7c, 0x25, 0xd2, 0xdc, 0xf9, 0x7b, 0x0a, 0x0a, 0x21, 0x8c, 0x2f, - 0x41, 0xd6, 0x21, 0xfe, 0x98, 0x9b, 0xcb, 0x1d, 0xa5, 0x51, 0x4a, 0xe1, 0x6d, 0x86, 0x7b, 0x0e, - 0xb1, 0x78, 0x0a, 0x04, 0x38, 0x6b, 0xb3, 0x79, 0x35, 0x29, 0xd1, 0xf9, 0xe5, 0xc4, 0x9e, 0x4c, - 0xa8, 0xe5, 0x7b, 0xe1, 0xbc, 0x06, 0x78, 0x23, 0x80, 0xf1, 0x87, 0xb0, 0xe9, 0xbb, 0xc4, 0x30, - 0x13, 0xdc, 0x2c, 0xe7, 0xa2, 0x50, 0x10, 0x91, 0x0f, 0xe1, 0x4a, 0x68, 0x57, 0xa7, 0x3e, 0xd1, - 0xc6, 0x54, 0x9f, 0x29, 0xe5, 0xf9, 0xfb, 0xe6, 0xe5, 0x80, 0xd0, 0x0c, 0xe4, 0xa1, 0x6e, 0xf5, - 0xfb, 0x14, 0x6c, 0x86, 0xd7, 0x29, 0x3d, 0x0a, 0xd6, 0x09, 0x00, 0xb1, 0x2c, 0xdb, 0x8f, 0x87, - 0x6b, 0x31, 0x95, 0x17, 0xf4, 0x6a, 0xf5, 0x48, 0x49, 0x89, 0x19, 0xd8, 0x99, 0x00, 0xcc, 
0x24, - 0x2b, 0xc3, 0x76, 0x15, 0x4a, 0xc1, 0xf7, 0x05, 0xfe, 0x91, 0x4a, 0x5c, 0xc0, 0x41, 0x40, 0xec, - 0xde, 0x85, 0xb7, 0x21, 0x77, 0x46, 0x47, 0x86, 0x15, 0xbc, 0x7a, 0x8a, 0x46, 0xf8, 0x96, 0x9a, - 0x8d, 0xde, 0x52, 0x8f, 0x7e, 0x97, 0x82, 0x2d, 0xcd, 0x9e, 0xcc, 0xfb, 0x7b, 0x84, 0xe6, 0x5e, - 0x01, 0xbc, 0x2f, 0x52, 0x5f, 0xdd, 0x1d, 0x19, 0xfe, 0x78, 0x7a, 0x56, 0xd3, 0xec, 0xc9, 0xfe, - 0xc8, 0x36, 0x89, 0x35, 0x9a, 0x7d, 0x65, 0xe3, 0x7f, 0xb4, 0x8f, 0x46, 0xd4, 0xfa, 0x68, 0x64, - 0xc7, 0xbe, 0xb9, 0x7d, 0x3e, 0xfb, 0xfb, 0x6d, 0x3a, 0x73, 0xdc, 0x3b, 0xfa, 0x73, 0x7a, 0xe7, - 0x58, 0xf4, 0xd5, 0x0b, 0x63, 0xa3, 0xd0, 0xa1, 0x49, 0x35, 0x36, 0xde, 0xff, 0x05, 0x00, 0x00, - 0xff, 0xff, 0xa2, 0xc3, 0x4e, 0x18, 0xbe, 0x1b, 0x00, 0x00, + 0x30, 0x37, 0x24, 0xa6, 0x47, 0x95, 0x0d, 0x21, 0xee, 0x87, 0x52, 0xa6, 0xc1, 0xcf, 0x38, 0x37, + 0xa6, 0x91, 0x4f, 0x68, 0x08, 0x71, 0xa4, 0x51, 0xfd, 0xb6, 0x00, 0xa5, 0x58, 0x59, 0x8a, 0x6f, + 0x40, 0xf9, 0x21, 0x79, 0x4c, 0xd4, 0xf0, 0xaa, 0x21, 0x22, 0x51, 0x62, 0x58, 0x2f, 0xb8, 0x6e, + 0xbc, 0x07, 0xdb, 0x9c, 0x62, 0x4f, 0x7d, 0xea, 0xaa, 0x9a, 0x49, 0x3c, 0x8f, 0x07, 0xad, 0xc0, + 0xa9, 0x98, 0xc9, 0xba, 0x4c, 0xd4, 0x08, 0x25, 0xf8, 0x2e, 0x6c, 0x71, 0x8d, 0xc9, 0xd4, 0xf4, + 0x0d, 0xc7, 0xa4, 0x2a, 0xbb, 0xfc, 0x78, 0x7c, 0x23, 0x8e, 0x3c, 0xdb, 0x64, 0x8c, 0xd3, 0x80, + 0xc0, 0x3c, 0xf2, 0x70, 0x13, 0x5e, 0xe3, 0x6a, 0x23, 0x6a, 0x51, 0x97, 0xf8, 0x54, 0xa5, 0x5f, + 0x4e, 0x89, 0xe9, 0xa9, 0xc4, 0xd2, 0xd5, 0x31, 0xf1, 0xc6, 0xd2, 0x36, 0x33, 0x70, 0x94, 0x96, + 0x52, 0xca, 0x35, 0x46, 0x3c, 0x0e, 0x78, 0x32, 0xa7, 0xd5, 0x2d, 0xfd, 0x13, 0xe2, 0x8d, 0xf1, + 0x21, 0x5c, 0xe1, 0x56, 0x3c, 0xdf, 0x35, 0xac, 0x91, 0xaa, 0x8d, 0xa9, 0xf6, 0x48, 0x9d, 0xfa, + 0xc3, 0xfb, 0xd2, 0x2b, 0xf1, 0xfe, 0xb9, 0x87, 0x7d, 0xce, 0x69, 0x30, 0xca, 0x99, 0x3f, 0xbc, + 0x8f, 0xfb, 0x50, 0x66, 0x93, 0x31, 0x31, 0xbe, 0xa2, 0xea, 0xd0, 0x76, 0xf9, 0xc9, 0x52, 0x59, + 0xb2, 0xb2, 0x63, 0x11, 0xac, 0x75, 0x03, 0x85, 0x53, 0x5b, 0xa7, 0x87, 0xb9, 0x7e, 0x4f, 0x96, + 0x9b, 0x4a, 0x29, 0xb4, 0xf2, 0xc0, 0x76, 0x59, 0x42, 0x8d, 0xec, 0x28, 0xc0, 0x25, 0x91, 0x50, + 0x23, 0x3b, 0x0c, 0xef, 0x5d, 0xd8, 0xd2, 0x34, 0x31, 0x66, 0x43, 0x53, 0x83, 0x2b, 0x8a, 0x27, + 0xa1, 0x44, 0xb0, 0x34, 0xed, 0x58, 0x10, 0x82, 0x1c, 0xf7, 0xf0, 0x87, 0xf0, 0xf2, 0x2c, 0x58, + 0x71, 0xc5, 0xcd, 0x85, 0x51, 0xce, 0xab, 0xde, 0x85, 0x2d, 0xe7, 0x62, 0x51, 0x11, 0x27, 0x7a, + 0x74, 0x2e, 0xe6, 0xd5, 0x3e, 0x80, 0x6d, 0x67, 0xec, 0x2c, 0xea, 0xdd, 0x8e, 0xeb, 0x61, 0x67, + 0xec, 0xcc, 0x2b, 0xbe, 0xc5, 0xef, 0xab, 0x2e, 0xd5, 0x88, 0x4f, 0x75, 0xe9, 0x6a, 0x9c, 0x1e, + 0x13, 0xe0, 0x7d, 0x40, 0x9a, 0xa6, 0x52, 0x8b, 0x9c, 0x9b, 0x54, 0x25, 0x2e, 0xb5, 0x88, 0x27, + 0x5d, 0x8f, 0x93, 0x2b, 0x9a, 0x26, 0x73, 0x69, 0x9d, 0x0b, 0xf1, 0x6d, 0xd8, 0xb4, 0xcf, 0x1f, + 0x6a, 0x22, 0x25, 0x55, 0xc7, 0xa5, 0x43, 0xe3, 0xa9, 0xf4, 0x26, 0x8f, 0xef, 0x06, 0x13, 0xf0, + 0x84, 0xec, 0x71, 0x18, 0xdf, 0x02, 0xa4, 0x79, 0x63, 0xe2, 0x3a, 0xbc, 0x26, 0xf0, 0x1c, 0xa2, + 0x51, 0xe9, 0x2d, 0x41, 0x15, 0x78, 0x27, 0x84, 0xd9, 0x92, 0xf0, 0x9e, 0x18, 0x43, 0x3f, 0xb4, + 0x78, 0x53, 0x2c, 0x09, 0x8e, 0x05, 0xd6, 0xf6, 0x00, 0xb1, 0x50, 0x24, 0x3a, 0xde, 0xe3, 0xb4, + 0x8a, 0x33, 0x76, 0xe2, 0xfd, 0xbe, 0x01, 0xeb, 0x8c, 0x39, 0xeb, 0xf4, 0x96, 0xa8, 0x67, 0x9c, + 0x71, 0xac, 0xc7, 0x1f, 0xad, 0xb4, 0xac, 0x1e, 0x42, 0x39, 0x9e, 0x9f, 0xb8, 0x08, 0x22, 0x43, + 0x51, 0x8a, 0x9d, 0xf5, 0x8d, 0x6e, 0x93, 0x9d, 0xd2, 0x5f, 0xc8, 0x28, 0xcd, 0xaa, 0x85, 0x76, + 0x6b, 0x20, 0xab, 0xca, 0x59, 0x67, 0xd0, 0x3a, 0x95, 
0x51, 0x26, 0x56, 0x96, 0x9e, 0x64, 0x0b, + 0x6f, 0xa3, 0x9b, 0xd5, 0xef, 0xd2, 0x50, 0x49, 0xde, 0x33, 0xf0, 0xcf, 0xe1, 0x6a, 0xf8, 0x28, + 0xe0, 0x51, 0x5f, 0x7d, 0x62, 0xb8, 0x7c, 0xe1, 0x4c, 0x88, 0xa8, 0xb3, 0xa3, 0xa9, 0xdb, 0x0e, + 0x58, 0x7d, 0xea, 0x7f, 0x6a, 0xb8, 0x6c, 0x59, 0x4c, 0x88, 0x8f, 0xdb, 0x70, 0xdd, 0xb2, 0x55, + 0xcf, 0x27, 0x96, 0x4e, 0x5c, 0x5d, 0x9d, 0x3d, 0xc7, 0xa8, 0x44, 0xd3, 0xa8, 0xe7, 0xd9, 0xe2, + 0xc0, 0x8a, 0xac, 0xbc, 0x6a, 0xd9, 0xfd, 0x80, 0x3c, 0xdb, 0xc9, 0xeb, 0x01, 0x75, 0x2e, 0xcd, + 0x32, 0xab, 0xd2, 0xec, 0x15, 0x28, 0x4e, 0x88, 0xa3, 0x52, 0xcb, 0x77, 0x2f, 0x78, 0x75, 0x59, + 0x50, 0x0a, 0x13, 0xe2, 0xc8, 0xac, 0xfd, 0x42, 0x8a, 0xfc, 0x93, 0x6c, 0xa1, 0x80, 0x8a, 0x27, + 0xd9, 0x42, 0x11, 0x41, 0xf5, 0x5f, 0x19, 0x28, 0xc7, 0xab, 0x4d, 0x56, 0xbc, 0x6b, 0xfc, 0x64, + 0x49, 0xf1, 0xbd, 0xe7, 0x8d, 0xef, 0xad, 0x4d, 0x6b, 0x0d, 0x76, 0xe4, 0x1c, 0xe6, 0x45, 0x0d, + 0xa8, 0x08, 0x4d, 0x76, 0xdc, 0xb3, 0xdd, 0x86, 0x8a, 0x7b, 0x4d, 0x41, 0x09, 0x5a, 0xf8, 0x18, + 0xf2, 0x0f, 0x3d, 0x6e, 0x3b, 0xcf, 0x6d, 0xbf, 0xf9, 0xfd, 0xb6, 0x4f, 0xfa, 0xdc, 0x78, 0xf1, + 0xa4, 0xaf, 0x76, 0xba, 0xca, 0x69, 0xbd, 0xad, 0x04, 0xea, 0xf8, 0x1a, 0x64, 0x4d, 0xf2, 0xd5, + 0x45, 0xf2, 0x70, 0xe2, 0xd0, 0x65, 0x27, 0xe1, 0x1a, 0x64, 0x9f, 0x50, 0xf2, 0x28, 0x79, 0x24, + 0x70, 0xe8, 0x47, 0x5c, 0x0c, 0xfb, 0x90, 0xe3, 0xf1, 0xc2, 0x00, 0x41, 0xc4, 0xd0, 0x4b, 0xb8, + 0x00, 0xd9, 0x46, 0x57, 0x61, 0x0b, 0x02, 0x41, 0x59, 0xa0, 0x6a, 0xaf, 0x25, 0x37, 0x64, 0x94, + 0xae, 0xde, 0x85, 0xbc, 0x08, 0x02, 0x5b, 0x2c, 0x51, 0x18, 0xd0, 0x4b, 0x41, 0x33, 0xb0, 0x91, + 0x0a, 0xa5, 0x67, 0xa7, 0x47, 0xb2, 0x82, 0xd2, 0xc9, 0xa9, 0xce, 0xa2, 0x5c, 0xd5, 0x83, 0x72, + 0xbc, 0xdc, 0x7c, 0x31, 0x57, 0xc9, 0xbf, 0xa7, 0xa0, 0x14, 0x2b, 0x1f, 0x59, 0xe1, 0x42, 0x4c, + 0xd3, 0x7e, 0xa2, 0x12, 0xd3, 0x20, 0x5e, 0x90, 0x1a, 0xc0, 0xa1, 0x3a, 0x43, 0x2e, 0x3b, 0x75, + 0x2f, 0x68, 0x89, 0xe4, 0x50, 0xbe, 0xfa, 0x97, 0x14, 0xa0, 0xf9, 0x02, 0x74, 0xce, 0xcd, 0xd4, + 0x4f, 0xe9, 0x66, 0xf5, 0xcf, 0x29, 0xa8, 0x24, 0xab, 0xce, 0x39, 0xf7, 0x6e, 0xfc, 0xa4, 0xee, + 0xfd, 0x33, 0x0d, 0xeb, 0x89, 0x5a, 0xf3, 0xb2, 0xde, 0x7d, 0x09, 0x9b, 0x86, 0x4e, 0x27, 0x8e, + 0xed, 0x53, 0x4b, 0xbb, 0x50, 0x4d, 0xfa, 0x98, 0x9a, 0x52, 0x95, 0x6f, 0x1a, 0xfb, 0xdf, 0x5f, + 0xcd, 0xd6, 0x5a, 0x33, 0xbd, 0x36, 0x53, 0x3b, 0xdc, 0x6a, 0x35, 0xe5, 0xd3, 0x5e, 0x77, 0x20, + 0x77, 0x1a, 0x9f, 0xab, 0x67, 0x9d, 0x5f, 0x75, 0xba, 0x9f, 0x76, 0x14, 0x64, 0xcc, 0xd1, 0x7e, + 0xc4, 0x65, 0xdf, 0x03, 0x34, 0xef, 0x14, 0xbe, 0x0a, 0xcb, 0xdc, 0x42, 0x2f, 0xe1, 0x2d, 0xd8, + 0xe8, 0x74, 0xd5, 0x7e, 0xab, 0x29, 0xab, 0xf2, 0x83, 0x07, 0x72, 0x63, 0xd0, 0x17, 0xd7, 0xfb, + 0x88, 0x3d, 0x48, 0x2c, 0xf0, 0xea, 0x9f, 0x32, 0xb0, 0xb5, 0xc4, 0x13, 0x5c, 0x0f, 0x6e, 0x16, + 0xe2, 0xb2, 0xf3, 0xee, 0x65, 0xbc, 0xaf, 0xb1, 0x82, 0xa0, 0x47, 0x5c, 0x3f, 0xb8, 0x88, 0xdc, + 0x02, 0x16, 0x25, 0xcb, 0x37, 0x86, 0x06, 0x75, 0x83, 0xd7, 0x10, 0x71, 0xdd, 0xd8, 0x98, 0xe1, + 0xe2, 0x41, 0xe4, 0x67, 0x80, 0x1d, 0xdb, 0x33, 0x7c, 0xe3, 0x31, 0x55, 0x0d, 0x2b, 0x7c, 0x3a, + 0x61, 0xd7, 0x8f, 0xac, 0x82, 0x42, 0x49, 0xcb, 0xf2, 0x23, 0xb6, 0x45, 0x47, 0x64, 0x8e, 0xcd, + 0x36, 0xf3, 0x8c, 0x82, 0x42, 0x49, 0xc4, 0xbe, 0x01, 0x65, 0xdd, 0x9e, 0xb2, 0x9a, 0x4c, 0xf0, + 0xd8, 0xd9, 0x91, 0x52, 0x4a, 0x02, 0x8b, 0x28, 0x41, 0xb5, 0x3d, 0x7b, 0xb3, 0x29, 0x2b, 0x25, + 0x81, 0x09, 0xca, 0x4d, 0xd8, 0x20, 0xa3, 0x91, 0xcb, 0x8c, 0x87, 0x86, 0xc4, 0xfd, 0xa1, 0x12, + 0xc1, 0x9c, 0xb8, 0x73, 0x02, 0x85, 0x30, 0x0e, 0xec, 0xa8, 0x66, 0x91, 0x50, 
0x1d, 0xf1, 0x6e, + 0x97, 0xde, 0x2b, 0x2a, 0x05, 0x2b, 0x14, 0xde, 0x80, 0xb2, 0xe1, 0xa9, 0xb3, 0x27, 0xe8, 0xf4, + 0x6e, 0x7a, 0xaf, 0xa0, 0x94, 0x0c, 0x2f, 0x7a, 0xbe, 0xab, 0x7e, 0x93, 0x86, 0x4a, 0xf2, 0x09, + 0x1d, 0x37, 0xa1, 0x60, 0xda, 0x1a, 0xe1, 0xa9, 0x25, 0xbe, 0xdf, 0xec, 0x3d, 0xe7, 0xd5, 0xbd, + 0xd6, 0x0e, 0xf8, 0x4a, 0xa4, 0xb9, 0xf3, 0x8f, 0x14, 0x14, 0x42, 0x18, 0x5f, 0x81, 0xac, 0x43, + 0xfc, 0x31, 0x37, 0x97, 0x3b, 0x4a, 0xa3, 0x94, 0xc2, 0xdb, 0x0c, 0xf7, 0x1c, 0x62, 0xf1, 0x14, + 0x08, 0x70, 0xd6, 0x66, 0xf3, 0x6a, 0x52, 0xa2, 0xf3, 0xcb, 0x89, 0x3d, 0x99, 0x50, 0xcb, 0xf7, + 0xc2, 0x79, 0x0d, 0xf0, 0x46, 0x00, 0xe3, 0x77, 0x60, 0xd3, 0x77, 0x89, 0x61, 0x26, 0xb8, 0x59, + 0xce, 0x45, 0xa1, 0x20, 0x22, 0x1f, 0xc2, 0xb5, 0xd0, 0xae, 0x4e, 0x7d, 0xa2, 0x8d, 0xa9, 0x3e, + 0x53, 0xca, 0xf3, 0xf7, 0xd9, 0xab, 0x01, 0xa1, 0x19, 0xc8, 0x43, 0xdd, 0xea, 0x77, 0x29, 0xd8, + 0x0c, 0xaf, 0x53, 0x7a, 0x14, 0xac, 0x53, 0x00, 0x62, 0x59, 0xb6, 0x1f, 0x0f, 0xd7, 0x62, 0x2a, + 0x2f, 0xe8, 0xd5, 0xea, 0x91, 0x92, 0x12, 0x33, 0xb0, 0x33, 0x01, 0x98, 0x49, 0x56, 0x86, 0xed, + 0x3a, 0x94, 0x82, 0xef, 0x23, 0xfc, 0x23, 0x9b, 0xb8, 0x80, 0x83, 0x80, 0xd8, 0xbd, 0x0b, 0x6f, + 0x43, 0xee, 0x9c, 0x8e, 0x0c, 0x2b, 0x78, 0xf5, 0x14, 0x8d, 0xf0, 0x25, 0x37, 0x1b, 0xbd, 0xe4, + 0x1e, 0xfd, 0x21, 0x05, 0x5b, 0x9a, 0x3d, 0x99, 0xf7, 0xf7, 0x08, 0xcd, 0xbd, 0x02, 0x78, 0x9f, + 0xa4, 0xbe, 0xf8, 0x78, 0x64, 0xf8, 0xe3, 0xe9, 0x79, 0x4d, 0xb3, 0x27, 0xfb, 0x23, 0xdb, 0x24, + 0xd6, 0x68, 0xf6, 0x95, 0x90, 0xff, 0xd1, 0xde, 0x1d, 0x51, 0xeb, 0xdd, 0x91, 0x1d, 0xfb, 0x66, + 0xf8, 0xd1, 0xec, 0xef, 0xd7, 0xe9, 0xcc, 0x71, 0xef, 0xe8, 0xaf, 0xe9, 0x9d, 0x63, 0xd1, 0x57, + 0x2f, 0x8c, 0x8d, 0x42, 0x87, 0x26, 0xd5, 0xd8, 0x78, 0xff, 0x17, 0x00, 0x00, 0xff, 0xff, 0x0c, + 0xab, 0xb6, 0x37, 0x7e, 0x1c, 0x00, 0x00, } diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto index 70b82a4dcd..4d4fb378f5 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto @@ -101,6 +101,8 @@ message DescriptorProto { message ExtensionRange { optional int32 start = 1; optional int32 end = 2; + + optional ExtensionRangeOptions options = 3; } repeated ExtensionRange extension_range = 5; @@ -121,6 +123,14 @@ message DescriptorProto { repeated string reserved_name = 10; } +message ExtensionRangeOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + // Describes a field within a message. message FieldDescriptorProto { enum Type { @@ -351,7 +361,7 @@ message FileOptions { optional bool cc_generic_services = 16 [default=false]; optional bool java_generic_services = 17 [default=false]; optional bool py_generic_services = 18 [default=false]; - optional bool php_generic_services = 19 [default=false]; + optional bool php_generic_services = 42 [default=false]; // Is this file deprecated? // Depending on the target platform, this can emit Deprecated annotations @@ -483,13 +493,15 @@ message FieldOptions { // The jstype option determines the JavaScript type used for values of the // field. The option is permitted only for 64 bit integral and fixed types - // (int64, uint64, sint64, fixed64, sfixed64). 
By default these types are - // represented as JavaScript strings. This avoids loss of precision that can - // happen when a large value is converted to a floating point JavaScript - // numbers. Specifying JS_NUMBER for the jstype causes the generated - // JavaScript code to use the JavaScript "number" type instead of strings. - // This option is an enum to permit additional types to be added, - // e.g. goog.math.Integer. + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. optional JSType jstype = 6 [default = JS_NORMAL]; enum JSType { // Use the default type. diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go index 6c9a6cf745..f34601723d 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -62,6 +62,16 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // any.Unpack(foo) // ... // +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ... +// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... +// } +// // The pack methods provided by protobuf library will by default use // 'type.googleapis.com/full.type.name' as the type URL and the unpack // methods only use the fully qualified type name after the last '/' diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto index 9bd3f50a45..c748667623 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.proto +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto @@ -74,6 +74,16 @@ option objc_class_prefix = "GPB"; // any.Unpack(foo) // ... // +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ... +// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... +// } +// // The pack methods provided by protobuf library will by default use // 'type.googleapis.com/full.type.name' as the type URL and the unpack // methods only use the fully qualified type name after the last '/' diff --git a/vendor/github.com/google/go-github/github/apps_installation.go b/vendor/github.com/google/go-github/github/apps_installation.go index 6a27799531..5c932919ff 100644 --- a/vendor/github.com/google/go-github/github/apps_installation.go +++ b/vendor/github.com/google/go-github/github/apps_installation.go @@ -5,7 +5,10 @@ package github -import "context" +import ( + "context" + "fmt" +) // Installation represents a GitHub Apps installation. type Installation struct { @@ -47,3 +50,35 @@ func (s *AppsService) ListRepos(ctx context.Context, opt *ListOptions) ([]*Repos return r.Repositories, resp, nil } + +// AddRepository adds a single repository to an installation. 
+// +// GitHub API docs: https://developer.github.com/v3/apps/installations/#add-repository-to-installation +func (s *AppsService) AddRepository(ctx context.Context, instID, repoID int) (*Repository, *Response, error) { + u := fmt.Sprintf("apps/installations/%v/repositories/%v", instID, repoID) + req, err := s.client.NewRequest("PUT", u, nil) + if err != nil { + return nil, nil, err + } + + r := new(Repository) + resp, err := s.client.Do(ctx, req, r) + if err != nil { + return nil, resp, err + } + + return r, resp, nil +} + +// RemoveRepository removes a single repository from an installation. +// +// GitHub docs: https://developer.github.com/v3/apps/installations/#remove-repository-from-installation +func (s *AppsService) RemoveRepository(ctx context.Context, instID, repoID int) (*Response, error) { + u := fmt.Sprintf("apps/installations/%v/repositories/%v", instID, repoID) + req, err := s.client.NewRequest("DELETE", u, nil) + if err != nil { + return nil, err + } + + return s.client.Do(ctx, req, nil) +} diff --git a/vendor/github.com/google/go-github/github/github-accessors.go b/vendor/github.com/google/go-github/github/github-accessors.go index 4fed8e50a6..6da009baf1 100644 --- a/vendor/github.com/google/go-github/github/github-accessors.go +++ b/vendor/github.com/google/go-github/github/github-accessors.go @@ -2100,6 +2100,14 @@ func (i *Issue) GetComments() int { return *i.Comments } +// GetCommentsURL returns the CommentsURL field if it's non-nil, zero value otherwise. +func (i *Issue) GetCommentsURL() string { + if i == nil || i.CommentsURL == nil { + return "" + } + return *i.CommentsURL +} + // GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. func (i *Issue) GetCreatedAt() time.Time { if i == nil || i.CreatedAt == nil { @@ -2108,6 +2116,14 @@ func (i *Issue) GetCreatedAt() time.Time { return *i.CreatedAt } +// GetEventsURL returns the EventsURL field if it's non-nil, zero value otherwise. +func (i *Issue) GetEventsURL() string { + if i == nil || i.EventsURL == nil { + return "" + } + return *i.EventsURL +} + // GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. func (i *Issue) GetHTMLURL() string { if i == nil || i.HTMLURL == nil { @@ -2124,6 +2140,14 @@ func (i *Issue) GetID() int { return *i.ID } +// GetLabelsURL returns the LabelsURL field if it's non-nil, zero value otherwise. +func (i *Issue) GetLabelsURL() string { + if i == nil || i.LabelsURL == nil { + return "" + } + return *i.LabelsURL +} + // GetLocked returns the Locked field if it's non-nil, zero value otherwise. func (i *Issue) GetLocked() bool { if i == nil || i.Locked == nil { @@ -2140,6 +2164,14 @@ func (i *Issue) GetNumber() int { return *i.Number } +// GetRepositoryURL returns the RepositoryURL field if it's non-nil, zero value otherwise. +func (i *Issue) GetRepositoryURL() string { + if i == nil || i.RepositoryURL == nil { + return "" + } + return *i.RepositoryURL +} + // GetState returns the State field if it's non-nil, zero value otherwise. 
func (i *Issue) GetState() string { if i == nil || i.State == nil { diff --git a/vendor/github.com/google/go-github/github/github.go b/vendor/github.com/google/go-github/github/github.go index 99ad8d078d..b33f70a412 100644 --- a/vendor/github.com/google/go-github/github/github.go +++ b/vendor/github.com/google/go-github/github/github.go @@ -27,7 +27,7 @@ import ( ) const ( - libraryVersion = "11" + libraryVersion = "13" defaultBaseURL = "https://api.github.com/" uploadBaseURL = "https://uploads.github.com/" userAgent = "go-github/" + libraryVersion @@ -252,7 +252,9 @@ func (c *Client) NewRequest(method, urlStr string, body interface{}) (*http.Requ var buf io.ReadWriter if body != nil { buf = new(bytes.Buffer) - err := json.NewEncoder(buf).Encode(body) + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + err := enc.Encode(body) if err != nil { return nil, err } diff --git a/vendor/github.com/google/go-github/github/issues.go b/vendor/github.com/google/go-github/github/issues.go index b437d5063a..9b7a9d6879 100644 --- a/vendor/github.com/google/go-github/github/issues.go +++ b/vendor/github.com/google/go-github/github/issues.go @@ -23,6 +23,7 @@ type IssuesService service // but not every issue is a pull request. Some endpoints, events, and webhooks // may also return pull requests via this struct. If PullRequestLinks is nil, // this is an issue, and if PullRequestLinks is not nil, this is a pull request. +// The IsPullRequest helper method can be used to check that. type Issue struct { ID *int `json:"id,omitempty"` Number *int `json:"number,omitempty"` @@ -40,6 +41,10 @@ type Issue struct { ClosedBy *User `json:"closed_by,omitempty"` URL *string `json:"url,omitempty"` HTMLURL *string `json:"html_url,omitempty"` + CommentsURL *string `json:"comments_url,omitempty"` + EventsURL *string `json:"events_url,omitempty"` + LabelsURL *string `json:"labels_url,omitempty"` + RepositoryURL *string `json:"repository_url,omitempty"` Milestone *Milestone `json:"milestone,omitempty"` PullRequestLinks *PullRequestLinks `json:"pull_request,omitempty"` Repository *Repository `json:"repository,omitempty"` @@ -55,6 +60,13 @@ func (i Issue) String() string { return Stringify(i) } +// IsPullRequest reports whether the issue is also a pull request. It uses the +// method recommended by GitHub's API documentation, which is to check whether +// PullRequestLinks is non-nil. +func (i Issue) IsPullRequest() bool { + return i.PullRequestLinks != nil +} + // IssueRequest represents a request to create/edit an issue. // It is separate from Issue above because otherwise Labels // and Assignee fail to serialize to the correct JSON. @@ -97,7 +109,7 @@ type IssueListOptions struct { } // PullRequestLinks object is added to the Issue object when it's an issue included -// in the IssueCommentEvent webhook payload, if the webhooks is fired by a comment on a PR +// in the IssueCommentEvent webhook payload, if the webhook is fired by a comment on a PR. 
type PullRequestLinks struct { URL *string `json:"url,omitempty"` HTMLURL *string `json:"html_url,omitempty"` diff --git a/vendor/github.com/google/go-github/github/messages.go b/vendor/github.com/google/go-github/github/messages.go index c0f315a778..3a4d46440a 100644 --- a/vendor/github.com/google/go-github/github/messages.go +++ b/vendor/github.com/google/go-github/github/messages.go @@ -20,6 +20,7 @@ import ( "hash" "io/ioutil" "net/http" + "net/url" "strings" ) @@ -122,6 +123,8 @@ func messageMAC(signature string) ([]byte, func() hash.Hash, error) { // ValidatePayload validates an incoming GitHub Webhook event request // and returns the (JSON) payload. +// The Content-Type header of the payload can be "application/json" or "application/x-www-form-urlencoded". +// If the Content-Type is neither then an error is returned. // secretKey is the GitHub Webhook secret message. // // Example usage: @@ -133,13 +136,43 @@ func messageMAC(signature string) ([]byte, func() hash.Hash, error) { // } // func ValidatePayload(r *http.Request, secretKey []byte) (payload []byte, err error) { - payload, err = ioutil.ReadAll(r.Body) - if err != nil { - return nil, err + var body []byte // Raw body that GitHub uses to calculate the signature. + + switch ct := r.Header.Get("Content-Type"); ct { + case "application/json": + var err error + if body, err = ioutil.ReadAll(r.Body); err != nil { + return nil, err + } + + // If the content type is application/json, + // the JSON payload is just the original body. + payload = body + + case "application/x-www-form-urlencoded": + // payloadFormParam is the name of the form parameter that the JSON payload + // will be in if a webhook has its content type set to application/x-www-form-urlencoded. + const payloadFormParam = "payload" + + var err error + if body, err = ioutil.ReadAll(r.Body); err != nil { + return nil, err + } + + // If the content type is application/x-www-form-urlencoded, + // the JSON payload will be under the "payload" form param. + form, err := url.ParseQuery(string(body)) + if err != nil { + return nil, err + } + payload = []byte(form.Get(payloadFormParam)) + + default: + return nil, fmt.Errorf("Webhook request has unsupported Content-Type %q", ct) } sig := r.Header.Get(signatureHeader) - if err := validateSignature(sig, payload, secretKey); err != nil { + if err := validateSignature(sig, body, secretKey); err != nil { return nil, err } return payload, nil diff --git a/vendor/github.com/google/go-github/github/orgs_members.go b/vendor/github.com/google/go-github/github/orgs_members.go index f1209c7c41..d0ea6a985e 100644 --- a/vendor/github.com/google/go-github/github/orgs_members.go +++ b/vendor/github.com/google/go-github/github/orgs_members.go @@ -278,7 +278,7 @@ func (s *OrganizationsService) RemoveOrgMembership(ctx context.Context, user, or // ListPendingOrgInvitations returns a list of pending invitations. 
// // GitHub API docs: https://developer.github.com/v3/orgs/members/#list-pending-organization-invitations -func (s *OrganizationsService) ListPendingOrgInvitations(ctx context.Context, org int, opt *ListOptions) ([]*Invitation, *Response, error) { +func (s *OrganizationsService) ListPendingOrgInvitations(ctx context.Context, org string, opt *ListOptions) ([]*Invitation, *Response, error) { u := fmt.Sprintf("orgs/%v/invitations", org) u, err := addOptions(u, opt) if err != nil { diff --git a/vendor/github.com/google/go-github/github/orgs_teams.go b/vendor/github.com/google/go-github/github/orgs_teams.go index 684e2dafff..a14132210b 100644 --- a/vendor/github.com/google/go-github/github/orgs_teams.go +++ b/vendor/github.com/google/go-github/github/orgs_teams.go @@ -196,6 +196,9 @@ func (s *OrganizationsService) ListTeamMembers(ctx context.Context, team int, op // IsTeamMember checks if a user is a member of the specified team. // // GitHub API docs: https://developer.github.com/v3/orgs/teams/#get-team-member +// +// Deprecated: This API has been marked as deprecated in the Github API docs, +// OrganizationsService.GetTeamMembership method should be used instead. func (s *OrganizationsService) IsTeamMember(ctx context.Context, team int, user string) (bool, *Response, error) { u := fmt.Sprintf("teams/%v/members/%v", team, user) req, err := s.client.NewRequest("GET", u, nil) diff --git a/vendor/github.com/google/go-github/github/projects.go b/vendor/github.com/google/go-github/github/projects.go index 89685c9d70..a9b143df3d 100644 --- a/vendor/github.com/google/go-github/github/projects.go +++ b/vendor/github.com/google/go-github/github/projects.go @@ -65,6 +65,12 @@ type ProjectOptions struct { Name string `json:"name,omitempty"` // The body of the project. (Optional.) Body string `json:"body,omitempty"` + + // The following field(s) are only applicable for update. + // They should be left with zero values for creation. + + // State of the project. Either "open" or "closed". (Optional.) + State string `json:"state,omitempty"` } // UpdateProject updates a repository project. diff --git a/vendor/github.com/google/go-github/github/repos.go b/vendor/github.com/google/go-github/github/repos.go index 3846335b3a..e3fe26fc26 100644 --- a/vendor/github.com/google/go-github/github/repos.go +++ b/vendor/github.com/google/go-github/github/repos.go @@ -232,8 +232,6 @@ func (s *RepositoriesService) ListByOrg(ctx context.Context, org string, opt *Re type RepositoryListAllOptions struct { // ID of the last repository seen Since int `url:"since,omitempty"` - - ListOptions } // ListAll lists all GitHub repositories in the order that they were created. @@ -558,20 +556,24 @@ type RequiredStatusChecks struct { // PullRequestReviewsEnforcement represents the pull request reviews enforcement of a protected branch. type PullRequestReviewsEnforcement struct { - // Specifies which users and teams can dismiss pull requets reviews. + // Specifies which users and teams can dismiss pull request reviews. DismissalRestrictions DismissalRestrictions `json:"dismissal_restrictions"` // Specifies if approved reviews are dismissed automatically, when a new commit is pushed. DismissStaleReviews bool `json:"dismiss_stale_reviews"` + // RequireCodeOwnerReviews specifies if an approved review is required in pull requests including files with a designated code owner. 
+ RequireCodeOwnerReviews bool `json:"require_code_owner_reviews"` } // PullRequestReviewsEnforcementRequest represents request to set the pull request review // enforcement of a protected branch. It is separate from PullRequestReviewsEnforcement above // because the request structure is different from the response structure. type PullRequestReviewsEnforcementRequest struct { - // Specifies which users and teams should be allowed to dismiss pull requets reviews. Can be nil to disable the restrictions. + // Specifies which users and teams should be allowed to dismiss pull request reviews. Can be nil to disable the restrictions. DismissalRestrictionsRequest *DismissalRestrictionsRequest `json:"dismissal_restrictions"` // Specifies if approved reviews can be dismissed automatically, when a new commit is pushed. (Required) DismissStaleReviews bool `json:"dismiss_stale_reviews"` + // RequireCodeOwnerReviews specifies if an approved review is required in pull requests including files with a designated code owner. + RequireCodeOwnerReviews bool `json:"require_code_owner_reviews"` } // MarshalJSON implements the json.Marshaler interface. @@ -581,18 +583,22 @@ func (req PullRequestReviewsEnforcementRequest) MarshalJSON() ([]byte, error) { newReq := struct { R []interface{} `json:"dismissal_restrictions"` D bool `json:"dismiss_stale_reviews"` + O bool `json:"require_code_owner_reviews"` }{ R: []interface{}{}, D: req.DismissStaleReviews, + O: req.RequireCodeOwnerReviews, } return json.Marshal(newReq) } newReq := struct { R *DismissalRestrictionsRequest `json:"dismissal_restrictions"` D bool `json:"dismiss_stale_reviews"` + O bool `json:"require_code_owner_reviews"` }{ R: req.DismissalRestrictionsRequest, D: req.DismissStaleReviews, + O: req.RequireCodeOwnerReviews, } return json.Marshal(newReq) } @@ -601,10 +607,12 @@ func (req PullRequestReviewsEnforcementRequest) MarshalJSON() ([]byte, error) { // enforcement of a protected branch. It is separate from PullRequestReviewsEnforcementRequest above // because the patch request does not require all fields to be initialized. type PullRequestReviewsEnforcementUpdate struct { - // Specifies which users and teams can dismiss pull requets reviews. Can be ommitted. + // Specifies which users and teams can dismiss pull request reviews. Can be omitted. DismissalRestrictionsRequest *DismissalRestrictionsRequest `json:"dismissal_restrictions,omitempty"` - // Specifies if approved reviews can be dismissed automatically, when a new commit is pushed. Can be ommited. + // Specifies if approved reviews can be dismissed automatically, when a new commit is pushed. Can be omitted. DismissStaleReviews *bool `json:"dismiss_stale_reviews,omitempty"` + // RequireCodeOwnerReviews specifies if an approved review is required in pull requests including files with a designated code owner. + RequireCodeOwnerReviews bool `json:"require_code_owner_reviews,omitempty"` } // AdminEnforcement represents the configuration to enforce required status checks for repository administrators. 
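The repos.go hunk above adds RequireCodeOwnerReviews to PullRequestReviewsEnforcementRequest and threads it through the custom MarshalJSON, so a nil DismissalRestrictionsRequest still serializes as an empty dismissal_restrictions list. The sketch below is illustrative only and not part of the vendored change; the field values and the main wrapper are assumptions.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/google/go-github/github"
)

func main() {
	// Leaving DismissalRestrictionsRequest nil disables dismissal restrictions;
	// the custom MarshalJSON above still emits "dismissal_restrictions" as [].
	req := github.PullRequestReviewsEnforcementRequest{
		DismissStaleReviews:     true,
		RequireCodeOwnerReviews: true,
	}

	b, err := json.Marshal(req)
	if err != nil {
		fmt.Println("marshal error:", err)
		return
	}
	// Expected shape:
	// {"dismissal_restrictions":[],"dismiss_stale_reviews":true,"require_code_owner_reviews":true}
	fmt.Println(string(b))
}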
diff --git a/vendor/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go index ac57415c15..533b245578 100644 --- a/vendor/github.com/hashicorp/consul/api/agent.go +++ b/vendor/github.com/hashicorp/consul/api/agent.go @@ -80,7 +80,8 @@ type AgentCheckRegistration struct { // AgentServiceCheck is used to define a node or service level check type AgentServiceCheck struct { - Script string `json:",omitempty"` + Args []string `json:"ScriptArgs,omitempty"` + Script string `json:",omitempty"` // Deprecated, use Args. DockerContainerID string `json:",omitempty"` Shell string `json:",omitempty"` // Only supported for Docker. Interval string `json:",omitempty"` diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go index 31efecca03..97a524b5ee 100644 --- a/vendor/github.com/hashicorp/consul/api/api.go +++ b/vendor/github.com/hashicorp/consul/api/api.go @@ -446,6 +446,7 @@ func NewClient(config *Config) (*Client, error) { if len(parts) == 2 { switch parts[0] { case "http": + config.Scheme = "http" case "https": config.Scheme = "https" case "unix": @@ -466,10 +467,7 @@ func NewClient(config *Config) (*Client, error) { config.Token = defConfig.Token } - client := &Client{ - config: *config, - } - return client, nil + return &Client{config: *config}, nil } // NewHttpClient returns an http client configured with the given Transport and TLS @@ -557,13 +555,20 @@ func durToMsec(dur time.Duration) string { // serverError is a string we look for to detect 500 errors. const serverError = "Unexpected response code: 500" -// IsServerError returns true for 500 errors from the Consul servers, these are -// usually retryable at a later time. -func IsServerError(err error) bool { +// IsRetryableError returns true for 500 errors from the Consul servers, and +// network connection errors. These are usually retryable at a later time. +// This applies to reads but NOT to writes. This may return true for errors +// on writes that may have still gone through, so do not use this to retry +// any write operations. +func IsRetryableError(err error) bool { if err == nil { return false } + if _, ok := err.(net.Error); ok { + return true + } + // TODO (slackpad) - Make a real error type here instead of using // a string check. 
return strings.Contains(err.Error(), serverError) @@ -656,7 +661,7 @@ func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) { } start := time.Now() resp, err := c.config.HttpClient.Do(req) - diff := time.Now().Sub(start) + diff := time.Since(start) return diff, resp, err } diff --git a/vendor/github.com/hashicorp/consul/api/kv.go b/vendor/github.com/hashicorp/consul/api/kv.go index f91bb50fce..97f5156855 100644 --- a/vendor/github.com/hashicorp/consul/api/kv.go +++ b/vendor/github.com/hashicorp/consul/api/kv.go @@ -252,7 +252,7 @@ func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOpti if _, err := io.Copy(&buf, resp.Body); err != nil { return false, nil, fmt.Errorf("Failed to read response: %v", err) } - res := strings.Contains(string(buf.Bytes()), "true") + res := strings.Contains(buf.String(), "true") return res, qm, nil } @@ -296,7 +296,7 @@ func (k *KV) deleteInternal(key string, params map[string]string, q *WriteOption if _, err := io.Copy(&buf, resp.Body); err != nil { return false, nil, fmt.Errorf("Failed to read response: %v", err) } - res := strings.Contains(string(buf.Bytes()), "true") + res := strings.Contains(buf.String(), "true") return res, qm, nil } @@ -353,19 +353,19 @@ type TxnResponse struct { // // Here's an example: // -// ops := KVTxnOps{ -// &KVTxnOp{ -// Verb: KVLock, -// Key: "test/lock", -// Session: "adf4238a-882b-9ddc-4a9d-5b6758e4159e", -// Value: []byte("hello"), -// }, -// &KVTxnOp{ -// Verb: KVGet, -// Key: "another/key", -// }, -// } -// ok, response, _, err := kv.Txn(&ops, nil) +// ops := KVTxnOps{ +// &KVTxnOp{ +// Verb: KVLock, +// Key: "test/lock", +// Session: "adf4238a-882b-9ddc-4a9d-5b6758e4159e", +// Value: []byte("hello"), +// }, +// &KVTxnOp{ +// Verb: KVGet, +// Key: "another/key", +// }, +// } +// ok, response, _, err := kv.Txn(&ops, nil) // // If there is a problem making the transaction request then an error will be // returned. Otherwise, the ok value will be true if the transaction succeeded diff --git a/vendor/github.com/hashicorp/consul/api/lock.go b/vendor/github.com/hashicorp/consul/api/lock.go index 466ef5fdf1..41f72e7d23 100644 --- a/vendor/github.com/hashicorp/consul/api/lock.go +++ b/vendor/github.com/hashicorp/consul/api/lock.go @@ -180,7 +180,7 @@ WAIT: // Handle the one-shot mode. if l.opts.LockTryOnce && attempts > 0 { - elapsed := time.Now().Sub(start) + elapsed := time.Since(start) if elapsed > qOpts.WaitTime { return nil, nil } @@ -370,7 +370,7 @@ RETRY: // by doing retries. Note that we have to attempt the retry in a non- // blocking fashion so that we have a clean place to reset the retry // counter if service is restored. - if retries > 0 && IsServerError(err) { + if retries > 0 && IsRetryableError(err) { time.Sleep(l.opts.MonitorRetryTime) retries-- opts.WaitIndex = 0 diff --git a/vendor/github.com/hashicorp/consul/api/operator_area.go b/vendor/github.com/hashicorp/consul/api/operator_area.go index 7b0e461e96..a630b694cd 100644 --- a/vendor/github.com/hashicorp/consul/api/operator_area.go +++ b/vendor/github.com/hashicorp/consul/api/operator_area.go @@ -25,6 +25,10 @@ type Area struct { // RetryJoin specifies the address of Consul servers to join to, such as // an IPs or hostnames with an optional port number. This is optional. RetryJoin []string + + // UseTLS specifies whether gossip over this area should be encrypted with TLS + // if possible. 
+ UseTLS bool } // AreaJoinResponse is returned when a join occurs and gives the result for each @@ -100,6 +104,27 @@ func (op *Operator) AreaCreate(area *Area, q *WriteOptions) (string, *WriteMeta, return out.ID, wm, nil } +// AreaUpdate will update the configuration of the network area with the given ID. +func (op *Operator) AreaUpdate(areaID string, area *Area, q *WriteOptions) (string, *WriteMeta, error) { + r := op.c.newRequest("PUT", "/v1/operator/area/"+areaID) + r.setWriteOptions(q) + r.obj = area + rtt, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + // AreaGet returns a single network area. func (op *Operator) AreaGet(areaID string, q *QueryOptions) ([]*Area, *QueryMeta, error) { var out []*Area diff --git a/vendor/github.com/hashicorp/consul/api/operator_autopilot.go b/vendor/github.com/hashicorp/consul/api/operator_autopilot.go index 0fa9d16040..b179406dc1 100644 --- a/vendor/github.com/hashicorp/consul/api/operator_autopilot.go +++ b/vendor/github.com/hashicorp/consul/api/operator_autopilot.go @@ -196,7 +196,7 @@ func (op *Operator) AutopilotCASConfiguration(conf *AutopilotConfiguration, q *W if _, err := io.Copy(&buf, resp.Body); err != nil { return false, fmt.Errorf("Failed to read response: %v", err) } - res := strings.Contains(string(buf.Bytes()), "true") + res := strings.Contains(buf.String(), "true") return res, nil } diff --git a/vendor/github.com/hashicorp/consul/api/operator_raft.go b/vendor/github.com/hashicorp/consul/api/operator_raft.go index 5f3c25b131..a9844df2dd 100644 --- a/vendor/github.com/hashicorp/consul/api/operator_raft.go +++ b/vendor/github.com/hashicorp/consul/api/operator_raft.go @@ -17,6 +17,9 @@ type RaftServer struct { // Leader is true if this server is the current cluster leader. Leader bool + // Protocol version is the raft protocol version used by the server + ProtocolVersion string + // Voter is true if this server has a vote in the cluster. This might // be false if the server is staging and still coming online, or if // it's a non-voting server, which will be added in a future release of @@ -24,7 +27,7 @@ type RaftServer struct { Voter bool } -// RaftConfigration is returned when querying for the current Raft configuration. +// RaftConfiguration is returned when querying for the current Raft configuration. type RaftConfiguration struct { // Servers has the list of servers in the Raft configuration. Servers []*RaftServer diff --git a/vendor/github.com/hashicorp/consul/api/semaphore.go b/vendor/github.com/hashicorp/consul/api/semaphore.go index 9ddbdc49e7..d0c5741778 100644 --- a/vendor/github.com/hashicorp/consul/api/semaphore.go +++ b/vendor/github.com/hashicorp/consul/api/semaphore.go @@ -198,7 +198,7 @@ WAIT: // Handle the one-shot mode. if s.opts.SemaphoreTryOnce && attempts > 0 { - elapsed := time.Now().Sub(start) + elapsed := time.Since(start) if elapsed > qOpts.WaitTime { return nil, nil } @@ -492,7 +492,7 @@ RETRY: // by doing retries. Note that we have to attempt the retry in a non- // blocking fashion so that we have a clean place to reset the retry // counter if service is restored. 
- if retries > 0 && IsServerError(err) { + if retries > 0 && IsRetryableError(err) { time.Sleep(s.opts.MonitorRetryTime) retries-- opts.WaitIndex = 0 diff --git a/vendor/github.com/hashicorp/consul/lib/eof.go b/vendor/github.com/hashicorp/consul/lib/eof.go new file mode 100644 index 0000000000..f77844fd64 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/lib/eof.go @@ -0,0 +1,27 @@ +package lib + +import ( + "io" + "strings" + + "github.com/hashicorp/yamux" +) + +var yamuxStreamClosed = yamux.ErrStreamClosed.Error() +var yamuxSessionShutdown = yamux.ErrSessionShutdown.Error() + +// IsErrEOF returns true if we get an EOF error from the socket itself, or +// an EOF equivalent error from yamux. +func IsErrEOF(err error) bool { + if err == io.EOF { + return true + } + + errStr := err.Error() + if strings.Contains(errStr, yamuxStreamClosed) || + strings.Contains(errStr, yamuxSessionShutdown) { + return true + } + + return false +} diff --git a/vendor/github.com/hashicorp/go-hclog/int.go b/vendor/github.com/hashicorp/go-hclog/int.go index c8609c3575..20adcfbb93 100644 --- a/vendor/github.com/hashicorp/go-hclog/int.go +++ b/vendor/github.com/hashicorp/go-hclog/int.go @@ -40,8 +40,13 @@ func New(opts *LoggerOptions) Logger { level = DefaultLevel } + mtx := opts.Mutex + if mtx == nil { + mtx = new(sync.Mutex) + } + return &intLogger{ - m: new(sync.Mutex), + m: mtx, json: opts.JSONFormat, caller: opts.IncludeLocation, name: opts.Name, @@ -369,6 +374,8 @@ func (z *intLogger) Named(name string) Logger { if nz.name != "" { nz.name = nz.name + "." + name + } else { + nz.name = name } return &nz diff --git a/vendor/github.com/hashicorp/go-hclog/log.go b/vendor/github.com/hashicorp/go-hclog/log.go index 1b36f81126..dbc4198a5a 100644 --- a/vendor/github.com/hashicorp/go-hclog/log.go +++ b/vendor/github.com/hashicorp/go-hclog/log.go @@ -5,6 +5,7 @@ import ( "log" "os" "strings" + "sync" ) var ( @@ -130,6 +131,9 @@ type LoggerOptions struct { // Where to write the logs to. Defaults to os.Stdout if nil Output io.Writer + // An optional mutex pointer in case Output is shared + Mutex *sync.Mutex + // Control if the output should be in JSON. JSONFormat bool diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go index b88f322a84..bed9ebbe14 100644 --- a/vendor/github.com/hashicorp/hcl/decoder.go +++ b/vendor/github.com/hashicorp/hcl/decoder.go @@ -573,7 +573,11 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) // Compile the list of all the fields that we're going to be decoding // from all the structs. 
- fields := make(map[*reflect.StructField]reflect.Value) + type field struct { + field reflect.StructField + val reflect.Value + } + fields := []field{} for len(structs) > 0 { structVal := structs[0] structs = structs[1:] @@ -616,7 +620,7 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) } // Normal struct field, store it away - fields[&fieldType] = structVal.Field(i) + fields = append(fields, field{fieldType, structVal.Field(i)}) } } @@ -624,26 +628,27 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) decodedFields := make([]string, 0, len(fields)) decodedFieldsVal := make([]reflect.Value, 0) unusedKeysVal := make([]reflect.Value, 0) - for fieldType, field := range fields { - if !field.IsValid() { + for _, f := range fields { + field, fieldValue := f.field, f.val + if !fieldValue.IsValid() { // This should never happen panic("field is not valid") } // If we can't set the field, then it is unexported or something, // and we just continue onwards. - if !field.CanSet() { + if !fieldValue.CanSet() { continue } - fieldName := fieldType.Name + fieldName := field.Name - tagValue := fieldType.Tag.Get(tagName) + tagValue := field.Tag.Get(tagName) tagParts := strings.SplitN(tagValue, ",", 2) if len(tagParts) >= 2 { switch tagParts[1] { case "decodedFields": - decodedFieldsVal = append(decodedFieldsVal, field) + decodedFieldsVal = append(decodedFieldsVal, fieldValue) continue case "key": if item == nil { @@ -654,10 +659,10 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) } } - field.SetString(item.Keys[0].Token.Value().(string)) + fieldValue.SetString(item.Keys[0].Token.Value().(string)) continue case "unusedKeys": - unusedKeysVal = append(unusedKeysVal, field) + unusedKeysVal = append(unusedKeysVal, fieldValue) continue } } @@ -684,7 +689,7 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) // because we actually want the value. fieldName = fmt.Sprintf("%s.%s", name, fieldName) if len(prefixMatches.Items) > 0 { - if err := d.decode(fieldName, prefixMatches, field); err != nil { + if err := d.decode(fieldName, prefixMatches, fieldValue); err != nil { return err } } @@ -694,12 +699,12 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) decodeNode = &ast.ObjectList{Items: ot.List.Items} } - if err := d.decode(fieldName, decodeNode, field); err != nil { + if err := d.decode(fieldName, decodeNode, fieldValue); err != nil { return err } } - decodedFields = append(decodedFields, fieldType.Name) + decodedFields = append(decodedFields, field.Name) } if len(decodedFieldsVal) > 0 { diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go index 69662367f0..6601ef76e6 100644 --- a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go +++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go @@ -351,7 +351,7 @@ func (s *Scanner) scanNumber(ch rune) token.Type { return token.NUMBER } -// scanMantissa scans the mantissa begining from the rune. It returns the next +// scanMantissa scans the mantissa beginning from the rune. It returns the next // non decimal rune. It's used to determine wheter it's a fraction or exponent. 
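// Hypothetical usage sketch: decoding an HCL document into a tagged struct,
// which exercises the struct-field handling shown above. The ServerConfig
// type and the input string are made up for illustration.
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

type ServerConfig struct {
	Address string `hcl:"address"`
	Port    int    `hcl:"port"`
}

func main() {
	input := `
address = "127.0.0.1"
port    = 4646
`
	var cfg ServerConfig
	// Decode parses the HCL text and assigns values to fields by tag name.
	if err := hcl.Decode(&cfg, input); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s:%d\n", cfg.Address, cfg.Port)
}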
func (s *Scanner) scanMantissa(ch rune) rune { scanned := false diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go index dd5c72bb3d..fe3f0f0950 100644 --- a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go +++ b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go @@ -246,7 +246,7 @@ func (s *Scanner) scanNumber(ch rune) token.Type { return token.NUMBER } -// scanMantissa scans the mantissa begining from the rune. It returns the next +// scanMantissa scans the mantissa beginning from the rune. It returns the next // non decimal rune. It's used to determine wheter it's a fraction or exponent. func (s *Scanner) scanMantissa(ch rune) rune { scanned := false diff --git a/vendor/github.com/hashicorp/serf/coordinate/client.go b/vendor/github.com/hashicorp/serf/coordinate/client.go index 63f6241411..403ec78014 100644 --- a/vendor/github.com/hashicorp/serf/coordinate/client.go +++ b/vendor/github.com/hashicorp/serf/coordinate/client.go @@ -205,6 +205,11 @@ func (c *Client) Update(node string, other *Coordinate, rtt time.Duration) (*Coo return nil, err } + const maxRTT = 10 * time.Second + if rtt <= 0 || rtt > maxRTT { + return nil, fmt.Errorf("round trip time not in valid range, duration %v is not a positive value less than %v ", rtt, maxRTT) + } + rttSeconds := c.latencyFilter(node, rtt.Seconds()) c.updateVivaldi(other, rttSeconds) c.updateAdjustment(other, rttSeconds) diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-gcp/Gopkg.lock b/vendor/github.com/hashicorp/vault-plugin-auth-gcp/Gopkg.lock deleted file mode 100644 index 53632775ff..0000000000 --- a/vendor/github.com/hashicorp/vault-plugin-auth-gcp/Gopkg.lock +++ /dev/null @@ -1,207 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
- - -[[projects]] - name = "cloud.google.com/go" - packages = ["compute/metadata"] - revision = "5a9e19d4e1e41a734154e44a2132b358afb49a03" - version = "v0.13.0" - -[[projects]] - name = "github.com/SermoDigital/jose" - packages = [".","crypto","jws","jwt"] - revision = "f6df55f235c24f236d11dbcf665249a59ac2021f" - version = "1.1" - -[[projects]] - branch = "master" - name = "github.com/armon/go-radix" - packages = ["."] - revision = "1fca145dffbcaa8fe914309b1ec0cfc67500fe61" - -[[projects]] - name = "github.com/fatih/structs" - packages = ["."] - revision = "a720dfa8df582c51dee1b36feabb906bde1588bd" - version = "v1.0" - -[[projects]] - branch = "master" - name = "github.com/golang/protobuf" - packages = ["proto","ptypes","ptypes/any","ptypes/duration","ptypes/timestamp"] - revision = "17ce1425424ab154092bbb43af630bd647f3bb0d" - -[[projects]] - branch = "master" - name = "github.com/golang/snappy" - packages = ["."] - revision = "553a641470496b2327abcac10b36396bd98e45c9" - -[[projects]] - branch = "master" - name = "github.com/hashicorp/errwrap" - packages = ["."] - revision = "7554cd9344cec97297fa6649b055a8c98c2a1e55" - -[[projects]] - branch = "master" - name = "github.com/hashicorp/go-cleanhttp" - packages = ["."] - revision = "3573b8b52aa7b37b9358d966a898feb387f62437" - -[[projects]] - branch = "master" - name = "github.com/hashicorp/go-hclog" - packages = ["."] - revision = "8105cc0a3736cc153a2025f5d0d91b80045fc9ff" - -[[projects]] - branch = "master" - name = "github.com/hashicorp/go-multierror" - packages = ["."] - revision = "83588e72410abfbe4df460eeb6f30841ae47d4c4" - -[[projects]] - branch = "master" - name = "github.com/hashicorp/go-plugin" - packages = ["."] - revision = "3e6d191694b5a3a2b99755f31b47fa209e4bcd09" - -[[projects]] - branch = "master" - name = "github.com/hashicorp/go-rootcerts" - packages = ["."] - revision = "6bb64b370b90e7ef1fa532be9e591a81c3493e00" - -[[projects]] - branch = "master" - name = "github.com/hashicorp/go-uuid" - packages = ["."] - revision = "64130c7a86d732268a38cb04cfbaf0cc987fda98" - -[[projects]] - branch = "master" - name = "github.com/hashicorp/hcl" - packages = [".","hcl/ast","hcl/parser","hcl/scanner","hcl/strconv","hcl/token","json/parser","json/scanner","json/token"] - revision = "68e816d1c783414e79bc65b3994d9ab6b0a722ab" - -[[projects]] - branch = "master" - name = "github.com/hashicorp/vault" - packages = ["api","helper/certutil","helper/compressutil","helper/consts","helper/errutil","helper/jsonutil","helper/logformat","helper/mlock","helper/parseutil","helper/pluginutil","helper/policyutil","helper/salt","helper/strutil","helper/wrapping","logical","logical/framework","logical/plugin","version"] - revision = "27197f728e7fc8bffc9eaa59e8af4e0766b81320" - -[[projects]] - branch = "master" - name = "github.com/hashicorp/yamux" - packages = ["."] - revision = "d1caa6c97c9fc1cc9e83bbe34d0603f9ff0ce8bd" - -[[projects]] - name = "github.com/mattn/go-colorable" - packages = ["."] - revision = "167de6bfdfba052fa6b2d3664c8f5272e23c9072" - version = "v0.0.9" - -[[projects]] - name = "github.com/mattn/go-isatty" - packages = ["."] - revision = "fc9e8d8ef48496124e79ae0df75490096eccf6fe" - version = "v0.0.2" - -[[projects]] - branch = "master" - name = "github.com/mgutz/ansi" - packages = ["."] - revision = "9520e82c474b0a04dd04f8a40959027271bab992" - -[[projects]] - name = "github.com/mgutz/logxi" - packages = ["v1"] - revision = "aebf8a7d67ab4625e0fd4a665766fef9a709161b" - version = "v1" - -[[projects]] - branch = "master" - name = 
"github.com/mitchellh/go-homedir" - packages = ["."] - revision = "b8bc1bf767474819792c23f32d8286a45736f1c6" - -[[projects]] - branch = "master" - name = "github.com/mitchellh/go-testing-interface" - packages = ["."] - revision = "7bf6f6eaf1bed2fd3c6c63114b18cb64facb9de2" - -[[projects]] - branch = "master" - name = "github.com/mitchellh/mapstructure" - packages = ["."] - revision = "d0303fe809921458f417bcf828397a65db30a7e4" - -[[projects]] - branch = "master" - name = "github.com/sethgrid/pester" - packages = ["."] - revision = "a86a2d88f4dc3c7dbf3a6a6bbbfb095690b834b6" - -[[projects]] - branch = "master" - name = "golang.org/x/net" - packages = ["context","context/ctxhttp","http2","http2/hpack","idna","internal/timeseries","lex/httplex","trace"] - revision = "859d1a86bb617c0c20d154590c3c5d3fcb670b07" - -[[projects]] - branch = "master" - name = "golang.org/x/oauth2" - packages = [".","google","internal","jws","jwt"] - revision = "13449ad91cb26cb47661c1b080790392170385fd" - -[[projects]] - branch = "master" - name = "golang.org/x/sys" - packages = ["unix"] - revision = "062cd7e4e68206d8bab9b18396626e855c992658" - -[[projects]] - branch = "master" - name = "golang.org/x/text" - packages = ["collate","collate/build","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"] - revision = "ab5ac5f9a8deb4855a60fab02bc61a4ec770bd49" - -[[projects]] - branch = "master" - name = "google.golang.org/api" - packages = ["compute/v1","gensupport","googleapi","googleapi/internal/uritemplates","iam/v1","oauth2/v2"] - revision = "519500316f39a9934d1d88615a1a047035a4bae5" - -[[projects]] - name = "google.golang.org/appengine" - packages = [".","internal","internal/app_identity","internal/base","internal/datastore","internal/log","internal/modules","internal/remote_api","internal/urlfetch","urlfetch"] - revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a" - version = "v1.0.0" - -[[projects]] - branch = "master" - name = "google.golang.org/genproto" - packages = ["googleapis/rpc/status"] - revision = "595979c8a7bf586b2d293fb42246bf91a0b893d9" - -[[projects]] - name = "google.golang.org/grpc" - packages = [".","codes","connectivity","credentials","grpclb/grpc_lb_v1/messages","grpclog","health","health/grpc_health_v1","internal","keepalive","metadata","naming","peer","stats","status","tap","transport"] - revision = "f92cdcd7dcdc69e81b2d7b338479a19a8723cfa3" - version = "v1.6.0" - -[[projects]] - name = "gopkg.in/square/go-jose.v2" - packages = [".","cipher","json","jwt"] - revision = "b25e6cab129e4a54675b42ea49d38e9c33ade9e6" - version = "v2.1.2" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "f9ed330c7039eae92bba02edc9d8463bb8f3dd93229e287004acc600825f9e92" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-gcp/Gopkg.toml b/vendor/github.com/hashicorp/vault-plugin-auth-gcp/Gopkg.toml deleted file mode 100644 index f30b62f1db..0000000000 --- a/vendor/github.com/hashicorp/vault-plugin-auth-gcp/Gopkg.toml +++ /dev/null @@ -1,46 +0,0 @@ - -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. 
-# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" - - -[[constraint]] - name = "github.com/SermoDigital/jose" - version = "1.1.0" - -[[constraint]] - branch = "master" - name = "github.com/hashicorp/go-cleanhttp" - -[[constraint]] - name = "github.com/hashicorp/vault" - branch = "master" - -[[constraint]] - name = "github.com/mgutz/logxi" - version = "1.0.0" - -[[constraint]] - branch = "master" - name = "golang.org/x/oauth2" - -[[constraint]] - branch = "master" - name = "google.golang.org/api" diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-gcp/Makefile b/vendor/github.com/hashicorp/vault-plugin-auth-gcp/Makefile deleted file mode 100644 index 221d9f79db..0000000000 --- a/vendor/github.com/hashicorp/vault-plugin-auth-gcp/Makefile +++ /dev/null @@ -1,58 +0,0 @@ -TOOL?=vault-gcp-auth-plugin -TEST?=$$(go list ./... | grep -v /vendor/) -VETARGS?=-asmdecl -atomic -bool -buildtags -copylocks -methods -nilfunc -printf -rangeloops -shift -structtags -unsafeptr -EXTERNAL_TOOLS=\ - github.com/mitchellh/gox \ - github.com/kardianos/govendor -BUILD_TAGS?=${TOOL} -GOFMT_FILES?=$$(find . -name '*.go' | grep -v vendor) - -# bin generates the releaseable binaries for this plugin -bin: fmtcheck generate - @CGO_ENABLED=0 BUILD_TAGS='$(BUILD_TAGS)' sh -c "'$(CURDIR)/scripts/build.sh'" - -default: dev - -# dev creates binaries for testing Vault locally. These are put -# into ./bin/ as well as $GOPATH/bin, except for quickdev which -# is only put into /bin/ -quickdev: generate - @CGO_ENABLED=0 go build -i -tags='$(BUILD_TAGS)' -o bin/vault-gcp-auth-plugin -dev: fmtcheck generate - @CGO_ENABLED=0 BUILD_TAGS='$(BUILD_TAGS)' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'" -dev-dynamic: generate - @CGO_ENABLED=1 BUILD_TAGS='$(BUILD_TAGS)' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'" - -testcompile: fmtcheck generate - @for pkg in $(TEST) ; do \ - go test -v -c -tags='$(BUILD_TAGS)' $$pkg -parallel=4 ; \ - done - -# test runs all tests -test: fmtcheck generate - @if [ "$(TEST)" = "./..." ]; then \ - echo "ERROR: Set TEST to a specific package"; \ - exit 1; \ - fi - VAULT_ACC=1 go test -tags='$(BUILD_TAGS)' $(TEST) -v $(TESTARGS) -timeout 45m - -# generate runs `go generate` to build the dynamically generated -# source files. -generate: - go generate $(go list ./... | grep -v /vendor/) - -# bootstrap the build by downloading additional tools -bootstrap: - @for tool in $(EXTERNAL_TOOLS) ; do \ - echo "Installing/Updating $$tool" ; \ - go get -u $$tool; \ - done - -fmtcheck: - @sh -c "'$(CURDIR)/scripts/gofmtcheck.sh'" - -fmt: - gofmt -w $(GOFMT_FILES) - - -.PHONY: bin default generate test vet bootstrap fmt fmtcheck diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-gcp/README.md b/vendor/github.com/hashicorp/vault-plugin-auth-gcp/README.md deleted file mode 100644 index adea5a2490..0000000000 --- a/vendor/github.com/hashicorp/vault-plugin-auth-gcp/README.md +++ /dev/null @@ -1,149 +0,0 @@ -# Vault Plugin: Google Cloud Platform Auth Backend - -This is a standalone backend plugin for use with [Hashicorp Vault](https://www.github.com/hashicorp/vault). -This plugin allows for various GCP entities to authenticate with Vault. 
-This is currently included in Vault distributions. - -Currently, this plugin supports login for: -- IAM service accounts -- GCE Instances - -**Please note**: We take Vault's security and our users' trust very seriously. If you believe you have found a security issue in Vault, _please responsibly disclose_ by contacting us at [security@hashicorp.com](mailto:security@hashicorp.com). - -## Quick Links - -- [Vault Website](https://www.vaultproject.io) -- [GCP Auth BE Docs](https://www.vaultproject.io/docs/auth/gcp.html) -- [Vault Github](https://www.github.com/hashicorp/vault) -- [General Announcement List](https://groups.google.com/forum/#!forum/hashicorp-announce) -- [Discussion List](https://groups.google.com/forum/#!forum/vault-tool) - - -## Getting Started - -This is a [Vault plugin](https://www.vaultproject.io/docs/internals/plugins.html) -and is meant to work with Vault. This guide assumes you have already installed Vault -and have a basic understanding of how Vault works. - -Otherwise, first read this guide on how to [get started with Vault](https://www.vaultproject.io/intro/getting-started/install.html). - -To learn specifically about how plugins work, see documentation on [Vault plugins](https://www.vaultproject.io/docs/internals/plugins.html). - -### Usage - -Please see [documentation for the plugin](https://www.vaultproject.io/docs/auth/gcp.html) -on the Vault website. - -This plugin is currently built into Vault and by default is accessed -at `auth/gcp`. To enable this in a running Vault server: - -```sh -$ vault auth-enable 'gcp' -Successfully enabled 'gcp' at 'gcp'! -``` - -To see all the supported paths, see the [GCP auth backend docs](https://www.vaultproject.io/docs/auth/gcp.html). - -## Developing - -If you wish to work on this plugin, you'll first need -[Go](https://www.golang.org) installed on your machine -(version 1.8+ is *required*). - -For local dev first make sure Go is properly installed, including -setting up a [GOPATH](https://golang.org/doc/code.html#GOPATH). -Next, clone this repository into -`$GOPATH/src/github.com/hashicorp/vault-gcp-auth-plugin`. -You can then download any required build tools by bootstrapping your -environment: - -```sh -$ make bootstrap -``` - -To compile a development version of this plugin, run `make` or `make dev`. -This will put the plugin binary in the `bin` and `$GOPATH/bin` folders. `dev` -mode will only generate the binary for your platform and is faster: - -```sh -$ make -$ make dev -``` - -Put the plugin binary into a location of your choice. This directory -will be specified as the [`plugin_directory`](https://www.vaultproject.io/docs/configuration/index.html#plugin_directory) -in the Vault config used to start the server. - -```json -... -plugin_directory = "path/to/plugin/directory" -... -``` - -Start a Vault server with this config file: -```sh -$ vault server -config=path/to/config.json ... -... -``` - -Once the server is started, register the plugin in the Vault server's [plugin catalog](https://www.vaultproject.io/docs/internals/plugins.html#plugin-catalog): - -```sh -$ vault write sys/plugins/catalog/mygcpplugin \ - sha_256= \ - command="vault-plugin-auth-gcp" -... -Success! Data written to: sys/plugins/catalog/mygcpplugin -``` - -Note you should generate a new sha256 checksum if you have made changes -to the plugin. Example using openssl: - -```sh -openssl dgst -sha256 $GOPATH/vault-plugin-gcp-auth -... 
-SHA256(.../go/bin/vault-plugin-auth-gcp)= 896c13c0f5305daed381952a128322e02bc28a57d0c862a78cbc2ea66e8c6fa1 -``` - -Any name can be substituted for the plugin name "mygcpplugin". This -name will be referenced in the next step, where we enable the auth -plugin backend using the GCP auth plugin: - -```sh -$ vault auth-enable -plugin-name='mygcpplugin' -path='gcp' plugin -... - -Successfully enabled 'plugin' at 'gcp'! -``` - -#### Tests - -This plugin has comprehensive [acceptance tests](https://en.wikipedia.org/wiki/Acceptance_testing) -covering most of the features of this auth backend. - -If you are developing this plugin and want to verify it is still -functioning (and you haven't broken anything else), we recommend -running the acceptance tests. - -Acceptance tests typically require other environment variables to be set for -things such as access keys. The test itself should error early and tell -you what to set, so it is not documented here. - -**Warning:** The acceptance tests create/destroy/modify *real resources*, -which may incur real costs in some cases. In the presence of a bug, -it is technically possible that broken backends could leave dangling -data behind. Therefore, please run the acceptance tests at your own risk. -At the very least, we recommend running them in their own private -account for whatever backend you're testing. - -To run the acceptance tests, invoke `make test`: - -```sh -$ make test -``` - -You can also specify a `TESTARGS` variable to filter tests like so: - -```sh -$ make test TESTARGS='--run=TestConfig' -``` diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-gcp/main.go b/vendor/github.com/hashicorp/vault-plugin-auth-gcp/main.go deleted file mode 100644 index 2bcda677e0..0000000000 --- a/vendor/github.com/hashicorp/vault-plugin-auth-gcp/main.go +++ /dev/null @@ -1,28 +0,0 @@ -package main - -import ( - "log" - "os" - - gcpbackend "github.com/hashicorp/vault-plugin-auth-gcp/plugin" - "github.com/hashicorp/vault/helper/pluginutil" - "github.com/hashicorp/vault/logical/plugin" -) - -func main() { - apiClientMeta := &pluginutil.APIClientMeta{} - flags := apiClientMeta.FlagSet() - flags.Parse(os.Args[1:]) - - tlsConfig := apiClientMeta.GetTLSConfig() - tlsProviderFunc := pluginutil.VaultPluginTLSProvider(tlsConfig) - - err := plugin.Serve(&plugin.ServeOpts{ - BackendFactoryFunc: gcpbackend.Factory, - TLSProviderFunc: tlsProviderFunc, - }) - if err != nil { - log.Println(err) - os.Exit(1) - } -} diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-gcp/plugin/path_login.go b/vendor/github.com/hashicorp/vault-plugin-auth-gcp/plugin/path_login.go index b6a75da3a9..52d750d1ea 100644 --- a/vendor/github.com/hashicorp/vault-plugin-auth-gcp/plugin/path_login.go +++ b/vendor/github.com/hashicorp/vault-plugin-auth-gcp/plugin/path_login.go @@ -563,7 +563,7 @@ func (b *GcpAuthBackend) authorizeGCEInstance(instance *compute.Instance, s logi serviceAccount, err := util.ServiceAccount(iamClient, serviceAccountId, role.ProjectId) if err != nil { - return fmt.Errorf("could not find service acocunt with id '%s': ") + return fmt.Errorf("could not find service account with id '%s': %v", serviceAccountId, err) } if !(strutil.StrListContains(role.BoundServiceAccounts, serviceAccount.Email) || diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-gcp/plugin/util/iamutil.go b/vendor/github.com/hashicorp/vault-plugin-auth-gcp/plugin/util/iamutil.go index 9d1d2011a0..1854178c71 100644 --- a/vendor/github.com/hashicorp/vault-plugin-auth-gcp/plugin/util/iamutil.go +++ 
b/vendor/github.com/hashicorp/vault-plugin-auth-gcp/plugin/util/iamutil.go @@ -52,7 +52,7 @@ func ServiceAccountKey(iamClient *iam.Service, keyId, accountId, projectName str keyResource := fmt.Sprintf(serviceAccountKeyTemplate, projectName, accountId, keyId) key, err := iamClient.Projects.ServiceAccounts.Keys.Get(keyResource).PublicKeyType(serviceAccountKeyFileType).Do() if err != nil { - return nil, fmt.Errorf("service account key '%s' does not exist", keyResource) + return nil, fmt.Errorf("service account key '%s' does not exist: %v", keyResource, err) } return key, nil } diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-kubernetes/Gopkg.lock b/vendor/github.com/hashicorp/vault-plugin-auth-kubernetes/Gopkg.lock index 8eadaf561c..1afbdfd95c 100644 --- a/vendor/github.com/hashicorp/vault-plugin-auth-kubernetes/Gopkg.lock +++ b/vendor/github.com/hashicorp/vault-plugin-auth-kubernetes/Gopkg.lock @@ -143,7 +143,7 @@ branch = "master" name = "github.com/hashicorp/vault" packages = ["api","helper/certutil","helper/compressutil","helper/consts","helper/errutil","helper/jsonutil","helper/logformat","helper/mlock","helper/parseutil","helper/pluginutil","helper/policyutil","helper/salt","helper/strutil","helper/wrapping","logical","logical/framework","logical/plugin"] - revision = "27197f728e7fc8bffc9eaa59e8af4e0766b81320" + revision = "1c4baa56e9882449ed70c0021100336a3465ea58" [[projects]] branch = "master" diff --git a/vendor/github.com/keybase/go-crypto/openpgp/armor/armor.go b/vendor/github.com/keybase/go-crypto/openpgp/armor/armor.go index 9513a09051..3d691ad5f9 100644 --- a/vendor/github.com/keybase/go-crypto/openpgp/armor/armor.go +++ b/vendor/github.com/keybase/go-crypto/openpgp/armor/armor.go @@ -94,6 +94,7 @@ func (l *lineReader) Read(p []byte) (n int, err error) { return } + // Entry-level cleanup, just trim spaces. line = bytes.TrimFunc(line, ourIsSpace) if len(line) == 5 && line[0] == '=' { @@ -133,6 +134,18 @@ func (l *lineReader) Read(p []byte) (n int, err error) { return 0, io.EOF } + // Clean-up line from whitespace to pass it further (to base64 + // decoder). This is done after test for CRC and test for + // armorEnd. Keys that have whitespace in CRC will have CRC + // treated as part of the payload and probably fail in base64 + // reading. + line = bytes.Map(func(r rune) rune { + if ourIsSpace(r) { + return -1 + } + return r + }, line) + n = copy(p, line) bytesToSave := len(line) - n if bytesToSave > 0 { diff --git a/vendor/github.com/lib/pq/README.md b/vendor/github.com/lib/pq/README.md index 7670fc87a5..781c89eea6 100644 --- a/vendor/github.com/lib/pq/README.md +++ b/vendor/github.com/lib/pq/README.md @@ -1,5 +1,6 @@ # pq - A pure Go postgres driver for Go's database/sql package +[![GoDoc](https://godoc.org/github.com/lib/pq?status.svg)](https://godoc.org/github.com/lib/pq) [![Build Status](https://travis-ci.org/lib/pq.svg?branch=master)](https://travis-ci.org/lib/pq) ## Install diff --git a/vendor/github.com/lib/pq/conn.go b/vendor/github.com/lib/pq/conn.go index 1725ab0d34..fadb88e5ea 100644 --- a/vendor/github.com/lib/pq/conn.go +++ b/vendor/github.com/lib/pq/conn.go @@ -35,8 +35,12 @@ var ( errNoLastInsertID = errors.New("no LastInsertId available after the empty statement") ) +// Driver is the Postgres database driver. type Driver struct{} +// Open opens a new connection to the database. name is a connection string. +// Most users should only use it through database/sql package from the standard +// library. 
func (d *Driver) Open(name string) (driver.Conn, error) { return Open(name) } @@ -78,6 +82,8 @@ func (s transactionStatus) String() string { panic("not reached") } +// Dialer is the dialer interface. It can be used to obtain more control over +// how pq creates network connections. type Dialer interface { Dial(network, address string) (net.Conn, error) DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) @@ -149,11 +155,7 @@ func (cn *conn) handleDriverSettings(o values) (err error) { if err != nil { return err } - err = boolSetting("binary_parameters", &cn.binaryParameters) - if err != nil { - return err - } - return nil + return boolSetting("binary_parameters", &cn.binaryParameters) } func (cn *conn) handlePgpass(o values) { @@ -165,11 +167,16 @@ func (cn *conn) handlePgpass(o values) { if filename == "" { // XXX this code doesn't work on Windows where the default filename is // XXX %APPDATA%\postgresql\pgpass.conf - user, err := user.Current() - if err != nil { - return + // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470 + userHome := os.Getenv("HOME") + if userHome == "" { + user, err := user.Current() + if err != nil { + return + } + userHome = user.HomeDir } - filename = filepath.Join(user.HomeDir, ".pgpass") + filename = filepath.Join(userHome, ".pgpass") } fileinfo, err := os.Stat(filename) if err != nil { @@ -237,10 +244,14 @@ func (cn *conn) writeBuf(b byte) *writeBuf { } } +// Open opens a new connection to the database. name is a connection string. +// Most users should only use it through database/sql package from the standard +// library. func Open(name string) (_ driver.Conn, err error) { return DialOpen(defaultDialer{}, name) } +// DialOpen opens a new connection to the database using a dialer. func DialOpen(d Dialer, name string) (_ driver.Conn, err error) { // Handle any panics during connection initialization. Note that we // specifically do *not* want to use errRecover(), as that would turn any @@ -706,7 +717,7 @@ func (noRows) RowsAffected() (int64, error) { // Decides which column formats to use for a prepared statement. The input is // an array of type oids, one element per result column. -func decideColumnFormats(colTyps []oid.Oid, forceText bool) (colFmts []format, colFmtData []byte) { +func decideColumnFormats(colTyps []fieldDesc, forceText bool) (colFmts []format, colFmtData []byte) { if len(colTyps) == 0 { return nil, colFmtDataAllText } @@ -718,8 +729,8 @@ func decideColumnFormats(colTyps []oid.Oid, forceText bool) (colFmts []format, c allBinary := true allText := true - for i, o := range colTyps { - switch o { + for i, t := range colTyps { + switch t.OID { // This is the list of types to use binary mode for when receiving them // through a prepared statement. If a type appears in this list, it // must also be implemented in binaryDecode in encode.go. 
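// Hypothetical usage sketch: passing a custom Dialer to DialOpen, as the
// Dialer doc comment above suggests, to control how network connections are
// made. The timeoutDialer type and connection string are made up for
// illustration.
package main

import (
	"log"
	"net"
	"time"

	"github.com/lib/pq"
)

type timeoutDialer struct{}

func (timeoutDialer) Dial(network, address string) (net.Conn, error) {
	return net.DialTimeout(network, address, 5*time.Second)
}

func (timeoutDialer) DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) {
	return net.DialTimeout(network, address, timeout)
}

func main() {
	// DialOpen returns a driver.Conn directly; most code would go through
	// database/sql instead, but this shows where the Dialer plugs in.
	conn, err := pq.DialOpen(timeoutDialer{}, "user=pqgotest dbname=pqgotest sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}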
@@ -1155,7 +1166,7 @@ type stmt struct { colNames []string colFmts []format colFmtData []byte - colTyps []oid.Oid + colTyps []fieldDesc paramTyps []oid.Oid closed bool } @@ -1318,7 +1329,7 @@ type rows struct { cn *conn finish func() colNames []string - colTyps []oid.Oid + colTyps []fieldDesc colFmts []format done bool rb readBuf @@ -1406,7 +1417,7 @@ func (rs *rows) Next(dest []driver.Value) (err error) { dest[i] = nil continue } - dest[i] = decode(&conn.parameterStatus, rs.rb.next(l), rs.colTyps[i], rs.colFmts[i]) + dest[i] = decode(&conn.parameterStatus, rs.rb.next(l), rs.colTyps[i].OID, rs.colFmts[i]) } return case 'T': @@ -1431,7 +1442,8 @@ func (rs *rows) NextResultSet() error { // // tblname := "my_table" // data := "my_data" -// err = db.Exec(fmt.Sprintf("INSERT INTO %s VALUES ($1)", pq.QuoteIdentifier(tblname)), data) +// quoted := pq.QuoteIdentifier(tblname) +// err := db.Exec(fmt.Sprintf("INSERT INTO %s VALUES ($1)", quoted), data) // // Any double quotes in name will be escaped. The quoted identifier will be // case sensitive when used in a query. If the input string contains a zero @@ -1573,7 +1585,7 @@ func (cn *conn) readParseResponse() { } } -func (cn *conn) readStatementDescribeResponse() (paramTyps []oid.Oid, colNames []string, colTyps []oid.Oid) { +func (cn *conn) readStatementDescribeResponse() (paramTyps []oid.Oid, colNames []string, colTyps []fieldDesc) { for { t, r := cn.recv1() switch t { @@ -1599,7 +1611,7 @@ func (cn *conn) readStatementDescribeResponse() (paramTyps []oid.Oid, colNames [ } } -func (cn *conn) readPortalDescribeResponse() (colNames []string, colFmts []format, colTyps []oid.Oid) { +func (cn *conn) readPortalDescribeResponse() (colNames []string, colFmts []format, colTyps []fieldDesc) { t, r := cn.recv1() switch t { case 'T': @@ -1695,31 +1707,33 @@ func (cn *conn) readExecuteResponse(protocolState string) (res driver.Result, co } } -func parseStatementRowDescribe(r *readBuf) (colNames []string, colTyps []oid.Oid) { +func parseStatementRowDescribe(r *readBuf) (colNames []string, colTyps []fieldDesc) { n := r.int16() colNames = make([]string, n) - colTyps = make([]oid.Oid, n) + colTyps = make([]fieldDesc, n) for i := range colNames { colNames[i] = r.string() r.next(6) - colTyps[i] = r.oid() - r.next(6) + colTyps[i].OID = r.oid() + colTyps[i].Len = r.int16() + colTyps[i].Mod = r.int32() // format code not known when describing a statement; always 0 r.next(2) } return } -func parsePortalRowDescribe(r *readBuf) (colNames []string, colFmts []format, colTyps []oid.Oid) { +func parsePortalRowDescribe(r *readBuf) (colNames []string, colFmts []format, colTyps []fieldDesc) { n := r.int16() colNames = make([]string, n) colFmts = make([]format, n) - colTyps = make([]oid.Oid, n) + colTyps = make([]fieldDesc, n) for i := range colNames { colNames[i] = r.string() r.next(6) - colTyps[i] = r.oid() - r.next(6) + colTyps[i].OID = r.oid() + colTyps[i].Len = r.int16() + colTyps[i].Mod = r.int32() colFmts[i] = format(r.int16()) } return diff --git a/vendor/github.com/lib/pq/doc.go b/vendor/github.com/lib/pq/doc.go index 6d252ecee2..b2c3582c84 100644 --- a/vendor/github.com/lib/pq/doc.go +++ b/vendor/github.com/lib/pq/doc.go @@ -11,7 +11,8 @@ using this package directly. For example: ) func main() { - db, err := sql.Open("postgres", "user=pqgotest dbname=pqgotest sslmode=verify-full") + connStr := "user=pqgotest dbname=pqgotest sslmode=verify-full" + db, err := sql.Open("postgres", connStr) if err != nil { log.Fatal(err) } @@ -23,7 +24,8 @@ using this package directly. 
For example: You can also connect to a database using a URL. For example: - db, err := sql.Open("postgres", "postgres://pqgotest:password@localhost/pqgotest?sslmode=verify-full") + connStr := "postgres://pqgotest:password@localhost/pqgotest?sslmode=verify-full" + db, err := sql.Open("postgres", connStr) Connection String Parameters @@ -43,21 +45,28 @@ supported: * dbname - The name of the database to connect to * user - The user to sign in as * password - The user's password - * host - The host to connect to. Values that start with / are for unix domain sockets. (default is localhost) + * host - The host to connect to. Values that start with / are for unix + domain sockets. (default is localhost) * port - The port to bind to. (default is 5432) - * sslmode - Whether or not to use SSL (default is require, this is not the default for libpq) + * sslmode - Whether or not to use SSL (default is require, this is not + the default for libpq) * fallback_application_name - An application_name to fall back to if one isn't provided. - * connect_timeout - Maximum wait for connection, in seconds. Zero or not specified means wait indefinitely. + * connect_timeout - Maximum wait for connection, in seconds. Zero or + not specified means wait indefinitely. * sslcert - Cert file location. The file must contain PEM encoded data. * sslkey - Key file location. The file must contain PEM encoded data. - * sslrootcert - The location of the root certificate file. The file must contain PEM encoded data. + * sslrootcert - The location of the root certificate file. The file + must contain PEM encoded data. Valid values for sslmode are: * disable - No SSL * require - Always SSL (skip verification) - * verify-ca - Always SSL (verify that the certificate presented by the server was signed by a trusted CA) - * verify-full - Always SSL (verify that the certification presented by the server was signed by a trusted CA and the server host name matches the one in the certificate) + * verify-ca - Always SSL (verify that the certificate presented by the + server was signed by a trusted CA) + * verify-full - Always SSL (verify that the certification presented by + the server was signed by a trusted CA and the server host name + matches the one in the certificate) See http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING for more information about connection string parameters. @@ -68,7 +77,7 @@ Use single quotes for values that contain whitespace: A backslash will escape the next character in values: - "user=space\ man password='it\'s valid' + "user=space\ man password='it\'s valid'" Note that the connection parameter client_encoding (which sets the text encoding for the connection) may be set but must be "UTF8", @@ -129,7 +138,8 @@ This package returns the following types for values from the PostgreSQL backend: - integer types smallint, integer, and bigint are returned as int64 - floating-point types real and double precision are returned as float64 - character types char, varchar, and text are returned as string - - temporal types date, time, timetz, timestamp, and timestamptz are returned as time.Time + - temporal types date, time, timetz, timestamp, and timestamptz are + returned as time.Time - the boolean type is returned as bool - the bytea type is returned as []byte @@ -229,7 +239,7 @@ for more information). Note that the channel name will be truncated to 63 bytes by the PostgreSQL server. 
You can find a complete, working example of Listener usage at -http://godoc.org/github.com/lib/pq/listen_example. +http://godoc.org/github.com/lib/pq/examples/listen. */ package pq diff --git a/vendor/github.com/lib/pq/encode.go b/vendor/github.com/lib/pq/encode.go index 88a322cda8..3b0d365f29 100644 --- a/vendor/github.com/lib/pq/encode.go +++ b/vendor/github.com/lib/pq/encode.go @@ -367,8 +367,15 @@ func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, erro timeSep := daySep + 3 day := p.mustAtoi(str, daySep+1, timeSep) + minLen := monSep + len("01-01") + 1 + + isBC := strings.HasSuffix(str, " BC") + if isBC { + minLen += 3 + } + var hour, minute, second int - if len(str) > monSep+len("01-01")+1 { + if len(str) > minLen { p.expect(str, ' ', timeSep) minSep := timeSep + 3 p.expect(str, ':', minSep) @@ -424,7 +431,8 @@ func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, erro tzOff = tzSign * ((tzHours * 60 * 60) + (tzMin * 60) + tzSec) } var isoYear int - if remainderIdx+3 <= len(str) && str[remainderIdx:remainderIdx+3] == " BC" { + + if isBC { isoYear = 1 - year remainderIdx += 3 } else { diff --git a/vendor/github.com/lib/pq/notify.go b/vendor/github.com/lib/pq/notify.go index 09f94244b9..412c6ac1e2 100644 --- a/vendor/github.com/lib/pq/notify.go +++ b/vendor/github.com/lib/pq/notify.go @@ -60,7 +60,7 @@ type ListenerConn struct { replyChan chan message } -// Creates a new ListenerConn. Use NewListener instead. +// NewListenerConn creates a new ListenerConn. Use NewListener instead. func NewListenerConn(name string, notificationChan chan<- *Notification) (*ListenerConn, error) { return newDialListenerConn(defaultDialer{}, name, notificationChan) } @@ -214,17 +214,17 @@ func (l *ListenerConn) listenerConnMain() { // this ListenerConn is done } -// Send a LISTEN query to the server. See ExecSimpleQuery. +// Listen sends a LISTEN query to the server. See ExecSimpleQuery. func (l *ListenerConn) Listen(channel string) (bool, error) { return l.ExecSimpleQuery("LISTEN " + QuoteIdentifier(channel)) } -// Send an UNLISTEN query to the server. See ExecSimpleQuery. +// Unlisten sends an UNLISTEN query to the server. See ExecSimpleQuery. func (l *ListenerConn) Unlisten(channel string) (bool, error) { return l.ExecSimpleQuery("UNLISTEN " + QuoteIdentifier(channel)) } -// Send `UNLISTEN *` to the server. See ExecSimpleQuery. +// UnlistenAll sends an `UNLISTEN *` query to the server. See ExecSimpleQuery. func (l *ListenerConn) UnlistenAll() (bool, error) { return l.ExecSimpleQuery("UNLISTEN *") } @@ -267,8 +267,8 @@ func (l *ListenerConn) sendSimpleQuery(q string) (err error) { return nil } -// Execute a "simple query" (i.e. one with no bindable parameters) on the -// connection. The possible return values are: +// ExecSimpleQuery executes a "simple query" (i.e. one with no bindable +// parameters) on the connection. The possible return values are: // 1) "executed" is true; the query was executed to completion on the // database server. If the query failed, err will be set to the error // returned by the database, otherwise err will be nil. @@ -333,6 +333,7 @@ func (l *ListenerConn) ExecSimpleQuery(q string) (executed bool, err error) { } } +// Close closes the connection. func (l *ListenerConn) Close() error { l.connectionLock.Lock() if l.err != nil { @@ -346,7 +347,7 @@ func (l *ListenerConn) Close() error { return l.cn.c.Close() } -// Err() returns the reason the connection was closed. 
It is not safe to call +// Err returns the reason the connection was closed. It is not safe to call // this function until l.Notify has been closed. func (l *ListenerConn) Err() error { return l.err @@ -354,32 +355,43 @@ func (l *ListenerConn) Err() error { var errListenerClosed = errors.New("pq: Listener has been closed") +// ErrChannelAlreadyOpen is returned from Listen when a channel is already +// open. var ErrChannelAlreadyOpen = errors.New("pq: channel is already open") + +// ErrChannelNotOpen is returned from Unlisten when a channel is not open. var ErrChannelNotOpen = errors.New("pq: channel is not open") +// ListenerEventType is an enumeration of listener event types. type ListenerEventType int const ( - // Emitted only when the database connection has been initially - // initialized. err will always be nil. + // ListenerEventConnected is emitted only when the database connection + // has been initially initialized. The err argument of the callback + // will always be nil. ListenerEventConnected ListenerEventType = iota - // Emitted after a database connection has been lost, either because of an - // error or because Close has been called. err will be set to the reason - // the database connection was lost. + // ListenerEventDisconnected is emitted after a database connection has + // been lost, either because of an error or because Close has been + // called. The err argument will be set to the reason the database + // connection was lost. ListenerEventDisconnected - // Emitted after a database connection has been re-established after - // connection loss. err will always be nil. After this event has been - // emitted, a nil pq.Notification is sent on the Listener.Notify channel. + // ListenerEventReconnected is emitted after a database connection has + // been re-established after connection loss. The err argument of the + // callback will always be nil. After this event has been emitted, a + // nil pq.Notification is sent on the Listener.Notify channel. ListenerEventReconnected - // Emitted after a connection to the database was attempted, but failed. - // err will be set to an error describing why the connection attempt did - // not succeed. + // ListenerEventConnectionAttemptFailed is emitted after a connection + // to the database was attempted, but failed. The err argument will be + // set to an error describing why the connection attempt did not + // succeed. ListenerEventConnectionAttemptFailed ) +// EventCallbackType is the event callback type. See also ListenerEventType +// constants' documentation. type EventCallbackType func(event ListenerEventType, err error) // Listener provides an interface for listening to notifications from a @@ -454,9 +466,9 @@ func NewDialListener(d Dialer, return l } -// Returns the notification channel for this listener. This is the same -// channel as Notify, and will not be recreated during the life time of the -// Listener. +// NotificationChannel returns the notification channel for this listener. +// This is the same channel as Notify, and will not be recreated during the +// life time of the Listener. func (l *Listener) NotificationChannel() <-chan *Notification { return l.Notify } @@ -639,7 +651,7 @@ func (l *Listener) resync(cn *ListenerConn, notificationChan <-chan *Notificatio // close and then return the error message from the connection, as // per ListenerConn's interface. 
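// Hypothetical usage sketch: a minimal LISTEN/NOTIFY consumer built on the
// Listener type documented above. The connection string and channel name are
// made up for illustration.
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/lib/pq"
)

func main() {
	connStr := "user=pqgotest dbname=pqgotest sslmode=disable"
	reportProblem := func(ev pq.ListenerEventType, err error) {
		if err != nil {
			log.Println("listener event error:", err)
		}
	}

	// Reconnect between 10s and 1m if the connection drops.
	l := pq.NewListener(connStr, 10*time.Second, time.Minute, reportProblem)
	if err := l.Listen("events"); err != nil {
		log.Fatal(err)
	}

	for n := range l.Notify {
		if n == nil {
			// A nil notification is sent after a reconnect.
			continue
		}
		fmt.Println("notification on", n.Channel, "payload:", n.Extra)
	}
}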
if err != nil { - for _ = range notificationChan { + for range notificationChan { } doneChan <- cn.Err() return diff --git a/vendor/github.com/lib/pq/oid/types.go b/vendor/github.com/lib/pq/oid/types.go index a3390c23a8..ecc84c2c86 100644 --- a/vendor/github.com/lib/pq/oid/types.go +++ b/vendor/github.com/lib/pq/oid/types.go @@ -1,4 +1,4 @@ -// generated by 'go run gen.go'; do not edit +// Code generated by gen.go. DO NOT EDIT. package oid @@ -171,3 +171,173 @@ const ( T_regrole Oid = 4096 T__regrole Oid = 4097 ) + +var TypeName = map[Oid]string{ + T_bool: "BOOL", + T_bytea: "BYTEA", + T_char: "CHAR", + T_name: "NAME", + T_int8: "INT8", + T_int2: "INT2", + T_int2vector: "INT2VECTOR", + T_int4: "INT4", + T_regproc: "REGPROC", + T_text: "TEXT", + T_oid: "OID", + T_tid: "TID", + T_xid: "XID", + T_cid: "CID", + T_oidvector: "OIDVECTOR", + T_pg_ddl_command: "PG_DDL_COMMAND", + T_pg_type: "PG_TYPE", + T_pg_attribute: "PG_ATTRIBUTE", + T_pg_proc: "PG_PROC", + T_pg_class: "PG_CLASS", + T_json: "JSON", + T_xml: "XML", + T__xml: "_XML", + T_pg_node_tree: "PG_NODE_TREE", + T__json: "_JSON", + T_smgr: "SMGR", + T_index_am_handler: "INDEX_AM_HANDLER", + T_point: "POINT", + T_lseg: "LSEG", + T_path: "PATH", + T_box: "BOX", + T_polygon: "POLYGON", + T_line: "LINE", + T__line: "_LINE", + T_cidr: "CIDR", + T__cidr: "_CIDR", + T_float4: "FLOAT4", + T_float8: "FLOAT8", + T_abstime: "ABSTIME", + T_reltime: "RELTIME", + T_tinterval: "TINTERVAL", + T_unknown: "UNKNOWN", + T_circle: "CIRCLE", + T__circle: "_CIRCLE", + T_money: "MONEY", + T__money: "_MONEY", + T_macaddr: "MACADDR", + T_inet: "INET", + T__bool: "_BOOL", + T__bytea: "_BYTEA", + T__char: "_CHAR", + T__name: "_NAME", + T__int2: "_INT2", + T__int2vector: "_INT2VECTOR", + T__int4: "_INT4", + T__regproc: "_REGPROC", + T__text: "_TEXT", + T__tid: "_TID", + T__xid: "_XID", + T__cid: "_CID", + T__oidvector: "_OIDVECTOR", + T__bpchar: "_BPCHAR", + T__varchar: "_VARCHAR", + T__int8: "_INT8", + T__point: "_POINT", + T__lseg: "_LSEG", + T__path: "_PATH", + T__box: "_BOX", + T__float4: "_FLOAT4", + T__float8: "_FLOAT8", + T__abstime: "_ABSTIME", + T__reltime: "_RELTIME", + T__tinterval: "_TINTERVAL", + T__polygon: "_POLYGON", + T__oid: "_OID", + T_aclitem: "ACLITEM", + T__aclitem: "_ACLITEM", + T__macaddr: "_MACADDR", + T__inet: "_INET", + T_bpchar: "BPCHAR", + T_varchar: "VARCHAR", + T_date: "DATE", + T_time: "TIME", + T_timestamp: "TIMESTAMP", + T__timestamp: "_TIMESTAMP", + T__date: "_DATE", + T__time: "_TIME", + T_timestamptz: "TIMESTAMPTZ", + T__timestamptz: "_TIMESTAMPTZ", + T_interval: "INTERVAL", + T__interval: "_INTERVAL", + T__numeric: "_NUMERIC", + T_pg_database: "PG_DATABASE", + T__cstring: "_CSTRING", + T_timetz: "TIMETZ", + T__timetz: "_TIMETZ", + T_bit: "BIT", + T__bit: "_BIT", + T_varbit: "VARBIT", + T__varbit: "_VARBIT", + T_numeric: "NUMERIC", + T_refcursor: "REFCURSOR", + T__refcursor: "_REFCURSOR", + T_regprocedure: "REGPROCEDURE", + T_regoper: "REGOPER", + T_regoperator: "REGOPERATOR", + T_regclass: "REGCLASS", + T_regtype: "REGTYPE", + T__regprocedure: "_REGPROCEDURE", + T__regoper: "_REGOPER", + T__regoperator: "_REGOPERATOR", + T__regclass: "_REGCLASS", + T__regtype: "_REGTYPE", + T_record: "RECORD", + T_cstring: "CSTRING", + T_any: "ANY", + T_anyarray: "ANYARRAY", + T_void: "VOID", + T_trigger: "TRIGGER", + T_language_handler: "LANGUAGE_HANDLER", + T_internal: "INTERNAL", + T_opaque: "OPAQUE", + T_anyelement: "ANYELEMENT", + T__record: "_RECORD", + T_anynonarray: "ANYNONARRAY", + T_pg_authid: "PG_AUTHID", + T_pg_auth_members: 
"PG_AUTH_MEMBERS", + T__txid_snapshot: "_TXID_SNAPSHOT", + T_uuid: "UUID", + T__uuid: "_UUID", + T_txid_snapshot: "TXID_SNAPSHOT", + T_fdw_handler: "FDW_HANDLER", + T_pg_lsn: "PG_LSN", + T__pg_lsn: "_PG_LSN", + T_tsm_handler: "TSM_HANDLER", + T_anyenum: "ANYENUM", + T_tsvector: "TSVECTOR", + T_tsquery: "TSQUERY", + T_gtsvector: "GTSVECTOR", + T__tsvector: "_TSVECTOR", + T__gtsvector: "_GTSVECTOR", + T__tsquery: "_TSQUERY", + T_regconfig: "REGCONFIG", + T__regconfig: "_REGCONFIG", + T_regdictionary: "REGDICTIONARY", + T__regdictionary: "_REGDICTIONARY", + T_jsonb: "JSONB", + T__jsonb: "_JSONB", + T_anyrange: "ANYRANGE", + T_event_trigger: "EVENT_TRIGGER", + T_int4range: "INT4RANGE", + T__int4range: "_INT4RANGE", + T_numrange: "NUMRANGE", + T__numrange: "_NUMRANGE", + T_tsrange: "TSRANGE", + T__tsrange: "_TSRANGE", + T_tstzrange: "TSTZRANGE", + T__tstzrange: "_TSTZRANGE", + T_daterange: "DATERANGE", + T__daterange: "_DATERANGE", + T_int8range: "INT8RANGE", + T__int8range: "_INT8RANGE", + T_pg_shseclabel: "PG_SHSECLABEL", + T_regnamespace: "REGNAMESPACE", + T__regnamespace: "_REGNAMESPACE", + T_regrole: "REGROLE", + T__regrole: "_REGROLE", +} diff --git a/vendor/github.com/lib/pq/rows.go b/vendor/github.com/lib/pq/rows.go new file mode 100644 index 0000000000..c6aa5b9a36 --- /dev/null +++ b/vendor/github.com/lib/pq/rows.go @@ -0,0 +1,93 @@ +package pq + +import ( + "math" + "reflect" + "time" + + "github.com/lib/pq/oid" +) + +const headerSize = 4 + +type fieldDesc struct { + // The object ID of the data type. + OID oid.Oid + // The data type size (see pg_type.typlen). + // Note that negative values denote variable-width types. + Len int + // The type modifier (see pg_attribute.atttypmod). + // The meaning of the modifier is type-specific. + Mod int +} + +func (fd fieldDesc) Type() reflect.Type { + switch fd.OID { + case oid.T_int8: + return reflect.TypeOf(int64(0)) + case oid.T_int4: + return reflect.TypeOf(int32(0)) + case oid.T_int2: + return reflect.TypeOf(int16(0)) + case oid.T_varchar, oid.T_text: + return reflect.TypeOf("") + case oid.T_bool: + return reflect.TypeOf(false) + case oid.T_date, oid.T_time, oid.T_timetz, oid.T_timestamp, oid.T_timestamptz: + return reflect.TypeOf(time.Time{}) + case oid.T_bytea: + return reflect.TypeOf([]byte(nil)) + default: + return reflect.TypeOf(new(interface{})).Elem() + } +} + +func (fd fieldDesc) Name() string { + return oid.TypeName[fd.OID] +} + +func (fd fieldDesc) Length() (length int64, ok bool) { + switch fd.OID { + case oid.T_text, oid.T_bytea: + return math.MaxInt64, true + case oid.T_varchar, oid.T_bpchar: + return int64(fd.Mod - headerSize), true + default: + return 0, false + } +} + +func (fd fieldDesc) PrecisionScale() (precision, scale int64, ok bool) { + switch fd.OID { + case oid.T_numeric, oid.T__numeric: + mod := fd.Mod - headerSize + precision = int64((mod >> 16) & 0xffff) + scale = int64(mod & 0xffff) + return precision, scale, true + default: + return 0, 0, false + } +} + +// ColumnTypeScanType returns the value type that can be used to scan types into. +func (rs *rows) ColumnTypeScanType(index int) reflect.Type { + return rs.colTyps[index].Type() +} + +// ColumnTypeDatabaseTypeName return the database system type name. +func (rs *rows) ColumnTypeDatabaseTypeName(index int) string { + return rs.colTyps[index].Name() +} + +// ColumnTypeLength returns the length of the column type if the column is a +// variable length type. If the column is not a variable length type ok +// should return false. 
+func (rs *rows) ColumnTypeLength(index int) (length int64, ok bool) { + return rs.colTyps[index].Length() +} + +// ColumnTypePrecisionScale should return the precision and scale for decimal +// types. If not applicable, ok should be false. +func (rs *rows) ColumnTypePrecisionScale(index int) (precision, scale int64, ok bool) { + return rs.colTyps[index].PrecisionScale() +} diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/vendor/github.com/mailru/easyjson/jlexer/lexer.go index e81f1031b0..e5558ae39c 100644 --- a/vendor/github.com/mailru/easyjson/jlexer/lexer.go +++ b/vendor/github.com/mailru/easyjson/jlexer/lexer.go @@ -6,6 +6,7 @@ package jlexer import ( "encoding/base64" + "encoding/json" "errors" "fmt" "io" @@ -903,6 +904,10 @@ func (r *Lexer) UintStr() uint { return uint(r.Uint64Str()) } +func (r *Lexer) UintptrStr() uintptr { + return uintptr(r.Uint64Str()) +} + func (r *Lexer) Int8Str() int8 { s, b := r.unsafeString() if !r.Ok() { @@ -1043,6 +1048,28 @@ func (r *Lexer) GetNonFatalErrors() []*LexerError { return r.multipleErrors } +// JsonNumber fetches and json.Number from 'encoding/json' package. +// Both int, float or string, contains them are valid values +func (r *Lexer) JsonNumber() json.Number { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() { + r.errInvalidToken("json.Number") + return json.Number("0") + } + + switch r.token.kind { + case tokenString: + return json.Number(r.String()) + case tokenNumber: + return json.Number(r.Raw()) + default: + r.errSyntax() + return json.Number("0") + } +} + // Interface fetches an interface{} analogous to the 'encoding/json' package. func (r *Lexer) Interface() interface{} { if r.token.kind == tokenUndef && r.Ok() { diff --git a/vendor/github.com/mailru/easyjson/jwriter/writer.go b/vendor/github.com/mailru/easyjson/jwriter/writer.go index 7b55293a0f..250920d85b 100644 --- a/vendor/github.com/mailru/easyjson/jwriter/writer.go +++ b/vendor/github.com/mailru/easyjson/jwriter/writer.go @@ -196,6 +196,13 @@ func (w *Writer) Uint64Str(n uint64) { w.Buffer.Buf = append(w.Buffer.Buf, '"') } +func (w *Writer) UintptrStr(n uintptr) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + func (w *Writer) Int8Str(n int8) { w.Buffer.EnsureSpace(4) w.Buffer.Buf = append(w.Buffer.Buf, '"') diff --git a/vendor/github.com/mattn/go-isatty/isatty_linux.go b/vendor/github.com/mattn/go-isatty/isatty_linux.go index 9d24bac1db..7384cf9916 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_linux.go +++ b/vendor/github.com/mattn/go-isatty/isatty_linux.go @@ -1,5 +1,5 @@ // +build linux -// +build !appengine +// +build !appengine,!ppc64,!ppc64le package isatty diff --git a/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go b/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go new file mode 100644 index 0000000000..44e5d21302 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go @@ -0,0 +1,19 @@ +// +build linux +// +build ppc64 ppc64le + +package isatty + +import ( + "unsafe" + + syscall "golang.org/x/sys/unix" +) + +const ioctlReadTermios = syscall.TCGETS + +// IsTerminal return true if the file descriptor is terminal. 
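// Hypothetical usage sketch: checking whether stdout is attached to a
// terminal with IsTerminal, for example to decide whether to emit colored
// output.
package main

import (
	"fmt"
	"os"

	isatty "github.com/mattn/go-isatty"
)

func main() {
	if isatty.IsTerminal(os.Stdout.Fd()) {
		fmt.Println("stdout is a terminal")
	} else {
		fmt.Println("stdout is a pipe or a file")
	}
}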
+func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go b/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go index 07fbcb581a..31b42cadf8 100644 --- a/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go +++ b/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go @@ -19,14 +19,19 @@ import ( type T interface { Error(args ...interface{}) Errorf(format string, args ...interface{}) - Fatal(args ...interface{}) - Fatalf(format string, args ...interface{}) Fail() FailNow() Failed() bool - Helper() + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) Log(args ...interface{}) Logf(format string, args ...interface{}) + Name() string + Skip(args ...interface{}) + SkipNow() + Skipf(format string, args ...interface{}) + Skipped() bool + Helper() } // RuntimeT implements T and can be instantiated and run at runtime to @@ -34,7 +39,8 @@ type T interface { // for calls to Fatal. For calls to Error, you'll have to check the errors // list to determine whether to exit yourself. type RuntimeT struct { - failed bool + skipped bool + failed bool } func (t *RuntimeT) Error(args ...interface{}) { @@ -43,20 +49,10 @@ func (t *RuntimeT) Error(args ...interface{}) { } func (t *RuntimeT) Errorf(format string, args ...interface{}) { - log.Println(fmt.Sprintf(format, args...)) + log.Printf(format, args...) t.Fail() } -func (t *RuntimeT) Fatal(args ...interface{}) { - log.Println(fmt.Sprintln(args...)) - t.FailNow() -} - -func (t *RuntimeT) Fatalf(format string, args ...interface{}) { - log.Println(fmt.Sprintf(format, args...)) - t.FailNow() -} - func (t *RuntimeT) Fail() { t.failed = true } @@ -69,7 +65,15 @@ func (t *RuntimeT) Failed() bool { return t.failed } -func (t *RuntimeT) Helper() {} +func (t *RuntimeT) Fatal(args ...interface{}) { + log.Print(args...) + t.FailNow() +} + +func (t *RuntimeT) Fatalf(format string, args ...interface{}) { + log.Printf(format, args...) + t.FailNow() +} func (t *RuntimeT) Log(args ...interface{}) { log.Println(fmt.Sprintln(args...)) @@ -78,3 +82,27 @@ func (t *RuntimeT) Log(args ...interface{}) { func (t *RuntimeT) Logf(format string, args ...interface{}) { log.Println(fmt.Sprintf(format, args...)) } + +func (t *RuntimeT) Name() string { + return "" +} + +func (t *RuntimeT) Skip(args ...interface{}) { + log.Print(args...) + t.SkipNow() +} + +func (t *RuntimeT) SkipNow() { + t.skipped = true +} + +func (t *RuntimeT) Skipf(format string, args ...interface{}) { + log.Printf(format, args...) + t.SkipNow() +} + +func (t *RuntimeT) Skipped() bool { + return t.skipped +} + +func (t *RuntimeT) Helper() {} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go index 6ec5c33357..30a9957c65 100644 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -686,7 +686,11 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) // Compile the list of all the fields that we're going to be decoding // from all the structs. 
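// The fields are collected into an ordered slice of (StructField, Value)
// pairs rather than a map keyed by *reflect.StructField, so they are
// processed in the order they were gathered instead of Go's randomized
// map-iteration order.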
- fields := make(map[*reflect.StructField]reflect.Value) + type field struct { + field reflect.StructField + val reflect.Value + } + fields := []field{} for len(structs) > 0 { structVal := structs[0] structs = structs[1:] @@ -718,14 +722,16 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) } // Normal struct field, store it away - fields[&fieldType] = structVal.Field(i) + fields = append(fields, field{fieldType, structVal.Field(i)}) } } - for fieldType, field := range fields { - fieldName := fieldType.Name + // for fieldType, field := range fields { + for _, f := range fields { + field, fieldValue := f.field, f.val + fieldName := field.Name - tagValue := fieldType.Tag.Get(d.config.TagName) + tagValue := field.Tag.Get(d.config.TagName) tagValue = strings.SplitN(tagValue, ",", 2)[0] if tagValue != "" { fieldName = tagValue @@ -760,14 +766,14 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) // Delete the key we're using from the unused map so we stop tracking delete(dataValKeysUnused, rawMapKey.Interface()) - if !field.IsValid() { + if !fieldValue.IsValid() { // This should never happen panic("field is not valid") } // If we can't set the field, then it is unexported or something, // and we just continue onwards. - if !field.CanSet() { + if !fieldValue.CanSet() { continue } @@ -777,7 +783,7 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) fieldName = fmt.Sprintf("%s.%s", name, fieldName) } - if err := d.decode(fieldName, rawMapVal.Interface(), field); err != nil { + if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil { errors = appendErrors(errors, err) } } diff --git a/vendor/github.com/ncw/swift/largeobjects.go b/vendor/github.com/ncw/swift/largeobjects.go index 48594a7236..bec640b00e 100644 --- a/vendor/github.com/ncw/swift/largeobjects.go +++ b/vendor/github.com/ncw/swift/largeobjects.go @@ -276,8 +276,9 @@ func (file *largeObjectCreateFile) Size() int64 { } func withLORetry(expectedSize int64, fn func() (Headers, int64, error)) (err error) { + endTimer := time.NewTimer(readAfterWriteTimeout) + defer endTimer.Stop() waitingTime := readAfterWriteWait - endTimer := time.After(readAfterWriteTimeout) for { var headers Headers var sz int64 @@ -288,11 +289,13 @@ func withLORetry(expectedSize int64, fn func() (Headers, int64, error)) (err err } else { return } + waitTimer := time.NewTimer(waitingTime) select { - case <-endTimer: + case <-endTimer.C: + waitTimer.Stop() err = fmt.Errorf("Timeout expired while waiting for object to have size == %d, got: %d", expectedSize, sz) return - case <-time.After(waitingTime): + case <-waitTimer.C: waitingTime *= 2 } } diff --git a/vendor/github.com/ncw/swift/swift.go b/vendor/github.com/ncw/swift/swift.go index 1e8589c7b4..38e6965321 100644 --- a/vendor/github.com/ncw/swift/swift.go +++ b/vendor/github.com/ncw/swift/swift.go @@ -471,6 +471,7 @@ again: } if req != nil { timer := time.NewTimer(c.ConnectTimeout) + defer timer.Stop() var resp *http.Response resp, err = c.doTimeoutRequest(timer, req) if err != nil { @@ -691,6 +692,7 @@ func (c *Connection) Call(targetUrl string, p RequestOpts) (resp *http.Response, URL.RawQuery = p.Parameters.Encode() } timer := time.NewTimer(c.ConnectTimeout) + defer timer.Stop() reader := p.Body if reader != nil { reader = newWatchdogReader(reader, c.Timeout, timer) diff --git a/vendor/github.com/ncw/swift/timeout_reader.go b/vendor/github.com/ncw/swift/timeout_reader.go index 3839e9ea0a..88ae733281 
100644 --- a/vendor/github.com/ncw/swift/timeout_reader.go +++ b/vendor/github.com/ncw/swift/timeout_reader.go @@ -38,10 +38,12 @@ func (t *timeoutReader) Read(p []byte) (int, error) { done <- result{n, err} }() // Wait for the read or the timeout + timer := time.NewTimer(t.timeout) + defer timer.Stop() select { case r := <-done: return r.n, r.err - case <-time.After(t.timeout): + case <-timer.C: t.cancel() return 0, TimeoutError } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_32.go similarity index 93% rename from vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go rename to vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_32.go index 31ff3deb13..c5ca5d8623 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_32.go @@ -1,4 +1,5 @@ -// +build linux,arm +// +build linux +// +build 386 arm package system diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go deleted file mode 100644 index 3f7235ed15..0000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build linux,386 - -package system - -import ( - "golang.org/x/sys/unix" -) - -// Setuid sets the uid of the calling thread to the specified uid. -func Setuid(uid int) (err error) { - _, _, e1 := unix.RawSyscall(unix.SYS_SETUID32, uintptr(uid), 0, 0) - if e1 != 0 { - err = e1 - } - return -} - -// Setgid sets the gid of the calling thread to the specified gid. -func Setgid(gid int) (err error) { - _, _, e1 := unix.RawSyscall(unix.SYS_SETGID32, uintptr(gid), 0, 0) - if e1 != 0 { - err = e1 - } - return -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go index d7891a2ffa..11c3faafbf 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go @@ -1,4 +1,5 @@ -// +build linux,arm64 linux,amd64 linux,ppc linux,ppc64 linux,ppc64le linux,s390x +// +build linux +// +build arm64 amd64 mips mipsle mips64 mips64le ppc ppc64 ppc64le s390x package system diff --git a/vendor/github.com/samuel/go-zookeeper/zk/conn.go b/vendor/github.com/samuel/go-zookeeper/zk/conn.go index 54e2a482c9..589b370486 100644 --- a/vendor/github.com/samuel/go-zookeeper/zk/conn.go +++ b/vendor/github.com/samuel/go-zookeeper/zk/conn.go @@ -279,6 +279,16 @@ func WithMaxBufferSize(maxBufferSize int) connOption { } } +// WithMaxConnBufferSize sets maximum buffer size used to send and encode +// packets to Zookeeper server. The standard Zookeepeer client for java defaults +// to a limit of 1mb. This option should be used for non-standard server setup +// where znode is bigger than default 1mb. 
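// A sketch of intended use (assuming the variadic connOption form of
// Connect in this package):
//
//	c, _, err := zk.Connect(servers, sessionTimeout, zk.WithMaxConnBufferSize(4*1024*1024))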
+func WithMaxConnBufferSize(maxBufferSize int) connOption { + return func(c *Conn) { + c.buf = make([]byte, maxBufferSize) + } +} + func (c *Conn) Close() { close(c.shouldQuit) diff --git a/vendor/github.com/spf13/pflag/count.go b/vendor/github.com/spf13/pflag/count.go index 250a43814c..aa126e44d1 100644 --- a/vendor/github.com/spf13/pflag/count.go +++ b/vendor/github.com/spf13/pflag/count.go @@ -11,13 +11,13 @@ func newCountValue(val int, p *int) *countValue { } func (i *countValue) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 64) - // -1 means that no specific value was passed, so increment - if v == -1 { + // "+1" means that no specific value was passed, so increment + if s == "+1" { *i = countValue(*i + 1) - } else { - *i = countValue(v) + return nil } + v, err := strconv.ParseInt(s, 0, 0) + *i = countValue(v) return err } @@ -54,7 +54,7 @@ func (f *FlagSet) CountVar(p *int, name string, usage string) { // CountVarP is like CountVar only take a shorthand for the flag name. func (f *FlagSet) CountVarP(p *int, name, shorthand string, usage string) { flag := f.VarPF(newCountValue(0, p), name, shorthand, usage) - flag.NoOptDefVal = "-1" + flag.NoOptDefVal = "+1" } // CountVar like CountVar only the flag is placed on the CommandLine instead of a given flag set diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go index 6f1fc3007a..73c237b078 100644 --- a/vendor/github.com/spf13/pflag/flag.go +++ b/vendor/github.com/spf13/pflag/flag.go @@ -202,12 +202,18 @@ func sortFlags(flags map[NormalizedName]*Flag) []*Flag { func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) { f.normalizeNameFunc = n f.sortedFormal = f.sortedFormal[:0] - for k, v := range f.orderedFormal { - delete(f.formal, NormalizedName(v.Name)) - nname := f.normalizeFlagName(v.Name) - v.Name = string(nname) - f.formal[nname] = v - f.orderedFormal[k] = v + for fname, flag := range f.formal { + nname := f.normalizeFlagName(flag.Name) + if fname == nname { + continue + } + flag.Name = string(nname) + delete(f.formal, fname) + f.formal[nname] = flag + if _, set := f.actual[fname]; set { + delete(f.actual, fname) + f.actual[nname] = flag + } } } @@ -440,13 +446,15 @@ func (f *FlagSet) Set(name, value string) error { return fmt.Errorf("invalid argument %q for %q flag: %v", value, flagName, err) } - if f.actual == nil { - f.actual = make(map[NormalizedName]*Flag) - } - f.actual[normalName] = flag - f.orderedActual = append(f.orderedActual, flag) + if !flag.Changed { + if f.actual == nil { + f.actual = make(map[NormalizedName]*Flag) + } + f.actual[normalName] = flag + f.orderedActual = append(f.orderedActual, flag) - flag.Changed = true + flag.Changed = true + } if flag.Deprecated != "" { fmt.Fprintf(f.out(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated) @@ -556,6 +564,10 @@ func UnquoteUsage(flag *Flag) (name string, usage string) { name = "int" case "uint64": name = "uint" + case "stringSlice": + name = "strings" + case "intSlice": + name = "ints" } return @@ -660,6 +672,10 @@ func (f *FlagSet) FlagUsagesWrapped(cols int) string { if flag.NoOptDefVal != "true" { line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) } + case "count": + if flag.NoOptDefVal != "+1" { + line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) + } default: line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) } @@ -857,8 +873,10 @@ func VarP(value Value, name, shorthand, usage string) { // returns the error. 
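// Note that with ErrorHandling set to ContinueOnError the message is not
// printed here; the error is simply returned so the caller decides how to
// report it.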
func (f *FlagSet) failf(format string, a ...interface{}) error { err := fmt.Errorf(format, a...) - fmt.Fprintln(f.out(), err) - f.usage() + if f.errorHandling != ContinueOnError { + fmt.Fprintln(f.out(), err) + f.usage() + } return err } @@ -912,6 +930,9 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin } err = fn(flag, value) + if err != nil { + f.failf(err.Error()) + } return } @@ -962,6 +983,9 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse } err = fn(flag, value) + if err != nil { + f.failf(err.Error()) + } return } @@ -1034,6 +1058,7 @@ func (f *FlagSet) Parse(arguments []string) error { case ContinueOnError: return err case ExitOnError: + fmt.Println(err) os.Exit(2) case PanicOnError: panic(err) diff --git a/vendor/github.com/spf13/pflag/int16.go b/vendor/github.com/spf13/pflag/int16.go new file mode 100644 index 0000000000..f1a01d05e6 --- /dev/null +++ b/vendor/github.com/spf13/pflag/int16.go @@ -0,0 +1,88 @@ +package pflag + +import "strconv" + +// -- int16 Value +type int16Value int16 + +func newInt16Value(val int16, p *int16) *int16Value { + *p = val + return (*int16Value)(p) +} + +func (i *int16Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 16) + *i = int16Value(v) + return err +} + +func (i *int16Value) Type() string { + return "int16" +} + +func (i *int16Value) String() string { return strconv.FormatInt(int64(*i), 10) } + +func int16Conv(sval string) (interface{}, error) { + v, err := strconv.ParseInt(sval, 0, 16) + if err != nil { + return 0, err + } + return int16(v), nil +} + +// GetInt16 returns the int16 value of a flag with the given name +func (f *FlagSet) GetInt16(name string) (int16, error) { + val, err := f.getFlagType(name, "int16", int16Conv) + if err != nil { + return 0, err + } + return val.(int16), nil +} + +// Int16Var defines an int16 flag with specified name, default value, and usage string. +// The argument p points to an int16 variable in which to store the value of the flag. +func (f *FlagSet) Int16Var(p *int16, name string, value int16, usage string) { + f.VarP(newInt16Value(value, p), name, "", usage) +} + +// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int16VarP(p *int16, name, shorthand string, value int16, usage string) { + f.VarP(newInt16Value(value, p), name, shorthand, usage) +} + +// Int16Var defines an int16 flag with specified name, default value, and usage string. +// The argument p points to an int16 variable in which to store the value of the flag. +func Int16Var(p *int16, name string, value int16, usage string) { + CommandLine.VarP(newInt16Value(value, p), name, "", usage) +} + +// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash. +func Int16VarP(p *int16, name, shorthand string, value int16, usage string) { + CommandLine.VarP(newInt16Value(value, p), name, shorthand, usage) +} + +// Int16 defines an int16 flag with specified name, default value, and usage string. +// The return value is the address of an int16 variable that stores the value of the flag. +func (f *FlagSet) Int16(name string, value int16, usage string) *int16 { + p := new(int16) + f.Int16VarP(p, name, "", value, usage) + return p +} + +// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash. 
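// For example (a sketch using the package-level CommandLine helpers):
//
//	retries := pflag.Int16P("retries", "r", 3, "number of retries")
//	pflag.Parse()
//	fmt.Println("retries:", *retries)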
+func (f *FlagSet) Int16P(name, shorthand string, value int16, usage string) *int16 { + p := new(int16) + f.Int16VarP(p, name, shorthand, value, usage) + return p +} + +// Int16 defines an int16 flag with specified name, default value, and usage string. +// The return value is the address of an int16 variable that stores the value of the flag. +func Int16(name string, value int16, usage string) *int16 { + return CommandLine.Int16P(name, "", value, usage) +} + +// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash. +func Int16P(name, shorthand string, value int16, usage string) *int16 { + return CommandLine.Int16P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/tv42/httpunix/LICENSE b/vendor/github.com/tv42/httpunix/LICENSE new file mode 100644 index 0000000000..33aec14578 --- /dev/null +++ b/vendor/github.com/tv42/httpunix/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2013-2015 Tommi Virtanen. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/tv42/httpunix/httpunix.go b/vendor/github.com/tv42/httpunix/httpunix.go new file mode 100644 index 0000000000..95f5e95a81 --- /dev/null +++ b/vendor/github.com/tv42/httpunix/httpunix.go @@ -0,0 +1,95 @@ +// Package httpunix provides a HTTP transport (net/http.RoundTripper) +// that uses Unix domain sockets instead of HTTP. +// +// This is useful for non-browser connections within the same host, as +// it allows using the file system for credentials of both client +// and server, and guaranteeing unique names. +// +// The URLs look like this: +// +// http+unix://LOCATION/PATH_ETC +// +// where LOCATION is translated to a file system path with +// Transport.RegisterLocation, and PATH_ETC follow normal http: scheme +// conventions. +package httpunix + +import ( + "bufio" + "errors" + "net" + "net/http" + "sync" + "time" +) + +// Scheme is the URL scheme used for HTTP over UNIX domain sockets. +const Scheme = "http+unix" + +// Transport is a http.RoundTripper that connects to Unix domain +// sockets. +type Transport struct { + DialTimeout time.Duration + RequestTimeout time.Duration + ResponseHeaderTimeout time.Duration + + mu sync.Mutex + // map a URL "hostname" to a UNIX domain socket path + loc map[string]string +} + +// RegisterLocation registers an URL location and maps it to the given +// file system path. +// +// Calling RegisterLocation twice for the same location is a +// programmer error, and causes a panic. 
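// A sketch of typical use (socket path and URL are hypothetical):
//
//	u := &httpunix.Transport{
//		DialTimeout:           100 * time.Millisecond,
//		RequestTimeout:        1 * time.Second,
//		ResponseHeaderTimeout: 1 * time.Second,
//	}
//	u.RegisterLocation("myservice", "/var/run/myservice.sock")
//	client := &http.Client{Transport: u}
//	resp, err := client.Get("http+unix://myservice/status")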
+func (t *Transport) RegisterLocation(loc string, path string) { + t.mu.Lock() + defer t.mu.Unlock() + if t.loc == nil { + t.loc = make(map[string]string) + } + if _, exists := t.loc[loc]; exists { + panic("location " + loc + " already registered") + } + t.loc[loc] = path +} + +var _ http.RoundTripper = (*Transport)(nil) + +// RoundTrip executes a single HTTP transaction. See +// net/http.RoundTripper. +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + if req.URL == nil { + return nil, errors.New("http+unix: nil Request.URL") + } + if req.URL.Scheme != Scheme { + return nil, errors.New("unsupported protocol scheme: " + req.URL.Scheme) + } + if req.URL.Host == "" { + return nil, errors.New("http+unix: no Host in request URL") + } + t.mu.Lock() + path, ok := t.loc[req.URL.Host] + t.mu.Unlock() + if !ok { + return nil, errors.New("unknown location: " + req.Host) + } + + c, err := net.DialTimeout("unix", path, t.DialTimeout) + if err != nil { + return nil, err + } + r := bufio.NewReader(c) + if t.RequestTimeout > 0 { + c.SetWriteDeadline(time.Now().Add(t.RequestTimeout)) + } + if err := req.Write(c); err != nil { + return nil, err + } + if t.ResponseHeaderTimeout > 0 { + c.SetReadDeadline(time.Now().Add(t.ResponseHeaderTimeout)) + } + resp, err := http.ReadResponse(r, req) + return resp, err +} diff --git a/vendor/github.com/ugorji/go/codec/0doc.go b/vendor/github.com/ugorji/go/codec/0doc.go index 209f9ebad5..78b32055f3 100644 --- a/vendor/github.com/ugorji/go/codec/0doc.go +++ b/vendor/github.com/ugorji/go/codec/0doc.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a MIT license found in the LICENSE file. /* -High Performance, Feature-Rich Idiomatic Go codec/encoding library for -binc, msgpack, cbor, json. +High Performance, Feature-Rich Idiomatic Go 1.4+ codec/encoding library for +binc, msgpack, cbor, json Supported Serialization formats are: @@ -11,21 +11,17 @@ Supported Serialization formats are: - binc: http://github.com/ugorji/binc - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049 - json: http://json.org http://tools.ietf.org/html/rfc7159 - - simple: + - simple: To install: go get github.com/ugorji/go/codec -This package understands the 'unsafe' tag, to allow using unsafe semantics: - - - When decoding into a struct, you need to read the field name as a string - so you can find the struct field it is mapped to. - Using `unsafe` will bypass the allocation and copying overhead of []byte->string conversion. - -To install using unsafe, pass the 'unsafe' tag: - - go get -tags=unsafe github.com/ugorji/go/codec +This package will carefully use 'unsafe' for performance reasons in specific places. +You can build without unsafe use by passing the safe or appengine tag +i.e. 'go install -tags=safe ...'. Note that unsafe is only supported for the last 3 +go sdk versions e.g. current go release is go 1.9, so we support unsafe use only from +go 1.7+ . This is because supporting unsafe requires knowledge of implementation details. For detailed usage information, read the primer at http://ugorji.net/blog/go-codec-primer . @@ -35,12 +31,17 @@ the standard library (ie json, xml, gob, etc). Rich Feature Set includes: - Simple but extremely powerful and feature-rich API + - Support for go1.4 and above, while selectively using newer APIs for later releases + - Good code coverage ( > 70% ) - Very High Performance. Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X. + - Careful selected use of 'unsafe' for targeted performance gains. 
+ 100% mode exists where 'unsafe' is not used at all. + - Lock-free (sans mutex) concurrency for scaling to 100's of cores - Multiple conversions: - Package coerces types where appropriate + Package coerces types where appropriate e.g. decode an int in the stream into a float, etc. - - Corner Cases: + - Corner Cases: Overflows, nil maps/slices, nil values in streams are handled correctly - Standard field renaming via tags - Support for omitting empty fields during an encoding @@ -56,7 +57,7 @@ Rich Feature Set includes: - Fast (no-reflection) encoding/decoding of common maps and slices - Code-generation for faster performance. - Support binary (e.g. messagepack, cbor) and text (e.g. json) formats - - Support indefinite-length formats to enable true streaming + - Support indefinite-length formats to enable true streaming (for formats which support it e.g. json, cbor) - Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes. This mostly applies to maps, where iteration order is non-deterministic. @@ -68,12 +69,12 @@ Rich Feature Set includes: - Encode/Decode from/to chan types (for iterative streaming support) - Drop-in replacement for encoding/json. `json:` key in struct tag supported. - Provides a RPC Server and Client Codec for net/rpc communication protocol. - - Handle unique idiosyncrasies of codecs e.g. - - For messagepack, configure how ambiguities in handling raw bytes are resolved - - For messagepack, provide rpc server/client codec to support + - Handle unique idiosyncrasies of codecs e.g. + - For messagepack, configure how ambiguities in handling raw bytes are resolved + - For messagepack, provide rpc server/client codec to support msgpack-rpc protocol defined at: https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md - + Extension Support Users can register a function to handle the encoding or decoding of @@ -92,6 +93,27 @@ encoded as an empty map because it has no exported fields, while UUID would be encoded as a string. However, with extension support, you can encode any of these however you like. +Custom Encoding and Decoding + +This package maintains symmetry in the encoding and decoding halfs. +We determine how to encode or decode by walking this decision tree + + - is type a codec.Selfer? + - is there an extension registered for the type? + - is format binary, and is type a encoding.BinaryMarshaler and BinaryUnmarshaler? + - is format specifically json, and is type a encoding/json.Marshaler and Unmarshaler? + - is format text-based, and type an encoding.TextMarshaler? + - else we use a pair of functions based on the "kind" of the type e.g. map, slice, int64, etc + +This symmetry is important to reduce chances of issues happening because the +encoding and decoding sides are out of sync e.g. decoded via very specific +encoding.TextUnmarshaler but encoded via kind-specific generalized mode. + +Consequently, if a type only defines one-half of the symetry +(e.g. it implements UnmarshalJSON() but not MarshalJSON() ), +then that type doesn't satisfy the check and we will continue walking down the +decision tree. 
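For illustration only, a minimal sketch of a type that takes the first
branch (implementing codec.Selfer) might look like:

    type Point struct{ X, Y int }

    func (p *Point) CodecEncodeSelf(e *codec.Encoder) {
        e.MustEncode([2]int{p.X, p.Y})
    }
    func (p *Point) CodecDecodeSelf(d *codec.Decoder) {
        var a [2]int
        d.MustDecode(&a)
        p.X, p.Y = a[0], a[1]
    }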
+ RPC RPC Client and Server Codecs are implemented, so the codecs can be used @@ -160,40 +182,25 @@ Sample usage model: //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) client := rpc.NewClientWithCodec(rpcCodec) +Running Tests + +To run tests, use the following: + + go test + +To run the full suite of tests, use the following: + + go test -tags alltests -run Suite + +You can run the tag 'safe' to run tests or build in safe mode. e.g. + + go test -tags safe -run Json + go test -tags "alltests safe" -run Suite + +Running Benchmarks + +Please see http://github.com/ugorji/go-codec-bench . + */ package codec -// Benefits of go-codec: -// -// - encoding/json always reads whole file into memory first. -// This makes it unsuitable for parsing very large files. -// - encoding/xml cannot parse into a map[string]interface{} -// I found this out on reading https://github.com/clbanning/mxj - -// TODO: -// -// - optimization for codecgen: -// if len of entity is <= 3 words, then support a value receiver for encode. -// - (En|De)coder should store an error when it occurs. -// Until reset, subsequent calls return that error that was stored. -// This means that free panics must go away. -// All errors must be raised through errorf method. -// - Decoding using a chan is good, but incurs concurrency costs. -// This is because there's no fast way to use a channel without it -// having to switch goroutines constantly. -// Callback pattern is still the best. Maybe consider supporting something like: -// type X struct { -// Name string -// Ys []Y -// Ys chan <- Y -// Ys func(Y) -> call this function for each entry -// } -// - Consider adding a isZeroer interface { isZero() bool } -// It is used within isEmpty, for omitEmpty support. -// - Consider making Handle used AS-IS within the encoding/decoding session. -// This means that we don't cache Handle information within the (En|De)coder, -// except we really need it at Reset(...) -// - Consider adding math/big support -// - Consider reducing the size of the generated functions: -// Maybe use one loop, and put the conditionals in the loop. -// for ... { if cLen > 0 { if j == cLen { break } } else if dd.CheckBreak() { break } } diff --git a/vendor/github.com/ugorji/go/codec/README.md b/vendor/github.com/ugorji/go/codec/README.md index 91cb3a27bd..95c7d61769 100644 --- a/vendor/github.com/ugorji/go/codec/README.md +++ b/vendor/github.com/ugorji/go/codec/README.md @@ -15,17 +15,11 @@ To install: go get github.com/ugorji/go/codec -This package understands the `unsafe` tag, to allow using unsafe semantics: - - - When decoding into a struct, you need to read the field name as a string - so you can find the struct field it is mapped to. - Using `unsafe` will bypass the allocation and copying overhead of `[]byte->string` conversion. - -To use it, you must pass the `unsafe` tag during install: - -``` -go install -tags=unsafe github.com/ugorji/go/codec -``` +This package will carefully use 'unsafe' for performance reasons in specific places. +You can build without unsafe use by passing the safe or appengine tag +i.e. 'go install -tags=safe ...'. Note that unsafe is only supported for the last 3 +go sdk versions e.g. current go release is go 1.9, so we support unsafe use only from +go 1.7+ . This is because supporting unsafe requires knowledge of implementation details. 
Online documentation: http://godoc.org/github.com/ugorji/go/codec Detailed Usage/How-to Primer: http://ugorji.net/blog/go-codec-primer @@ -36,8 +30,13 @@ the standard library (ie json, xml, gob, etc). Rich Feature Set includes: - Simple but extremely powerful and feature-rich API + - Support for go1.4 and above, while selectively using newer APIs for later releases + - Good code coverage ( > 70% ) - Very High Performance. Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X. + - Careful selected use of 'unsafe' for targeted performance gains. + 100% mode exists where 'unsafe' is not used at all. + - Lock-free (sans mutex) concurrency for scaling to 100's of cores - Multiple conversions: Package coerces types where appropriate e.g. decode an int in the stream into a float, etc. @@ -92,6 +91,27 @@ encoded as an empty map because it has no exported fields, while UUID would be encoded as a string. However, with extension support, you can encode any of these however you like. +## Custom Encoding and Decoding + +This package maintains symmetry in the encoding and decoding halfs. +We determine how to encode or decode by walking this decision tree + + - is type a codec.Selfer? + - is there an extension registered for the type? + - is format binary, and is type a encoding.BinaryMarshaler and BinaryUnmarshaler? + - is format specifically json, and is type a encoding/json.Marshaler and Unmarshaler? + - is format text-based, and type an encoding.TextMarshaler? + - else we use a pair of functions based on the "kind" of the type e.g. map, slice, int64, etc + +This symmetry is important to reduce chances of issues happening because the +encoding and decoding sides are out of sync e.g. decoded via very specific +encoding.TextUnmarshaler but encoded via kind-specific generalized mode. + +Consequently, if a type only defines one-half of the symetry +(e.g. it implements UnmarshalJSON() but not MarshalJSON() ), +then that type doesn't satisfy the check and we will continue walking down the +decision tree. + ## RPC RPC Client and Server Codecs are implemented, so the codecs can be used @@ -146,3 +166,22 @@ Typical usage model: //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) client := rpc.NewClientWithCodec(rpcCodec) +## Running Tests + +To run tests, use the following: + + go test + +To run the full suite of tests, use the following: + + go test -tags alltests -run Suite + +You can run the tag 'safe' to run tests or build in safe mode. e.g. + + go test -tags safe -run Json + go test -tags "alltests safe" -run Suite + +## Running Benchmarks + +Please see http://github.com/ugorji/go-codec-bench . 
+ diff --git a/vendor/github.com/ugorji/go/codec/binc.go b/vendor/github.com/ugorji/go/codec/binc.go index 618c51505f..be5b7d3380 100644 --- a/vendor/github.com/ugorji/go/codec/binc.go +++ b/vendor/github.com/ugorji/go/codec/binc.go @@ -61,7 +61,8 @@ type bincEncDriver struct { m map[string]uint16 // symbols b [scratchByteArrayLen]byte s uint16 // symbols sequencer - encNoSeparator + // encNoSeparator + encDriverNoopContainerWriter } func (e *bincEncDriver) IsBuiltinType(rt uintptr) bool { @@ -195,11 +196,11 @@ func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) { e.w.writen1(xtag) } -func (e *bincEncDriver) EncodeArrayStart(length int) { +func (e *bincEncDriver) WriteArrayStart(length int) { e.encLen(bincVdArray<<4, uint64(length)) } -func (e *bincEncDriver) EncodeMapStart(length int) { +func (e *bincEncDriver) WriteMapStart(length int) { e.encLen(bincVdMap<<4, uint64(length)) } @@ -332,13 +333,14 @@ type bincDecDriver struct { bd byte vd byte vs byte - noStreamingCodec - decNoSeparator + // noStreamingCodec + // decNoSeparator b [scratchByteArrayLen]byte // linear searching on this slice is ok, // because we typically expect < 32 symbols in each stream. s []bincDecSymbol + decDriverNoopContainerReader } func (d *bincDecDriver) readNextBd() { @@ -512,7 +514,7 @@ func (d *bincDecDriver) DecodeInt(bitsize uint8) (i int64) { i = -i } if chkOvf.Int(i, bitsize) { - d.d.errorf("binc: overflow integer: %v", i) + d.d.errorf("binc: overflow integer: %v for num bits: %v", i, bitsize) return } d.bdRead = false @@ -728,11 +730,12 @@ func (d *bincDecDriver) DecodeString() (s string) { return } -func (d *bincDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) { - if isstring { - bsOut, _ = d.decStringAndBytes(bs, false, zerocopy) - return - } +func (d *bincDecDriver) DecodeStringAsBytes() (s []byte) { + s, _ = d.decStringAndBytes(d.b[:], false, true) + return +} + +func (d *bincDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { if !d.bdRead { d.readNextBd() } @@ -789,7 +792,7 @@ func (d *bincDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []b } xbs = d.r.readx(l) } else if d.vd == bincVdByteArray { - xbs = d.DecodeBytes(nil, false, true) + xbs = d.DecodeBytes(nil, true) } else { d.d.errorf("Invalid d.vd for extensions (Expecting extensions or byte array). 
Got: 0x%x", d.vd) return @@ -803,7 +806,7 @@ func (d *bincDecDriver) DecodeNaked() { d.readNextBd() } - n := &d.d.n + n := d.d.n var decodeFurther bool switch d.vd { @@ -858,7 +861,7 @@ func (d *bincDecDriver) DecodeNaked() { n.s = d.DecodeString() case bincVdByteArray: n.v = valueTypeBytes - n.l = d.DecodeBytes(nil, false, false) + n.l = d.DecodeBytes(nil, false) case bincVdTimestamp: n.v = valueTypeTimestamp tt, err := decodeTime(d.r.readx(int(d.vs))) @@ -908,6 +911,7 @@ func (d *bincDecDriver) DecodeNaked() { type BincHandle struct { BasicHandle binaryEncodingType + noElemSeparators } func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { @@ -922,6 +926,10 @@ func (h *BincHandle) newDecDriver(d *Decoder) decDriver { return &bincDecDriver{d: d, h: h, r: d.r, br: d.bytes} } +func (_ *BincHandle) IsBuiltinType(rt uintptr) bool { + return rt == timeTypId +} + func (e *bincEncDriver) reset() { e.w = e.e.w e.s = 0 diff --git a/vendor/github.com/ugorji/go/codec/cbor.go b/vendor/github.com/ugorji/go/codec/cbor.go index 4c72caffb2..3bc328f306 100644 --- a/vendor/github.com/ugorji/go/codec/cbor.go +++ b/vendor/github.com/ugorji/go/codec/cbor.go @@ -61,7 +61,8 @@ const ( type cborEncDriver struct { noBuiltInTypes - encNoSeparator + encDriverNoopContainerWriter + // encNoSeparator e *Encoder w encWriter h *CborHandle @@ -134,39 +135,89 @@ func (e *cborEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Enco func (e *cborEncDriver) EncodeRawExt(re *RawExt, en *Encoder) { e.encUint(uint64(re.Tag), cborBaseTag) - if re.Data != nil { + if false && re.Data != nil { en.encode(re.Data) - } else if re.Value == nil { - e.EncodeNil() - } else { + } else if re.Value != nil { en.encode(re.Value) + } else { + e.EncodeNil() } } -func (e *cborEncDriver) EncodeArrayStart(length int) { - e.encLen(cborBaseArray, length) +func (e *cborEncDriver) WriteArrayStart(length int) { + if e.h.IndefiniteLength { + e.w.writen1(cborBdIndefiniteArray) + } else { + e.encLen(cborBaseArray, length) + } } -func (e *cborEncDriver) EncodeMapStart(length int) { - e.encLen(cborBaseMap, length) +func (e *cborEncDriver) WriteMapStart(length int) { + if e.h.IndefiniteLength { + e.w.writen1(cborBdIndefiniteMap) + } else { + e.encLen(cborBaseMap, length) + } } -func (e *cborEncDriver) EncodeString(c charEncoding, v string) { - e.encLen(cborBaseString, len(v)) - e.w.writestr(v) +func (e *cborEncDriver) WriteMapEnd() { + if e.h.IndefiniteLength { + e.w.writen1(cborBdBreak) + } +} + +func (e *cborEncDriver) WriteArrayEnd() { + if e.h.IndefiniteLength { + e.w.writen1(cborBdBreak) + } } func (e *cborEncDriver) EncodeSymbol(v string) { - e.EncodeString(c_UTF8, v) + e.encStringBytesS(cborBaseString, v) +} + +func (e *cborEncDriver) EncodeString(c charEncoding, v string) { + e.encStringBytesS(cborBaseString, v) } func (e *cborEncDriver) EncodeStringBytes(c charEncoding, v []byte) { if c == c_RAW { - e.encLen(cborBaseBytes, len(v)) + e.encStringBytesS(cborBaseBytes, stringView(v)) } else { - e.encLen(cborBaseString, len(v)) + e.encStringBytesS(cborBaseString, stringView(v)) + } +} + +func (e *cborEncDriver) encStringBytesS(bb byte, v string) { + if e.h.IndefiniteLength { + if bb == cborBaseBytes { + e.w.writen1(cborBdIndefiniteBytes) + } else { + e.w.writen1(cborBdIndefiniteString) + } + blen := len(v) / 4 + if blen == 0 { + blen = 64 + } else if blen > 1024 { + blen = 1024 + } + for i := 0; i < len(v); { + var v2 string + i2 := i + blen + if i2 < len(v) { + v2 = v[i:i2] + } else { + v2 = v[i:] + } + 
e.encLen(bb, len(v2)) + e.w.writestr(v2) + i = i2 + } + e.w.writen1(cborBdBreak) + } else { + e.encLen(bb, len(v)) + e.w.writestr(v) } - e.w.writeb(v) } // ---------------------- @@ -180,7 +231,8 @@ type cborDecDriver struct { bdRead bool bd byte noBuiltInTypes - decNoSeparator + // decNoSeparator + decDriverNoopContainerReader } func (d *cborDecDriver) readNextBd() { @@ -407,7 +459,7 @@ func (d *cborDecDriver) decAppendIndefiniteBytes(bs []byte) []byte { return bs } -func (d *cborDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) { +func (d *cborDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { if !d.bdRead { d.readNextBd() } @@ -416,8 +468,9 @@ func (d *cborDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut [ return nil } if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString { + d.bdRead = false if bs == nil { - return d.decAppendIndefiniteBytes(nil) + return d.decAppendIndefiniteBytes(zeroByteSlice) } return d.decAppendIndefiniteBytes(bs[:0]) } @@ -434,7 +487,11 @@ func (d *cborDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut [ } func (d *cborDecDriver) DecodeString() (s string) { - return string(d.DecodeBytes(d.b[:], true, true)) + return string(d.DecodeBytes(d.b[:], true)) +} + +func (d *cborDecDriver) DecodeStringAsBytes() (s []byte) { + return d.DecodeBytes(d.b[:], true) } func (d *cborDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { @@ -465,7 +522,7 @@ func (d *cborDecDriver) DecodeNaked() { d.readNextBd() } - n := &d.d.n + n := d.d.n var decodeFurther bool switch d.bd { @@ -485,7 +542,7 @@ func (d *cborDecDriver) DecodeNaked() { n.f = d.DecodeFloat(false) case cborBdIndefiniteBytes: n.v = valueTypeBytes - n.l = d.DecodeBytes(nil, false, false) + n.l = d.DecodeBytes(nil, false) case cborBdIndefiniteString: n.v = valueTypeString n.s = d.DecodeString() @@ -510,7 +567,7 @@ func (d *cborDecDriver) DecodeNaked() { n.i = d.DecodeInt(64) case d.bd >= cborBaseBytes && d.bd < cborBaseString: n.v = valueTypeBytes - n.l = d.DecodeBytes(nil, false, false) + n.l = d.DecodeBytes(nil, false) case d.bd >= cborBaseString && d.bd < cborBaseArray: n.v = valueTypeString n.s = d.DecodeString() @@ -573,7 +630,11 @@ func (d *cborDecDriver) DecodeNaked() { // type CborHandle struct { binaryEncodingType + noElemSeparators BasicHandle + + // IndefiniteLength=true, means that we encode using indefinitelength + IndefiniteLength bool } func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { diff --git a/vendor/github.com/ugorji/go/codec/decode.go b/vendor/github.com/ugorji/go/codec/decode.go index 2563668ba7..135e298293 100644 --- a/vendor/github.com/ugorji/go/codec/decode.go +++ b/vendor/github.com/ugorji/go/codec/decode.go @@ -9,6 +9,7 @@ import ( "fmt" "io" "reflect" + "sync" "time" ) @@ -21,6 +22,10 @@ const ( var ( onlyMapOrArrayCanDecodeIntoStructErr = errors.New("only encoded map or array can be decoded into a struct") cannotDecodeIntoNilErr = errors.New("cannot decode into nil") + + decUnreadByteNothingToReadErr = errors.New("cannot unread - nothing has been read") + decUnreadByteLastByteNotReadErr = errors.New("cannot unread - last byte has not been read") + decUnreadByteUnknownErr = errors.New("cannot unread - reason unknown") ) // decReader abstracts the reading source, allowing implementations that can @@ -34,24 +39,29 @@ type decReader interface { readx(n int) []byte readb([]byte) readn1() uint8 - readn1eof() (v uint8, eof bool) numread() int 
// number of bytes read track() stopTrack() []byte -} -type decReaderByteScanner interface { - io.Reader - io.ByteScanner + // skip will skip any byte that matches, and return the first non-matching byte + skip(accept *bitset256) (token byte) + // readTo will read any byte that matches, stopping once no-longer matching. + readTo(in []byte, accept *bitset256) (out []byte) + // readUntil will read, only stopping once it matches the 'stop' byte. + readUntil(in []byte, stop byte) (out []byte) } type decDriver interface { // this will check if the next token is a break. CheckBreak() bool + // Note: TryDecodeAsNil should be careful not to share any temporary []byte with + // the rest of the decDriver. This is because sometimes, we optimize by holding onto + // a transient []byte, and ensuring the only other call we make to the decDriver + // during that time is maybe a TryDecodeAsNil() call. TryDecodeAsNil() bool // vt is one of: Bytes, String, Nil, Slice or Map. Return unSet if not known. ContainerType() (vt valueType) - IsBuiltinType(rt uintptr) bool + // IsBuiltinType(rt uintptr) bool DecodeBuiltin(rt uintptr, v interface{}) // DecodeNaked will decode primitives (number, bool, string, []byte) and RawExt. @@ -76,25 +86,41 @@ type decDriver interface { // return a pre-stored string value, meaning that it can bypass // the cost of []byte->string conversion. DecodeString() (s string) + DecodeStringAsBytes() (v []byte) // DecodeBytes may be called directly, without going through reflection. // Consequently, it must be designed to handle possible nil. - DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) + DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) + // DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) // decodeExt will decode into a *RawExt or into an extension. DecodeExt(v interface{}, xtag uint64, ext Ext) (realxtag uint64) // decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) - ReadMapStart() int ReadArrayStart() int + ReadArrayElem() + ReadArrayEnd() + ReadMapStart() int + ReadMapElemKey() + ReadMapElemValue() + ReadMapEnd() reset() uncacheRead() } -type decNoSeparator struct { -} +// type decNoSeparator struct {} +// func (_ decNoSeparator) ReadEnd() {} -func (_ decNoSeparator) ReadEnd() {} +type decDriverNoopContainerReader struct{} + +func (_ decDriverNoopContainerReader) ReadArrayStart() (v int) { return } +func (_ decDriverNoopContainerReader) ReadArrayElem() {} +func (_ decDriverNoopContainerReader) ReadArrayEnd() {} +func (_ decDriverNoopContainerReader) ReadMapStart() (v int) { return } +func (_ decDriverNoopContainerReader) ReadMapElemKey() {} +func (_ decDriverNoopContainerReader) ReadMapElemValue() {} +func (_ decDriverNoopContainerReader) ReadMapEnd() {} +func (_ decDriverNoopContainerReader) CheckBreak() (v bool) { return } // func (_ decNoSeparator) uncacheRead() {} @@ -141,6 +167,11 @@ type DecodeOptions struct { // or is an interface. MapValueReset bool + // SliceElementReset: on decoding a slice, reset the element to a zero value first. + // + // concern: if the slice already contained some garbage, we will decode into that garbage. + SliceElementReset bool + // InterfaceReset controls how we decode into an interface. // // By default, when we see a field that is an interface{...}, @@ -174,20 +205,362 @@ type DecodeOptions struct { // *Note*: This only applies if using go1.5 and above, // as it requires reflect.ArrayOf support which was absent before go1.5. 
PreferArrayOverSlice bool + + // DeleteOnNilMapValue controls how to decode a nil value in the stream. + // + // If true, we will delete the mapping of the key. + // Else, just set the mapping to the zero value of the type. + DeleteOnNilMapValue bool + + // ReaderBufferSize is the size of the buffer used when reading. + // + // if > 0, we use a smart buffer internally for performance purposes. + ReaderBufferSize int } // ------------------------------------ -// ioDecByteScanner implements Read(), ReadByte(...), UnreadByte(...) methods -// of io.Reader, io.ByteScanner. -type ioDecByteScanner struct { - r io.Reader - l byte // last byte - ls byte // last byte status. 0: init-canDoNothing, 1: canRead, 2: canUnread - b [1]byte // tiny buffer for reading single bytes +type bufioDecReader struct { + buf []byte + r io.Reader + + c int // cursor + n int // num read + err error + + trb bool + tr []byte + + b [8]byte } -func (z *ioDecByteScanner) Read(p []byte) (n int, err error) { +func (z *bufioDecReader) reset(r io.Reader) { + z.r, z.c, z.n, z.err, z.trb = r, 0, 0, nil, false + if z.tr != nil { + z.tr = z.tr[:0] + } +} + +func (z *bufioDecReader) Read(p []byte) (n int, err error) { + if z.err != nil { + return 0, z.err + } + p0 := p + n = copy(p, z.buf[z.c:]) + z.c += n + if z.c == len(z.buf) { + z.c = 0 + } + z.n += n + if len(p) == n { + if z.c == 0 { + z.buf = z.buf[:1] + z.buf[0] = p[len(p)-1] + z.c = 1 + } + if z.trb { + z.tr = append(z.tr, p0[:n]...) + } + return + } + p = p[n:] + var n2 int + // if we are here, then z.buf is all read + if len(p) > len(z.buf) { + n2, err = decReadFull(z.r, p) + n += n2 + z.n += n2 + z.err = err + // don't return EOF if some bytes were read. keep for next time. + if n > 0 && err == io.EOF { + err = nil + } + // always keep last byte in z.buf + z.buf = z.buf[:1] + z.buf[0] = p[len(p)-1] + z.c = 1 + if z.trb { + z.tr = append(z.tr, p0[:n]...) + } + return + } + // z.c is now 0, and len(p) <= len(z.buf) + for len(p) > 0 && z.err == nil { + // println("len(p) loop starting ... ") + z.c = 0 + z.buf = z.buf[0:cap(z.buf)] + n2, err = z.r.Read(z.buf) + if n2 > 0 { + if err == io.EOF { + err = nil + } + z.buf = z.buf[:n2] + n2 = copy(p, z.buf) + z.c = n2 + n += n2 + z.n += n2 + p = p[n2:] + } + z.err = err + // println("... len(p) loop done") + } + if z.c == 0 { + z.buf = z.buf[:1] + z.buf[0] = p[len(p)-1] + z.c = 1 + } + if z.trb { + z.tr = append(z.tr, p0[:n]...) + } + return +} + +func (z *bufioDecReader) ReadByte() (b byte, err error) { + z.b[0] = 0 + _, err = z.Read(z.b[:1]) + b = z.b[0] + return +} + +func (z *bufioDecReader) UnreadByte() (err error) { + if z.err != nil { + return z.err + } + if z.c > 0 { + z.c-- + z.n-- + if z.trb { + z.tr = z.tr[:len(z.tr)-1] + } + return + } + return decUnreadByteNothingToReadErr +} + +func (z *bufioDecReader) numread() int { + return z.n +} + +func (z *bufioDecReader) readx(n int) (bs []byte) { + if n <= 0 || z.err != nil { + return + } + if z.c+n <= len(z.buf) { + bs = z.buf[z.c : z.c+n] + z.n += n + z.c += n + if z.trb { + z.tr = append(z.tr, bs...) 
+ } + return + } + bs = make([]byte, n) + _, err := z.Read(bs) + if err != nil { + panic(err) + } + return +} + +func (z *bufioDecReader) readb(bs []byte) { + _, err := z.Read(bs) + if err != nil { + panic(err) + } +} + +// func (z *bufioDecReader) readn1eof() (b uint8, eof bool) { +// b, err := z.ReadByte() +// if err != nil { +// if err == io.EOF { +// eof = true +// } else { +// panic(err) +// } +// } +// return +// } + +func (z *bufioDecReader) readn1() (b uint8) { + b, err := z.ReadByte() + if err != nil { + panic(err) + } + return +} + +func (z *bufioDecReader) search(in []byte, accept *bitset256, stop, flag uint8) (token byte, out []byte) { + // flag: 1 (skip), 2 (readTo), 4 (readUntil) + if flag == 4 { + for i := z.c; i < len(z.buf); i++ { + if z.buf[i] == stop { + token = z.buf[i] + z.n = z.n + (i - z.c) - 1 + i++ + out = z.buf[z.c:i] + if z.trb { + z.tr = append(z.tr, z.buf[z.c:i]...) + } + z.c = i + return + } + } + } else { + for i := z.c; i < len(z.buf); i++ { + if !accept.isset(z.buf[i]) { + token = z.buf[i] + z.n = z.n + (i - z.c) - 1 + if flag == 1 { + i++ + } else { + out = z.buf[z.c:i] + } + if z.trb { + z.tr = append(z.tr, z.buf[z.c:i]...) + } + z.c = i + return + } + } + } + z.n += len(z.buf) - z.c + if flag != 1 { + out = append(in, z.buf[z.c:]...) + } + if z.trb { + z.tr = append(z.tr, z.buf[z.c:]...) + } + var n2 int + if z.err != nil { + return + } + for { + z.c = 0 + z.buf = z.buf[0:cap(z.buf)] + n2, z.err = z.r.Read(z.buf) + if n2 > 0 && z.err != nil { + z.err = nil + } + z.buf = z.buf[:n2] + if flag == 4 { + for i := 0; i < n2; i++ { + if z.buf[i] == stop { + token = z.buf[i] + z.n += i - 1 + i++ + out = append(out, z.buf[z.c:i]...) + if z.trb { + z.tr = append(z.tr, z.buf[z.c:i]...) + } + z.c = i + return + } + } + } else { + for i := 0; i < n2; i++ { + if !accept.isset(z.buf[i]) { + token = z.buf[i] + z.n += i - 1 + if flag == 1 { + i++ + } + if flag != 1 { + out = append(out, z.buf[z.c:i]...) + } + if z.trb { + z.tr = append(z.tr, z.buf[z.c:i]...) + } + z.c = i + return + } + } + } + if flag != 1 { + out = append(out, z.buf[:n2]...) + } + z.n += n2 + if z.err != nil { + return + } + if z.trb { + z.tr = append(z.tr, z.buf[:n2]...) + } + } +} + +func (z *bufioDecReader) skip(accept *bitset256) (token byte) { + token, _ = z.search(nil, accept, 0, 1) + return +} + +func (z *bufioDecReader) readTo(in []byte, accept *bitset256) (out []byte) { + _, out = z.search(in, accept, 0, 2) + return +} + +func (z *bufioDecReader) readUntil(in []byte, stop byte) (out []byte) { + _, out = z.search(in, nil, stop, 4) + return +} + +func (z *bufioDecReader) unreadn1() { + err := z.UnreadByte() + if err != nil { + panic(err) + } +} + +func (z *bufioDecReader) track() { + if z.tr != nil { + z.tr = z.tr[:0] + } + z.trb = true +} + +func (z *bufioDecReader) stopTrack() (bs []byte) { + z.trb = false + return z.tr +} + +// ioDecReader is a decReader that reads off an io.Reader. +// +// It also has a fallback implementation of ByteScanner if needed. +type ioDecReader struct { + r io.Reader // the reader passed in + + rr io.Reader + br io.ByteScanner + + l byte // last byte + ls byte // last byte status. 0: init-canDoNothing, 1: canRead, 2: canUnread + b [4]byte // tiny buffer for reading single bytes + trb bool // tracking bytes turned on + + // temp byte array re-used internally for efficiency during read. + // shares buffer with Decoder, so we keep size of struct within 8 words. 
+ x *[scratchByteArrayLen]byte + n int // num read + tr []byte // tracking bytes read +} + +func (z *ioDecReader) reset(r io.Reader) { + z.r = r + z.rr = r + z.l, z.ls, z.n, z.trb = 0, 0, 0, false + if z.tr != nil { + z.tr = z.tr[:0] + } + var ok bool + if z.br, ok = r.(io.ByteScanner); !ok { + z.br = z + z.rr = z + } +} + +func (z *ioDecReader) Read(p []byte) (n int, err error) { + if len(p) == 0 { + return + } var firstByte bool if z.ls == 1 { z.ls = 2 @@ -213,8 +586,8 @@ func (z *ioDecByteScanner) Read(p []byte) (n int, err error) { return } -func (z *ioDecByteScanner) ReadByte() (c byte, err error) { - n, err := z.Read(z.b[:]) +func (z *ioDecReader) ReadByte() (c byte, err error) { + n, err := z.Read(z.b[:1]) if n == 1 { c = z.b[0] if err == io.EOF { @@ -224,30 +597,20 @@ func (z *ioDecByteScanner) ReadByte() (c byte, err error) { return } -func (z *ioDecByteScanner) UnreadByte() (err error) { - x := z.ls - if x == 0 { - err = errors.New("cannot unread - nothing has been read") - } else if x == 1 { - err = errors.New("cannot unread - last byte has not been read") - } else if x == 2 { +func (z *ioDecReader) UnreadByte() (err error) { + switch z.ls { + case 2: z.ls = 1 + case 0: + err = decUnreadByteNothingToReadErr + case 1: + err = decUnreadByteLastByteNotReadErr + default: + err = decUnreadByteUnknownErr } return } -// ioDecReader is a decReader that reads off an io.Reader -type ioDecReader struct { - br decReaderByteScanner - // temp byte array re-used internally for efficiency during read. - // shares buffer with Decoder, so we keep size of struct within 8 words. - x *[scratchByteArrayLen]byte - bs ioDecByteScanner - n int // num read - tr []byte // tracking bytes read - trb bool -} - func (z *ioDecReader) numread() int { return z.n } @@ -261,7 +624,7 @@ func (z *ioDecReader) readx(n int) (bs []byte) { } else { bs = make([]byte, n) } - if _, err := io.ReadAtLeast(z.br, bs, n); err != nil { + if _, err := decReadFull(z.rr, bs); err != nil { panic(err) } z.n += len(bs) @@ -272,31 +635,18 @@ func (z *ioDecReader) readx(n int) (bs []byte) { } func (z *ioDecReader) readb(bs []byte) { - if len(bs) == 0 { - return - } - n, err := io.ReadAtLeast(z.br, bs, len(bs)) - z.n += n - if err != nil { + // if len(bs) == 0 { + // return + // } + if _, err := decReadFull(z.rr, bs); err != nil { panic(err) } + z.n += len(bs) if z.trb { z.tr = append(z.tr, bs...) 
} } -func (z *ioDecReader) readn1() (b uint8) { - b, err := z.br.ReadByte() - if err != nil { - panic(err) - } - z.n++ - if z.trb { - z.tr = append(z.tr, b) - } - return b -} - func (z *ioDecReader) readn1eof() (b uint8, eof bool) { b, err := z.br.ReadByte() if err == nil { @@ -312,6 +662,62 @@ func (z *ioDecReader) readn1eof() (b uint8, eof bool) { return } +func (z *ioDecReader) readn1() (b uint8) { + var err error + if b, err = z.br.ReadByte(); err == nil { + z.n++ + if z.trb { + z.tr = append(z.tr, b) + } + return + } + panic(err) +} + +func (z *ioDecReader) skip(accept *bitset256) (token byte) { + for { + var eof bool + token, eof = z.readn1eof() + if eof { + return + } + if accept.isset(token) { + continue + } + return + } +} + +func (z *ioDecReader) readTo(in []byte, accept *bitset256) (out []byte) { + out = in + for { + token, eof := z.readn1eof() + if eof { + return + } + if accept.isset(token) { + out = append(out, token) + } else { + z.unreadn1() + return + } + } +} + +func (z *ioDecReader) readUntil(in []byte, stop byte) (out []byte) { + out = in + for { + token, eof := z.readn1eof() + if eof { + panic(io.EOF) + } + out = append(out, token) + if token == stop { + return + } + } +} + func (z *ioDecReader) unreadn1() { err := z.br.UnreadByte() if err != nil { @@ -388,6 +794,10 @@ func (z *bytesDecReader) readx(n int) (bs []byte) { return } +func (z *bytesDecReader) readb(bs []byte) { + copy(bs, z.readx(len(bs))) +} + func (z *bytesDecReader) readn1() (v uint8) { if z.a == 0 { panic(io.EOF) @@ -398,19 +808,69 @@ func (z *bytesDecReader) readn1() (v uint8) { return } -func (z *bytesDecReader) readn1eof() (v uint8, eof bool) { +// func (z *bytesDecReader) readn1eof() (v uint8, eof bool) { +// if z.a == 0 { +// eof = true +// return +// } +// v = z.b[z.c] +// z.c++ +// z.a-- +// return +// } + +func (z *bytesDecReader) skip(accept *bitset256) (token byte) { if z.a == 0 { - eof = true return } - v = z.b[z.c] - z.c++ - z.a-- + blen := len(z.b) + for i := z.c; i < blen; i++ { + if !accept.isset(z.b[i]) { + token = z.b[i] + i++ + z.a -= (i - z.c) + z.c = i + return + } + } + z.a, z.c = 0, blen return } -func (z *bytesDecReader) readb(bs []byte) { - copy(bs, z.readx(len(bs))) +func (z *bytesDecReader) readTo(_ []byte, accept *bitset256) (out []byte) { + if z.a == 0 { + return + } + blen := len(z.b) + for i := z.c; i < blen; i++ { + if !accept.isset(z.b[i]) { + out = z.b[z.c:i] + z.a -= (i - z.c) + z.c = i + return + } + } + out = z.b[z.c:] + z.a, z.c = 0, blen + return +} + +func (z *bytesDecReader) readUntil(_ []byte, stop byte) (out []byte) { + if z.a == 0 { + panic(io.EOF) + } + blen := len(z.b) + for i := z.c; i < blen; i++ { + if z.b[i] == stop { + i++ + out = z.b[z.c:i] + z.a -= (i - z.c) + z.c = i + return + } + } + z.a, z.c = 0, blen + panic(io.EOF) } func (z *bytesDecReader) track() { @@ -421,167 +881,62 @@ func (z *bytesDecReader) stopTrack() (bs []byte) { return z.b[z.t:z.c] } -// ------------------------------------ - -type decFnInfo struct { - d *Decoder - ti *typeInfo - xfFn Ext - xfTag uint64 - seq seqType -} - // ---------------------------------------- -type decFn struct { - i decFnInfo - f func(*decFnInfo, reflect.Value) +func (d *Decoder) builtin(f *codecFnInfo, rv reflect.Value) { + d.d.DecodeBuiltin(f.ti.rtid, rv2i(rv)) } -func (f *decFnInfo) builtin(rv reflect.Value) { - f.d.d.DecodeBuiltin(f.ti.rtid, rv.Addr().Interface()) +func (d *Decoder) rawExt(f *codecFnInfo, rv reflect.Value) { + d.d.DecodeExt(rv2i(rv), 0, nil) } -func (f *decFnInfo) rawExt(rv reflect.Value) { 
- f.d.d.DecodeExt(rv.Addr().Interface(), 0, nil) +func (d *Decoder) ext(f *codecFnInfo, rv reflect.Value) { + d.d.DecodeExt(rv2i(rv), f.xfTag, f.xfFn) } -func (f *decFnInfo) raw(rv reflect.Value) { - rv.SetBytes(f.d.raw()) +func (d *Decoder) selferUnmarshal(f *codecFnInfo, rv reflect.Value) { + rv2i(rv).(Selfer).CodecDecodeSelf(d) } -func (f *decFnInfo) ext(rv reflect.Value) { - f.d.d.DecodeExt(rv.Addr().Interface(), f.xfTag, f.xfFn) -} - -func (f *decFnInfo) getValueForUnmarshalInterface(rv reflect.Value, indir int8) (v interface{}) { - if indir == -1 { - v = rv.Addr().Interface() - } else if indir == 0 { - v = rv.Interface() - } else { - for j := int8(0); j < indir; j++ { - if rv.IsNil() { - rv.Set(reflect.New(rv.Type().Elem())) - } - rv = rv.Elem() - } - v = rv.Interface() - } - return -} - -func (f *decFnInfo) selferUnmarshal(rv reflect.Value) { - f.getValueForUnmarshalInterface(rv, f.ti.csIndir).(Selfer).CodecDecodeSelf(f.d) -} - -func (f *decFnInfo) binaryUnmarshal(rv reflect.Value) { - bm := f.getValueForUnmarshalInterface(rv, f.ti.bunmIndir).(encoding.BinaryUnmarshaler) - xbs := f.d.d.DecodeBytes(nil, false, true) +func (d *Decoder) binaryUnmarshal(f *codecFnInfo, rv reflect.Value) { + bm := rv2i(rv).(encoding.BinaryUnmarshaler) + xbs := d.d.DecodeBytes(nil, true) if fnerr := bm.UnmarshalBinary(xbs); fnerr != nil { panic(fnerr) } } -func (f *decFnInfo) textUnmarshal(rv reflect.Value) { - tm := f.getValueForUnmarshalInterface(rv, f.ti.tunmIndir).(encoding.TextUnmarshaler) - fnerr := tm.UnmarshalText(f.d.d.DecodeBytes(f.d.b[:], true, true)) +func (d *Decoder) textUnmarshal(f *codecFnInfo, rv reflect.Value) { + tm := rv2i(rv).(encoding.TextUnmarshaler) + fnerr := tm.UnmarshalText(d.d.DecodeStringAsBytes()) if fnerr != nil { panic(fnerr) } } -func (f *decFnInfo) jsonUnmarshal(rv reflect.Value) { - tm := f.getValueForUnmarshalInterface(rv, f.ti.junmIndir).(jsonUnmarshaler) - // bs := f.d.d.DecodeBytes(f.d.b[:], true, true) +func (d *Decoder) jsonUnmarshal(f *codecFnInfo, rv reflect.Value) { + tm := rv2i(rv).(jsonUnmarshaler) + // bs := d.d.DecodeBytes(d.b[:], true, true) // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. 
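// Illustrative sketch, not part of this patch: binaryUnmarshal, textUnmarshal
// and jsonUnmarshal above simply assert the standard-library interfaces on the
// target (via rv2i) and hand them the decoded bytes, so any type implementing
// encoding.TextUnmarshaler, encoding.BinaryUnmarshaler or json.Unmarshaler is
// picked up automatically. The Level type below is invented for the sketch.
package main

import (
	"fmt"
	"strings"
)

type Level int

// UnmarshalText satisfies encoding.TextUnmarshaler; a decoder that finds a
// string in the stream can pass its bytes straight to this method.
func (l *Level) UnmarshalText(b []byte) error {
	switch strings.ToLower(string(b)) {
	case "debug":
		*l = 0
	case "info":
		*l = 1
	case "error":
		*l = 2
	default:
		return fmt.Errorf("unknown level %q", b)
	}
	return nil
}

func main() {
	var l Level
	if err := l.UnmarshalText([]byte("info")); err != nil {
		panic(err)
	}
	fmt.Println(l) // 1
}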
- fnerr := tm.UnmarshalJSON(f.d.nextValueBytes()) + fnerr := tm.UnmarshalJSON(d.nextValueBytes()) if fnerr != nil { panic(fnerr) } } -func (f *decFnInfo) kErr(rv reflect.Value) { - f.d.errorf("no decoding function defined for kind %v", rv.Kind()) +func (d *Decoder) kErr(f *codecFnInfo, rv reflect.Value) { + d.errorf("no decoding function defined for kind %v", rv.Kind()) } -func (f *decFnInfo) kString(rv reflect.Value) { - rv.SetString(f.d.d.DecodeString()) -} - -func (f *decFnInfo) kBool(rv reflect.Value) { - rv.SetBool(f.d.d.DecodeBool()) -} - -func (f *decFnInfo) kInt(rv reflect.Value) { - rv.SetInt(f.d.d.DecodeInt(intBitsize)) -} - -func (f *decFnInfo) kInt64(rv reflect.Value) { - rv.SetInt(f.d.d.DecodeInt(64)) -} - -func (f *decFnInfo) kInt32(rv reflect.Value) { - rv.SetInt(f.d.d.DecodeInt(32)) -} - -func (f *decFnInfo) kInt8(rv reflect.Value) { - rv.SetInt(f.d.d.DecodeInt(8)) -} - -func (f *decFnInfo) kInt16(rv reflect.Value) { - rv.SetInt(f.d.d.DecodeInt(16)) -} - -func (f *decFnInfo) kFloat32(rv reflect.Value) { - rv.SetFloat(f.d.d.DecodeFloat(true)) -} - -func (f *decFnInfo) kFloat64(rv reflect.Value) { - rv.SetFloat(f.d.d.DecodeFloat(false)) -} - -func (f *decFnInfo) kUint8(rv reflect.Value) { - rv.SetUint(f.d.d.DecodeUint(8)) -} - -func (f *decFnInfo) kUint64(rv reflect.Value) { - rv.SetUint(f.d.d.DecodeUint(64)) -} - -func (f *decFnInfo) kUint(rv reflect.Value) { - rv.SetUint(f.d.d.DecodeUint(uintBitsize)) -} - -func (f *decFnInfo) kUintptr(rv reflect.Value) { - rv.SetUint(f.d.d.DecodeUint(uintBitsize)) -} - -func (f *decFnInfo) kUint32(rv reflect.Value) { - rv.SetUint(f.d.d.DecodeUint(32)) -} - -func (f *decFnInfo) kUint16(rv reflect.Value) { - rv.SetUint(f.d.d.DecodeUint(16)) -} - -// func (f *decFnInfo) kPtr(rv reflect.Value) { -// debugf(">>>>>>> ??? decode kPtr called - shouldn't get called") -// if rv.IsNil() { -// rv.Set(reflect.New(rv.Type().Elem())) -// } -// f.d.decodeValue(rv.Elem()) -// } - // var kIntfCtr uint64 -func (f *decFnInfo) kInterfaceNaked() (rvn reflect.Value) { +func (d *Decoder) kInterfaceNaked(f *codecFnInfo) (rvn reflect.Value) { // nil interface: // use some hieristics to decode it appropriately // based on the detected next value in the stream. 
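// Illustrative sketch, not part of this patch: kInterfaceNaked below picks a
// concrete Go type for an untyped interface{} target from the next value in
// the stream, honoring the handle's MapType/SliceType overrides (the
// mapStrIntfTypId branch is the JSON-friendly fast path). The usage below goes
// through the public API and assumes the handle fields keep their documented
// names; error handling is elided.
package main

import (
	"fmt"
	"reflect"

	"github.com/ugorji/go/codec"
)

func main() {
	var jh codec.JsonHandle

	var v1 interface{}
	_ = codec.NewDecoderBytes([]byte(`{"a":1}`), &jh).Decode(&v1)
	fmt.Printf("%T\n", v1) // default naked map type for this handle

	// Ask for map[string]interface{} explicitly; the d.mtid/mapStrIntfTypId
	// branch above then takes the allocation-friendly path.
	jh.MapType = reflect.TypeOf(map[string]interface{}(nil))
	var v2 interface{}
	_ = codec.NewDecoderBytes([]byte(`{"a":1}`), &jh).Decode(&v2)
	fmt.Printf("%T\n", v2) // map[string]interface {}
}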
- d := f.d + n := d.naked() d.d.DecodeNaked() - n := &d.n if n.v == valueTypeNil { return } @@ -594,268 +949,277 @@ func (f *decFnInfo) kInterfaceNaked() (rvn reflect.Value) { // var useRvn bool switch n.v { case valueTypeMap: - // if d.h.MapType == nil || d.h.MapType == mapIntfIntfTyp { - // } else if d.h.MapType == mapStrIntfTyp { // for json performance - // } if d.mtid == 0 || d.mtid == mapIntfIntfTypId { - l := len(n.ms) - n.ms = append(n.ms, nil) - var v2 interface{} = &n.ms[l] - d.decode(v2) - rvn = reflect.ValueOf(v2).Elem() - n.ms = n.ms[:l] + if n.lm < arrayCacheLen { + n.ma[n.lm] = nil + rvn = n.rr[decNakedMapIntfIntfIdx*arrayCacheLen+n.lm] + n.lm++ + d.decode(&n.ma[n.lm-1]) + n.lm-- + } else { + var v2 map[interface{}]interface{} + d.decode(&v2) + rvn = reflect.ValueOf(&v2).Elem() + } } else if d.mtid == mapStrIntfTypId { // for json performance - l := len(n.ns) - n.ns = append(n.ns, nil) - var v2 interface{} = &n.ns[l] - d.decode(v2) - rvn = reflect.ValueOf(v2).Elem() - n.ns = n.ns[:l] - } else { - rvn = reflect.New(d.h.MapType).Elem() - d.decodeValue(rvn, nil) - } - case valueTypeArray: - // if d.h.SliceType == nil || d.h.SliceType == intfSliceTyp { - if d.stid == 0 || d.stid == intfSliceTypId { - l := len(n.ss) - n.ss = append(n.ss, nil) - var v2 interface{} = &n.ss[l] - d.decode(v2) - n.ss = n.ss[:l] - rvn = reflect.ValueOf(v2).Elem() - if reflectArrayOfSupported && d.stid == 0 && d.h.PreferArrayOverSlice { - rvn = reflectArrayOf(rvn) + if n.ln < arrayCacheLen { + n.na[n.ln] = nil + rvn = n.rr[decNakedMapStrIntfIdx*arrayCacheLen+n.ln] + n.ln++ + d.decode(&n.na[n.ln-1]) + n.ln-- + } else { + var v2 map[string]interface{} + d.decode(&v2) + rvn = reflect.ValueOf(&v2).Elem() } } else { - rvn = reflect.New(d.h.SliceType).Elem() - d.decodeValue(rvn, nil) + rvn = reflect.New(d.h.MapType) + if useLookupRecognizedTypes && d.mtr { // isRecognizedRtid(d.mtid) { + d.decode(rv2i(rvn)) + rvn = rvn.Elem() + } else { + rvn = rvn.Elem() + d.decodeValue(rvn, nil, false, true) + } + } + case valueTypeArray: + if d.stid == 0 || d.stid == intfSliceTypId { + if n.ls < arrayCacheLen { + n.sa[n.ls] = nil + rvn = n.rr[decNakedSliceIntfIdx*arrayCacheLen+n.ls] + n.ls++ + d.decode(&n.sa[n.ls-1]) + n.ls-- + } else { + var v2 []interface{} + d.decode(&v2) + rvn = reflect.ValueOf(&v2).Elem() + } + if reflectArrayOfSupported && d.stid == 0 && d.h.PreferArrayOverSlice { + rvn2 := reflect.New(reflectArrayOf(rvn.Len(), intfTyp)).Elem() + reflect.Copy(rvn2, rvn) + rvn = rvn2 + } + } else { + rvn = reflect.New(d.h.SliceType) + if useLookupRecognizedTypes && d.str { // isRecognizedRtid(d.stid) { + d.decode(rv2i(rvn)) + rvn = rvn.Elem() + } else { + rvn = rvn.Elem() + d.decodeValue(rvn, nil, false, true) + } } case valueTypeExt: var v interface{} tag, bytes := n.u, n.l // calling decode below might taint the values if bytes == nil { - l := len(n.is) - n.is = append(n.is, nil) - v2 := &n.is[l] - d.decode(v2) - v = *v2 - n.is = n.is[:l] + if n.li < arrayCacheLen { + n.ia[n.li] = nil + n.li++ + d.decode(&n.ia[n.li-1]) + // v = *(&n.ia[l]) + n.li-- + v = n.ia[n.li] + n.ia[n.li] = nil + } else { + d.decode(&v) + } } bfn := d.h.getExtForTag(tag) if bfn == nil { var re RawExt re.Tag = tag re.Data = detachZeroCopyBytes(d.bytes, nil, bytes) - rvn = reflect.ValueOf(re) + re.Value = v + rvn = reflect.ValueOf(&re).Elem() } else { rvnA := reflect.New(bfn.rt) - rvn = rvnA.Elem() if bytes != nil { - bfn.ext.ReadExt(rvnA.Interface(), bytes) + bfn.ext.ReadExt(rv2i(rvnA), bytes) } else { - bfn.ext.UpdateExt(rvnA.Interface(), v) + 
bfn.ext.UpdateExt(rv2i(rvnA), v) } + rvn = rvnA.Elem() } case valueTypeNil: // no-op case valueTypeInt: - rvn = reflect.ValueOf(&n.i).Elem() + rvn = n.rr[decNakedIntIdx] // d.np.get(&n.i) case valueTypeUint: - rvn = reflect.ValueOf(&n.u).Elem() + rvn = n.rr[decNakedUintIdx] // d.np.get(&n.u) case valueTypeFloat: - rvn = reflect.ValueOf(&n.f).Elem() + rvn = n.rr[decNakedFloatIdx] // d.np.get(&n.f) case valueTypeBool: - rvn = reflect.ValueOf(&n.b).Elem() + rvn = n.rr[decNakedBoolIdx] // d.np.get(&n.b) case valueTypeString, valueTypeSymbol: - rvn = reflect.ValueOf(&n.s).Elem() + rvn = n.rr[decNakedStringIdx] // d.np.get(&n.s) case valueTypeBytes: - rvn = reflect.ValueOf(&n.l).Elem() + rvn = n.rr[decNakedBytesIdx] // d.np.get(&n.l) case valueTypeTimestamp: - rvn = reflect.ValueOf(&n.t).Elem() + rvn = n.rr[decNakedTimeIdx] // d.np.get(&n.t) default: panic(fmt.Errorf("kInterfaceNaked: unexpected valueType: %d", n.v)) } return } -func (f *decFnInfo) kInterface(rv reflect.Value) { - // debugf("\t===> kInterface") - +func (d *Decoder) kInterface(f *codecFnInfo, rv reflect.Value) { // Note: // A consequence of how kInterface works, is that // if an interface already contains something, we try // to decode into what was there before. // We do not replace with a generic value (as got from decodeNaked). + // every interface passed here MUST be settable. var rvn reflect.Value if rv.IsNil() { - rvn = f.kInterfaceNaked() - if rvn.IsValid() { + if rvn = d.kInterfaceNaked(f); rvn.IsValid() { rv.Set(rvn) } - } else if f.d.h.InterfaceReset { - rvn = f.kInterfaceNaked() - if rvn.IsValid() { + return + } + if d.h.InterfaceReset { + if rvn = d.kInterfaceNaked(f); rvn.IsValid() { rv.Set(rvn) } else { // reset to zero value based on current type in there. rv.Set(reflect.Zero(rv.Elem().Type())) } - } else { - rvn = rv.Elem() - // Note: interface{} is settable, but underlying type may not be. - // Consequently, we have to set the reflect.Value directly. - // if underlying type is settable (e.g. ptr or interface), - // we just decode into it. - // Else we create a settable value, decode into it, and set on the interface. - if rvn.CanSet() { - f.d.decodeValue(rvn, nil) - } else { - rvn2 := reflect.New(rvn.Type()).Elem() - rvn2.Set(rvn) - f.d.decodeValue(rvn2, nil) - rv.Set(rvn2) - } + return } + + // now we have a non-nil interface value, meaning it contains a type + rvn = rv.Elem() + if d.d.TryDecodeAsNil() { + rv.Set(reflect.Zero(rvn.Type())) + return + } + + // Note: interface{} is settable, but underlying type may not be. + // Consequently, we MAY have to create a decodable value out of the underlying value, + // decode into it, and reset the interface itself. + // fmt.Printf(">>>> kInterface: rvn type: %v, rv type: %v\n", rvn.Type(), rv.Type()) + + rvn2, canDecode := isDecodeable(rvn) + if canDecode { + d.decodeValue(rvn2, nil, true, true) + return + } + + rvn2 = reflect.New(rvn.Type()).Elem() + rvn2.Set(rvn) + d.decodeValue(rvn2, nil, true, true) + rv.Set(rvn2) } -func (f *decFnInfo) kStruct(rv reflect.Value) { +func (d *Decoder) kStruct(f *codecFnInfo, rv reflect.Value) { + // checking if recognized within kstruct is too expensive. + // only check where you can determine if valid outside the loop + // ie on homogenous collections: slices, arrays and maps. + // + // if true, we don't create too many decFn's. + // It's a delicate balance. 
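// Illustrative sketch, not part of this patch: kInterface above keeps the
// concrete type already stored in a non-nil interface{} by copying it into an
// addressable temporary, decoding into that, and writing it back (an
// interface's element itself is never settable). The helper below shows that
// reflect pattern in isolation; decodeInto is a stand-in for the decoder.
package main

import (
	"fmt"
	"reflect"
)

// decodeInto pretends to be the decoder writing a new value of the same kind.
func decodeInto(rv reflect.Value) {
	if rv.Kind() == reflect.Int {
		rv.SetInt(42)
	}
}

func decodeIntoInterface(rv reflect.Value) {
	elem := rv.Elem() // concrete value held by the interface; not settable
	tmp := reflect.New(elem.Type()).Elem()
	tmp.Set(elem)   // start from the existing contents
	decodeInto(tmp) // decode into the addressable copy
	rv.Set(tmp)     // store the updated value back into the interface
}

func main() {
	var v interface{} = int(7)
	rv := reflect.ValueOf(&v).Elem() // settable interface value
	decodeIntoInterface(rv)
	fmt.Println(v) // 42
}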
+ const checkRecognized bool = false // false: TODO + fti := f.ti - d := f.d dd := d.d - cr := d.cr + elemsep := d.hh.hasElemSeparators() + sfn := structFieldNode{v: rv, update: true} ctyp := dd.ContainerType() if ctyp == valueTypeMap { containerLen := dd.ReadMapStart() if containerLen == 0 { - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return } tisfi := fti.sfi hasLen := containerLen >= 0 - if hasLen { - for j := 0; j < containerLen; j++ { - // rvkencname := dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapKey) - } - rvkencnameB := dd.DecodeBytes(f.d.b[:], true, true) - rvkencname := stringView(rvkencnameB) - // rvksi := ti.getForEncName(rvkencname) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if k := fti.indexForEncName(rvkencname); k > -1 { - si := tisfi[k] - if dd.TryDecodeAsNil() { - si.setToZeroValue(rv) - } else { - d.decodeValue(si.field(rv, true), nil) - } - } else { - d.structFieldNotFound(-1, rvkencname) - } - keepAlive4StringView(rvkencnameB) // maintain ref 4 stringView + + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + // rvkencname := dd.DecodeString() + if elemsep { + dd.ReadMapElemKey() } - } else { - for j := 0; !dd.CheckBreak(); j++ { - // rvkencname := dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapKey) - } - rvkencnameB := dd.DecodeBytes(f.d.b[:], true, true) - rvkencname := stringView(rvkencnameB) - // rvksi := ti.getForEncName(rvkencname) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if k := fti.indexForEncName(rvkencname); k > -1 { - si := tisfi[k] - if dd.TryDecodeAsNil() { - si.setToZeroValue(rv) - } else { - d.decodeValue(si.field(rv, true), nil) - } - } else { - d.structFieldNotFound(-1, rvkencname) - } - keepAlive4StringView(rvkencnameB) // maintain ref 4 stringView + rvkencnameB := dd.DecodeStringAsBytes() + rvkencname := stringView(rvkencnameB) + // rvksi := ti.getForEncName(rvkencname) + if elemsep { + dd.ReadMapElemValue() } + if k := fti.indexForEncName(rvkencname); k > -1 { + si := tisfi[k] + if dd.TryDecodeAsNil() { + si.setToZeroValue(rv) + } else { + d.decodeValue(sfn.field(si), nil, checkRecognized, true) + } + } else { + d.structFieldNotFound(-1, rvkencname) + } + // keepAlive4StringView(rvkencnameB) // maintain ref 4 stringView // not needed, as reference is outside loop } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() } else if ctyp == valueTypeArray { containerLen := dd.ReadArrayStart() if containerLen == 0 { - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } + dd.ReadArrayEnd() return } // Not much gain from doing it two ways for array. // Arrays are not used as much for structs. 
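// Illustrative sketch, not part of this patch: this change collapses the two
// previously duplicated loops (known length vs. break-terminated) into one
// condition: (hasLen && j < n) || !(hasLen || checkBreak()). The toy below
// exercises that condition; checkBreak here is a stand-in for dd.CheckBreak().
package main

import "fmt"

func iterate(containerLen int, checkBreak func() bool, elem func(j int)) {
	hasLen := containerLen >= 0
	for j := 0; (hasLen && j < containerLen) || !(hasLen || checkBreak()); j++ {
		elem(j)
	}
}

func main() {
	// length-prefixed container (e.g. msgpack): exactly 3 elements
	iterate(3, nil, func(j int) { fmt.Println("fixed", j) })

	// indefinite-length container (e.g. cbor/json): stop when a break is seen
	remaining := 2
	iterate(-1, func() bool { remaining--; return remaining < 0 },
		func(j int) { fmt.Println("stream", j) })
}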
hasLen := containerLen >= 0 for j, si := range fti.sfip { - if hasLen { - if j == containerLen { - break - } - } else if dd.CheckBreak() { + if (hasLen && j == containerLen) || (!hasLen && dd.CheckBreak()) { break } - if cr != nil { - cr.sendContainerState(containerArrayElem) + if elemsep { + dd.ReadArrayElem() } if dd.TryDecodeAsNil() { si.setToZeroValue(rv) } else { - d.decodeValue(si.field(rv, true), nil) + d.decodeValue(sfn.field(si), nil, checkRecognized, true) } } if containerLen > len(fti.sfip) { // read remaining values and throw away for j := len(fti.sfip); j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerArrayElem) + if elemsep { + dd.ReadArrayElem() } d.structFieldNotFound(j, "") } } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } + dd.ReadArrayEnd() } else { - f.d.error(onlyMapOrArrayCanDecodeIntoStructErr) + d.error(onlyMapOrArrayCanDecodeIntoStructErr) return } } -func (f *decFnInfo) kSlice(rv reflect.Value) { +func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) { // A slice can be set from a map or array in stream. // This way, the order can be kept (as order is lost with map). ti := f.ti - d := f.d dd := d.d rtelem0 := ti.rt.Elem() ctyp := dd.ContainerType() if ctyp == valueTypeBytes || ctyp == valueTypeString { // you can only decode bytes or string in the stream into a slice or array of bytes if !(ti.rtid == uint8SliceTypId || rtelem0.Kind() == reflect.Uint8) { - f.d.errorf("bytes or string in the stream must be decoded into a slice or array of bytes, not %v", ti.rt) + d.errorf("bytes or string in the stream must be decoded into a slice or array of bytes, not %v", ti.rt) } if f.seq == seqTypeChan { - bs2 := dd.DecodeBytes(nil, false, true) - ch := rv.Interface().(chan<- byte) + bs2 := dd.DecodeBytes(nil, true) + ch := rv2i(rv).(chan<- byte) for _, b := range bs2 { ch <- b } } else { rvbs := rv.Bytes() - bs2 := dd.DecodeBytes(rvbs, false, false) + bs2 := dd.DecodeBytes(rvbs, false) if rvbs == nil && bs2 != nil || rvbs != nil && bs2 == nil || len(bs2) != len(rvbs) { if rv.CanSet() { rv.SetBytes(bs2) @@ -871,155 +1235,182 @@ func (f *decFnInfo) kSlice(rv reflect.Value) { slh, containerLenS := d.decSliceHelperStart() // only expects valueType(Array|Map) - // // an array can never return a nil slice. so no need to check f.array here. + // an array can never return a nil slice. so no need to check f.array here. 
if containerLenS == 0 { - if f.seq == seqTypeSlice { - if rv.IsNil() { - rv.Set(reflect.MakeSlice(ti.rt, 0, 0)) - } else { - rv.SetLen(0) - } - } else if f.seq == seqTypeChan { - if rv.IsNil() { - rv.Set(reflect.MakeChan(ti.rt, 0)) + if rv.CanSet() { + if f.seq == seqTypeSlice { + if rv.IsNil() { + rv.Set(reflect.MakeSlice(ti.rt, 0, 0)) + } else { + rv.SetLen(0) + } + } else if f.seq == seqTypeChan { + if rv.IsNil() { + rv.Set(reflect.MakeChan(ti.rt, 0)) + } } } slh.End() return } + rtelem0Size := int(rtelem0.Size()) + rtElem0Kind := rtelem0.Kind() + rtElem0Id := rt2id(rtelem0) + rtelem0Mut := !isImmutableKind(rtElem0Kind) rtelem := rtelem0 - for rtelem.Kind() == reflect.Ptr { + rtelemkind := rtelem.Kind() + for rtelemkind == reflect.Ptr { rtelem = rtelem.Elem() + rtelemkind = rtelem.Kind() } - fn := d.getDecFn(rtelem, true, true) + + var fn *codecFn var rv0, rv9 reflect.Value rv0 = rv rvChanged := false - // for j := 0; j < containerLenS; j++ { - var rvlen int - if containerLenS > 0 { // hasLen - if f.seq == seqTypeChan { - if rv.IsNil() { - rvlen, _ = decInferLen(containerLenS, f.d.h.MaxInitLen, int(rtelem0.Size())) - rv.Set(reflect.MakeChan(ti.rt, rvlen)) - } - // handle chan specially: - for j := 0; j < containerLenS; j++ { - rv9 = reflect.New(rtelem0).Elem() - slh.ElemContainerState(j) - d.decodeValue(rv9, fn) - rv.Send(rv9) - } - } else { // slice or array - var truncated bool // says len of sequence is not same as expected number of elements - numToRead := containerLenS // if truncated, reset numToRead - - rvcap := rv.Cap() - rvlen = rv.Len() - if containerLenS > rvcap { - if f.seq == seqTypeArray { - d.arrayCannotExpand(rvlen, containerLenS) + rvlen := rv.Len() + rvcap := rv.Cap() + hasLen := containerLenS > 0 + if hasLen && f.seq == seqTypeSlice { + if containerLenS > rvcap { + oldRvlenGtZero := rvlen > 0 + rvlen = decInferLen(containerLenS, d.h.MaxInitLen, int(rtelem0.Size())) + if rvlen <= rvcap { + if rv.CanSet() { + rv.SetLen(rvlen) } else { - oldRvlenGtZero := rvlen > 0 - rvlen, truncated = decInferLen(containerLenS, f.d.h.MaxInitLen, int(rtelem0.Size())) - if truncated { - if rvlen <= rvcap { - rv.SetLen(rvlen) - } else { - rv = reflect.MakeSlice(ti.rt, rvlen, rvlen) - rvChanged = true - } - } else { - rv = reflect.MakeSlice(ti.rt, rvlen, rvlen) - rvChanged = true - } - if rvChanged && oldRvlenGtZero && !isImmutableKind(rtelem0.Kind()) { - reflect.Copy(rv, rv0) // only copy up to length NOT cap i.e. rv0.Slice(0, rvcap) - } - rvcap = rvlen + rv = rv.Slice(0, rvlen) + rvChanged = true } - numToRead = rvlen - } else if containerLenS != rvlen { - if f.seq == seqTypeSlice { - rv.SetLen(containerLenS) - rvlen = containerLenS - } - } - j := 0 - // we read up to the numToRead - for ; j < numToRead; j++ { - slh.ElemContainerState(j) - d.decodeValue(rv.Index(j), fn) - } - - // if slice, expand and read up to containerLenS (or EOF) iff truncated - // if array, swallow all the rest. 
- - if f.seq == seqTypeArray { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } else if truncated { // slice was truncated, as chan NOT in this block - for ; j < containerLenS; j++ { - rv = expandSliceValue(rv, 1) - rv9 = rv.Index(j) - if resetSliceElemToZeroValue { - rv9.Set(reflect.Zero(rtelem0)) - } - slh.ElemContainerState(j) - d.decodeValue(rv9, fn) - } - } - } - } else { - rvlen = rv.Len() - j := 0 - for ; !dd.CheckBreak(); j++ { - if f.seq == seqTypeChan { - slh.ElemContainerState(j) - rv9 = reflect.New(rtelem0).Elem() - d.decodeValue(rv9, fn) - rv.Send(rv9) } else { - // if indefinite, etc, then expand the slice if necessary - var decodeIntoBlank bool - if j >= rvlen { - if f.seq == seqTypeArray { - d.arrayCannotExpand(rvlen, j+1) - decodeIntoBlank = true - } else { // if f.seq == seqTypeSlice - // rv = reflect.Append(rv, reflect.Zero(rtelem0)) // uses append logic, plus varargs - rv = expandSliceValue(rv, 1) - rv9 = rv.Index(j) - // rv.Index(rv.Len() - 1).Set(reflect.Zero(rtelem0)) - if resetSliceElemToZeroValue { - rv9.Set(reflect.Zero(rtelem0)) - } - rvlen++ - rvChanged = true - } - } else { // slice or array - rv9 = rv.Index(j) - } - slh.ElemContainerState(j) - if decodeIntoBlank { - d.swallow() - } else { // seqTypeSlice - d.decodeValue(rv9, fn) - } - } - } - if f.seq == seqTypeSlice { - if j < rvlen { - rv.SetLen(j) - } else if j == 0 && rv.IsNil() { - rv = reflect.MakeSlice(ti.rt, 0, 0) + rv = reflect.MakeSlice(ti.rt, rvlen, rvlen) + rvcap = rvlen rvChanged = true } + if rvChanged && oldRvlenGtZero && !isImmutableKind(rtelem0.Kind()) { + reflect.Copy(rv, rv0) // only copy up to length NOT cap i.e. rv0.Slice(0, rvcap) + } + } else if containerLenS != rvlen { + rvlen = containerLenS + if rv.CanSet() { + rv.SetLen(rvlen) + } else { + rv = rv.Slice(0, rvlen) + rvChanged = true + } + } + } + + var recognizedRtid, recognizedRtidPtr bool + if useLookupRecognizedTypes { + recognizedRtid = isRecognizedRtid(rtElem0Id) + recognizedRtidPtr = isRecognizedRtidPtr(rtElem0Id) + } + + // consider creating new element once, and just decoding into it. 
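// Illustrative sketch, not part of this patch: the resizing logic above grows
// the destination slice in place when it is settable (SetLen), reslices when
// the existing capacity suffices, and only falls back to MakeSlice plus Copy
// when the backing array must be replaced, tracking that with rvChanged so the
// caller can store the new slice header. A condensed version of that decision:
package main

import (
	"fmt"
	"reflect"
)

// ensureLen returns a slice value of length n, reusing rv's backing array
// when possible. changed reports whether the caller must write rv2 back.
func ensureLen(rv reflect.Value, n int) (rv2 reflect.Value, changed bool) {
	switch {
	case n <= rv.Cap() && rv.CanSet():
		rv.SetLen(n)
		return rv, false
	case n <= rv.Cap():
		return rv.Slice(0, n), true
	default:
		rv2 = reflect.MakeSlice(rv.Type(), n, n)
		reflect.Copy(rv2, rv) // keep what was already decoded
		return rv2, true
	}
}

func main() {
	s := make([]int, 2, 8)
	rv, changed := ensureLen(reflect.ValueOf(&s).Elem(), 5)
	fmt.Println(rv.Len(), changed) // 5 false (grown in place)

	rv, changed = ensureLen(reflect.ValueOf(&s).Elem(), 20)
	fmt.Println(rv.Len(), changed) // 20 true (new backing array)
}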
+ var rtelem0Zero reflect.Value + var rtelem0ZeroValid bool + var decodeAsNil bool + var j int + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && (f.seq == seqTypeSlice || f.seq == seqTypeChan) && rv.IsNil() { + if hasLen { + rvlen = decInferLen(containerLenS, d.h.MaxInitLen, rtelem0Size) + } else { + rvlen = 8 + } + if f.seq == seqTypeSlice { + rv = reflect.MakeSlice(ti.rt, rvlen, rvlen) + rvChanged = true + } else if f.seq == seqTypeChan { + rv.Set(reflect.MakeChan(ti.rt, rvlen)) + } + } + slh.ElemContainerState(j) + decodeAsNil = dd.TryDecodeAsNil() + if f.seq == seqTypeChan { + if decodeAsNil { + rv.Send(reflect.Zero(rtelem0)) + continue + } + if rtelem0Mut || !rv9.IsValid() { // || (rtElem0Kind == reflect.Ptr && rv9.IsNil()) { + rv9 = reflect.New(rtelem0).Elem() + } + if useLookupRecognizedTypes && (recognizedRtid || recognizedRtidPtr) { + d.decode(rv2i(rv9.Addr())) + } else { + if fn == nil { + fn = d.cf.get(rtelem, true, true) + } + d.decodeValue(rv9, fn, false, true) + } + rv.Send(rv9) + } else { + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= rvlen { + if f.seq == seqTypeArray { + d.arrayCannotExpand(rvlen, j+1) + decodeIntoBlank = true + } else { // if f.seq == seqTypeSlice + // rv = reflect.Append(rv, reflect.Zero(rtelem0)) // uses append logic, plus varargs + var rvcap2 int + rv9, rvcap2, rvChanged = decExpandSliceRV(rv, ti.rt, rtelem0Size, 1, rvlen, rvcap) + rvlen++ + if rvChanged { + rv = rv9 + rvcap = rvcap2 + } + } + } + if decodeIntoBlank { + if !decodeAsNil { + d.swallow() + } + } else { + rv9 = rv.Index(j) + if d.h.SliceElementReset || decodeAsNil { + if !rtelem0ZeroValid { + rtelem0ZeroValid = true + rtelem0Zero = reflect.Zero(rtelem0) + } + rv9.Set(rtelem0Zero) + } + if decodeAsNil { + continue + } + + if useLookupRecognizedTypes && recognizedRtid { + d.decode(rv2i(rv9.Addr())) + } else if useLookupRecognizedTypes && recognizedRtidPtr { // && !rv9.IsNil() { + if rv9.IsNil() { + rv9.Set(reflect.New(rtelem)) + } + d.decode(rv2i(rv9)) + } else { + if fn == nil { + fn = d.cf.get(rtelem, true, true) + } + d.decodeValue(rv9, fn, false, true) + } + } + } + } + if f.seq == seqTypeSlice { + if j < rvlen { + if rv.CanSet() { + rv.SetLen(j) + } else { + rv = rv.Slice(0, j) + rvChanged = true + } + rvlen = j + } else if j == 0 && rv.IsNil() { + rv = reflect.MakeSlice(ti.rt, 0, 0) + rvChanged = true } } slh.End() @@ -1029,41 +1420,48 @@ func (f *decFnInfo) kSlice(rv reflect.Value) { } } -func (f *decFnInfo) kArray(rv reflect.Value) { - // f.d.decodeValue(rv.Slice(0, rv.Len())) - f.kSlice(rv.Slice(0, rv.Len())) -} +// func (d *Decoder) kArray(f *codecFnInfo, rv reflect.Value) { +// // d.decodeValueFn(rv.Slice(0, rv.Len())) +// f.kSlice(rv.Slice(0, rv.Len())) +// } -func (f *decFnInfo) kMap(rv reflect.Value) { - d := f.d +func (d *Decoder) kMap(f *codecFnInfo, rv reflect.Value) { dd := d.d containerLen := dd.ReadMapStart() - cr := d.cr + elemsep := d.hh.hasElemSeparators() ti := f.ti if rv.IsNil() { - rv.Set(reflect.MakeMap(ti.rt)) + rv.Set(makeMapReflect(ti.rt, containerLen)) } if containerLen == 0 { - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return } ktype, vtype := ti.rt.Key(), ti.rt.Elem() - ktypeId := reflect.ValueOf(ktype).Pointer() + ktypeId := rt2id(ktype) + vtypeId := rt2id(vtype) vtypeKind := vtype.Kind() - var keyFn, valFn *decFn - var xtyp reflect.Type - for xtyp = ktype; xtyp.Kind() == reflect.Ptr; xtyp = xtyp.Elem() { + var recognizedKtyp, 
recognizedVtyp, recognizedPtrKtyp, recognizedPtrVtyp bool + if useLookupRecognizedTypes { + recognizedKtyp = isRecognizedRtid(ktypeId) + recognizedVtyp = isRecognizedRtid(vtypeId) + recognizedPtrKtyp = isRecognizedRtidPtr(ktypeId) + recognizedPtrVtyp = isRecognizedRtidPtr(vtypeId) } - keyFn = d.getDecFn(xtyp, true, true) - for xtyp = vtype; xtyp.Kind() == reflect.Ptr; xtyp = xtyp.Elem() { + + var keyFn, valFn *codecFn + var ktypeLo, vtypeLo reflect.Type + for ktypeLo = ktype; ktypeLo.Kind() == reflect.Ptr; ktypeLo = ktypeLo.Elem() { } - valFn = d.getDecFn(xtyp, true, true) + + for vtypeLo = vtype; vtypeLo.Kind() == reflect.Ptr; vtypeLo = vtypeLo.Elem() { + } + var mapGet, mapSet bool - if !f.d.h.MapValueReset { + rvvImmut := isImmutableKind(vtypeKind) + if !d.h.MapValueReset { // if pointer, mapGet = true // if interface, mapGet = true if !DecodeNakedAlways (else false) // if builtin, mapGet = false @@ -1071,118 +1469,140 @@ func (f *decFnInfo) kMap(rv reflect.Value) { if vtypeKind == reflect.Ptr { mapGet = true } else if vtypeKind == reflect.Interface { - if !f.d.h.InterfaceReset { + if !d.h.InterfaceReset { mapGet = true } - } else if !isImmutableKind(vtypeKind) { + } else if !rvvImmut { mapGet = true } } - var rvk, rvv, rvz reflect.Value - - // for j := 0; j < containerLen; j++ { - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - rvk = reflect.New(ktype).Elem() - if cr != nil { - cr.sendContainerState(containerMapKey) + var rvk, rvkp, rvv, rvz reflect.Value + rvkMut := !isImmutableKind(ktype.Kind()) // if ktype is immutable, then re-use the same rvk. + ktypeIsString := ktypeId == stringTypId + ktypeIsIntf := ktypeId == intfTypId + hasLen := containerLen > 0 + var kstrbs []byte + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if rvkMut || !rvkp.IsValid() { + rvkp = reflect.New(ktype) + rvk = rvkp.Elem() + } + if elemsep { + dd.ReadMapElemKey() + } + if dd.TryDecodeAsNil() { + // Previously, if a nil key, we just ignored the mapped value and continued. + // However, that makes the result of encoding and then decoding map[intf]intf{nil:nil} + // to be an empty map. + // Instead, we treat a nil key as the zero value of the type. + rvk.Set(reflect.Zero(ktype)) + } else if ktypeIsString { + kstrbs = dd.DecodeStringAsBytes() + rvk.SetString(stringView(kstrbs)) + // NOTE: if doing an insert, you MUST use a real string (not stringview) + } else if useLookupRecognizedTypes && recognizedKtyp { + d.decode(rv2i(rvkp)) + // rvk = rvkp.Elem() //TODO: remove, unnecessary + } else if useLookupRecognizedTypes && recognizedPtrKtyp { + if rvk.IsNil() { + rvk = reflect.New(ktypeLo) } - d.decodeValue(rvk, keyFn) - - // special case if a byte array. - if ktypeId == intfTypId { - rvk = rvk.Elem() + d.decode(rv2i(rvk)) + } else { + if keyFn == nil { + keyFn = d.cf.get(ktypeLo, true, true) + } + d.decodeValue(rvk, keyFn, false, true) + } + // special case if a byte array. 
+ if ktypeIsIntf { + if rvk2 := rvk.Elem(); rvk2.IsValid() { + rvk = rvk2 if rvk.Type() == uint8SliceTyp { rvk = reflect.ValueOf(d.string(rvk.Bytes())) } } - mapSet = true // set to false if u do a get, and its a pointer, and exists - if mapGet { - rvv = rv.MapIndex(rvk) - if rvv.IsValid() { - if vtypeKind == reflect.Ptr { - mapSet = false - } - } else { - if rvz.IsValid() { - rvz.Set(reflect.Zero(vtype)) - } else { - rvz = reflect.New(vtype).Elem() - } - rvv = rvz - } - } else { - if rvz.IsValid() { - rvz.Set(reflect.Zero(vtype)) - } else { - rvz = reflect.New(vtype).Elem() - } - rvv = rvz - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - d.decodeValue(rvv, valFn) - if mapSet { - rv.SetMapIndex(rvk, rvv) - } } - } else { - for j := 0; !dd.CheckBreak(); j++ { - rvk = reflect.New(ktype).Elem() - if cr != nil { - cr.sendContainerState(containerMapKey) - } - d.decodeValue(rvk, keyFn) - // special case if a byte array. - if ktypeId == intfTypId { - rvk = rvk.Elem() - if rvk.Type() == uint8SliceTyp { - rvk = reflect.ValueOf(d.string(rvk.Bytes())) - } - } - mapSet = true // set to false if u do a get, and its a pointer, and exists - if mapGet { - rvv = rv.MapIndex(rvk) - if rvv.IsValid() { - if vtypeKind == reflect.Ptr { - mapSet = false - } - } else { - if rvz.IsValid() { - rvz.Set(reflect.Zero(vtype)) - } else { - rvz = reflect.New(vtype).Elem() - } - rvv = rvz - } - } else { - if rvz.IsValid() { - rvz.Set(reflect.Zero(vtype)) - } else { - rvz = reflect.New(vtype).Elem() - } - rvv = rvz - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - d.decodeValue(rvv, valFn) - if mapSet { - rv.SetMapIndex(rvk, rvv) - } + if elemsep { + dd.ReadMapElemValue() } - } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } -} -type decRtidFn struct { - rtid uintptr - fn decFn + // Brittle, but OK per TryDecodeAsNil() contract. + // i.e. TryDecodeAsNil never shares slices with other decDriver procedures + if dd.TryDecodeAsNil() { + if ktypeIsString { + rvk.SetString(d.string(kstrbs)) + } + if d.h.DeleteOnNilMapValue { + rv.SetMapIndex(rvk, reflect.Value{}) + } else { + rv.SetMapIndex(rvk, reflect.Zero(vtype)) + } + continue + } + + mapSet = true // set to false if u do a get, and its a non-nil pointer + if mapGet { + // mapGet true only in case where kind=Ptr|Interface or kind is otherwise mutable. + rvv = rv.MapIndex(rvk) + if !rvv.IsValid() { + rvv = reflect.New(vtype).Elem() + } else if vtypeKind == reflect.Ptr { + if rvv.IsNil() { + rvv = reflect.New(vtype).Elem() + } else { + mapSet = false + } + } else if vtypeKind == reflect.Interface { + // not addressable, and thus not settable. + // e MUST create a settable/addressable variant + rvv2 := reflect.New(rvv.Type()).Elem() + if !rvv.IsNil() { + rvv2.Set(rvv) + } + rvv = rvv2 + } + // else it is ~mutable, and we can just decode into it directly + } else if rvvImmut { + if !rvz.IsValid() { + rvz = reflect.New(vtype).Elem() + } + rvv = rvz + } else { + rvv = reflect.New(vtype).Elem() + } + + // We MUST be done with the stringview of the key, before decoding the value + // so that we don't bastardize the reused byte array. 
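// Illustrative sketch, not part of this patch: when the stream carries a nil
// map value, the code above either deletes the key or stores the value type's
// zero value, driven by the handle's DeleteOnNilMapValue option. Both actions
// come down to reflect.Value.SetMapIndex, which deletes the key when handed
// the zero reflect.Value.
package main

import (
	"fmt"
	"reflect"
)

func setOrDelete(m map[string]int, key string, deleteOnNil bool) {
	rv := reflect.ValueOf(m)
	rvk := reflect.ValueOf(key)
	if deleteOnNil {
		rv.SetMapIndex(rvk, reflect.Value{}) // zero Value => delete the key
	} else {
		rv.SetMapIndex(rvk, reflect.Zero(rv.Type().Elem())) // store zero value
	}
}

func main() {
	m := map[string]int{"a": 1, "b": 2}
	setOrDelete(m, "a", true)
	setOrDelete(m, "b", false)
	fmt.Println(m) // map[b:0]
}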
+ if mapSet && ktypeIsString { + rvk.SetString(d.string(kstrbs)) + } + if useLookupRecognizedTypes && recognizedVtyp && rvv.CanAddr() { + d.decode(rv2i(rvv.Addr())) + } else if useLookupRecognizedTypes && recognizedPtrVtyp { + if rvv.IsNil() { + rvv = reflect.New(vtypeLo) + mapSet = true + } + d.decode(rv2i(rvv)) + } else { + if valFn == nil { + valFn = d.cf.get(vtypeLo, true, true) + } + d.decodeValue(rvv, valFn, false, true) + // d.decodeValueFn(rvv, valFn) + } + if mapSet { + rv.SetMapIndex(rvk, rvv) + } + // if ktypeIsString { + // // keepAlive4StringView(kstrbs) // not needed, as reference is outside loop + // } + } + + dd.ReadMapEnd() } // decNaked is used to keep track of the primitives decoded. @@ -1211,37 +1631,80 @@ type decNaked struct { l []byte s string t time.Time + b bool + + inited bool + v valueType - // stacks for reducing allocation - is []interface{} - ms []map[interface{}]interface{} - ns []map[string]interface{} - ss [][]interface{} - // rs []RawExt + li, lm, ln, ls int8 + // array/stacks for reducing allocation // keep arrays at the bottom? Chance is that they are not used much. - ia [4]interface{} - ma [4]map[interface{}]interface{} - na [4]map[string]interface{} - sa [4][]interface{} + ia [arrayCacheLen]interface{} + ma [arrayCacheLen]map[interface{}]interface{} + na [arrayCacheLen]map[string]interface{} + sa [arrayCacheLen][]interface{} // ra [2]RawExt + + rr [5 * arrayCacheLen]reflect.Value +} + +const ( + decNakedUintIdx = iota + decNakedIntIdx + decNakedFloatIdx + decNakedBytesIdx + decNakedStringIdx + decNakedTimeIdx + decNakedBoolIdx +) +const ( + _ = iota // maps to the scalars above + decNakedIntfIdx + decNakedMapIntfIntfIdx + decNakedMapStrIntfIdx + decNakedSliceIntfIdx +) + +func (n *decNaked) init() { + if n.inited { + return + } + // n.ms = n.ma[:0] + // n.is = n.ia[:0] + // n.ns = n.na[:0] + // n.ss = n.sa[:0] + + n.rr[decNakedUintIdx] = reflect.ValueOf(&n.u).Elem() + n.rr[decNakedIntIdx] = reflect.ValueOf(&n.i).Elem() + n.rr[decNakedFloatIdx] = reflect.ValueOf(&n.f).Elem() + n.rr[decNakedBytesIdx] = reflect.ValueOf(&n.l).Elem() + n.rr[decNakedStringIdx] = reflect.ValueOf(&n.s).Elem() + n.rr[decNakedTimeIdx] = reflect.ValueOf(&n.t).Elem() + n.rr[decNakedBoolIdx] = reflect.ValueOf(&n.b).Elem() + + for i := range [arrayCacheLen]struct{}{} { + n.rr[decNakedIntfIdx*arrayCacheLen+i] = reflect.ValueOf(&(n.ia[i])).Elem() + n.rr[decNakedMapIntfIntfIdx*arrayCacheLen+i] = reflect.ValueOf(&(n.ma[i])).Elem() + n.rr[decNakedMapStrIntfIdx*arrayCacheLen+i] = reflect.ValueOf(&(n.na[i])).Elem() + n.rr[decNakedSliceIntfIdx*arrayCacheLen+i] = reflect.ValueOf(&(n.sa[i])).Elem() + } + n.inited = true + // n.rr[] = reflect.ValueOf(&n.) } func (n *decNaked) reset() { - if n.ss != nil { - n.ss = n.ss[:0] - } - if n.is != nil { - n.is = n.is[:0] - } - if n.ms != nil { - n.ms = n.ms[:0] - } - if n.ns != nil { - n.ns = n.ns[:0] + if n == nil { + return } + n.li, n.lm, n.ln, n.ls = 0, 0, 0, 0 +} + +type rtid2rv struct { + rtid uintptr + rv reflect.Value } // A Decoder reads and decodes an object from an input stream in the codec format. @@ -1252,31 +1715,43 @@ type Decoder struct { d decDriver // NOTE: Decoder shouldn't call it's read methods, // as the handler MAY need to do some coordination. - r decReader - // sa [initCollectionCap]decRtidFn - h *BasicHandle + r decReader hh Handle + h *BasicHandle + + mtr, mtrp, str, strp bool // be bool // is binary encoding bytes bool // is bytes reader js bool // is json handle + // ---- cpu cache line boundary? 
+ rb bytesDecReader ri ioDecReader - cr containerStateRecv + bi bufioDecReader - s []decRtidFn - f map[uintptr]*decFn + // cr containerStateRecv - // _ uintptr // for alignment purposes, so next one starts from a cache line + n *decNaked + nsp *sync.Pool + + // ---- cpu cache line boundary? + + is map[string]string // used for interning strings // cache the mapTypeId and sliceTypeId for faster comparisons mtid uintptr stid uintptr - n decNaked - b [scratchByteArrayLen]byte - is map[string]string // used for interning strings + b [scratchByteArrayLen]byte + // _ uintptr // for alignment purposes, so next one starts from a cache line + + err error + // ---- cpu cache line boundary? + + cf codecFner + // _ [64]byte // force alignment??? } // NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader. @@ -1297,65 +1772,87 @@ func NewDecoderBytes(in []byte, h Handle) *Decoder { return d } +var defaultDecNaked decNaked + func newDecoder(h Handle) *Decoder { d := &Decoder{hh: h, h: h.getBasicHandle(), be: h.isBinary()} - n := &d.n - // n.rs = n.ra[:0] - n.ms = n.ma[:0] - n.is = n.ia[:0] - n.ns = n.na[:0] - n.ss = n.sa[:0] + + // NOTE: do not initialize d.n here. It is lazily initialized in d.naked() + _, d.js = h.(*JsonHandle) if d.h.InternString { d.is = make(map[string]string, 32) } d.d = h.newDecDriver(d) - d.cr, _ = d.d.(containerStateRecv) - // d.d = h.newDecDriver(decReaderT{true, &d.rb, &d.ri}) + // d.cr, _ = d.d.(containerStateRecv) return d } +// naked must be called before each call to .DecodeNaked, +// as they will use it. +func (d *Decoder) naked() *decNaked { + if d.n == nil { + // consider one of: + // - get from sync.Pool (if GC is frequent, there's no value here) + // - new alloc (safest. only init'ed if it a naked decode will be done) + // - field in Decoder (makes the Decoder struct very big) + // To support using a decoder where a DecodeNaked is not needed, + // we prefer #1 or #2. + // d.n = new(decNaked) // &d.nv // new(decNaked) // grab from a sync.Pool + // d.n.init() + var v interface{} + d.nsp, v = pool.decNaked() + d.n = v.(*decNaked) + } + return d.n +} + func (d *Decoder) resetCommon() { d.n.reset() d.d.reset() + d.cf.reset(d.hh) + d.err = nil // reset all things which were cached from the Handle, // but could be changed. 
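// Illustrative sketch, not part of this patch: naked() above allocates the
// fairly large decNaked scratch struct lazily and, per its comments, prefers
// borrowing one from a shared pool so decoders that never call DecodeNaked pay
// nothing. The same borrow/return shape with sync.Pool, using invented names:
package main

import (
	"fmt"
	"sync"
)

type scratch struct {
	buf [64]byte // stand-in for decNaked's arrays and cached reflect.Values
}

var scratchPool = sync.Pool{New: func() interface{} { return new(scratch) }}

func withScratch(work func(*scratch)) {
	s := scratchPool.Get().(*scratch) // borrow (may reuse an earlier one)
	defer scratchPool.Put(s)          // return it, as MustDecode does via nsp.Put
	work(s)
}

func main() {
	withScratch(func(s *scratch) {
		copy(s.buf[:], "hello")
		fmt.Println(string(s.buf[:5]))
	})
}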
d.mtid, d.stid = 0, 0 + d.mtr, d.mtrp, d.str, d.strp = false, false, false, false if d.h.MapType != nil { - d.mtid = reflect.ValueOf(d.h.MapType).Pointer() + d.mtid = rt2id(d.h.MapType) + if useLookupRecognizedTypes { + d.mtr = isRecognizedRtid(d.mtid) + d.mtrp = isRecognizedRtidPtr(d.mtid) + } } if d.h.SliceType != nil { - d.stid = reflect.ValueOf(d.h.SliceType).Pointer() + d.stid = rt2id(d.h.SliceType) + if useLookupRecognizedTypes { + d.str = isRecognizedRtid(d.stid) + d.strp = isRecognizedRtidPtr(d.stid) + } } } func (d *Decoder) Reset(r io.Reader) { - d.ri.x = &d.b - // d.s = d.sa[:0] - d.ri.bs.r = r - var ok bool - d.ri.br, ok = r.(decReaderByteScanner) - if !ok { - d.ri.br = &d.ri.bs + if d.h.ReaderBufferSize > 0 { + d.bi.buf = make([]byte, 0, d.h.ReaderBufferSize) + d.bi.reset(r) + d.r = &d.bi + } else { + d.ri.x = &d.b + // d.s = d.sa[:0] + d.ri.reset(r) + d.r = &d.ri } - d.r = &d.ri d.resetCommon() } func (d *Decoder) ResetBytes(in []byte) { - // d.s = d.sa[:0] d.bytes = true d.rb.reset(in) d.r = &d.rb d.resetCommon() } -// func (d *Decoder) sendContainerState(c containerState) { -// if d.cr != nil { -// d.cr.sendContainerState(c) -// } -// } - // Decode decodes the stream from reader and stores the result in the // value pointed to by v. v cannot be a nil pointer. v can also be // a reflect.Value of a pointer. @@ -1407,170 +1904,168 @@ func (d *Decoder) ResetBytes(in []byte) { // to its "zero" value (e.g. nil for slice/map, etc). // func (d *Decoder) Decode(v interface{}) (err error) { - defer panicToErr(&err) - d.decode(v) + defer panicToErrs2(&d.err, &err) + d.MustDecode(v) return } -// this is not a smart swallow, as it allocates objects and does unnecessary work. -func (d *Decoder) swallowViaHammer() { - var blank interface{} - d.decodeValue(reflect.ValueOf(&blank).Elem(), nil) +// MustDecode is like Decode, but panics if unable to Decode. +// This provides insight to the code location that triggered the error. +func (d *Decoder) MustDecode(v interface{}) { + // TODO: Top-level: ensure that v is a pointer and not nil. + if d.err != nil { + panic(d.err) + } + if d.d.TryDecodeAsNil() { + d.setZero(v) + } else { + d.decode(v) + } + if d.nsp != nil { + if d.n != nil { + d.nsp.Put(d.n) + d.n = nil + } + d.nsp = nil + } + d.n = nil + // xprintf(">>>>>>>> >>>>>>>> num decFns: %v\n", d.cf.sn) } +// // this is not a smart swallow, as it allocates objects and does unnecessary work. 
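// Illustrative sketch, not part of this patch: Reset/ResetBytes above let one
// Decoder be reused across many inputs (keeping its cached type handling), and
// the new ReaderBufferSize option routes Reset onto the buffered
// bufioDecReader. Public-API usage, assuming the option keeps this name:
package main

import (
	"bytes"
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var jh codec.JsonHandle
	jh.ReaderBufferSize = 4096 // ask Reset to use the internal buffered reader

	d := codec.NewDecoder(bytes.NewReader([]byte(`{"a":1}`)), &jh)
	var m map[string]int
	if err := d.Decode(&m); err != nil {
		panic(err)
	}

	// Reuse the same decoder for the next payload instead of allocating anew.
	d.Reset(bytes.NewReader([]byte(`{"b":2}`)))
	var m2 map[string]int
	if err := d.Decode(&m2); err != nil {
		panic(err)
	}
	fmt.Println(m, m2) // map[a:1] map[b:2]
}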
+// func (d *Decoder) swallowViaHammer() { +// var blank interface{} +// d.decodeValueNoFn(reflect.ValueOf(&blank).Elem()) +// } + func (d *Decoder) swallow() { // smarter decode that just swallows the content dd := d.d if dd.TryDecodeAsNil() { return } - cr := d.cr + elemsep := d.hh.hasElemSeparators() switch dd.ContainerType() { case valueTypeMap: containerLen := dd.ReadMapStart() - clenGtEqualZero := containerLen >= 0 - for j := 0; ; j++ { - if clenGtEqualZero { - if j >= containerLen { - break - } - } else if dd.CheckBreak() { - break - } - if cr != nil { - cr.sendContainerState(containerMapKey) + hasLen := containerLen >= 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + // if clenGtEqualZero {if j >= containerLen {break} } else if dd.CheckBreak() {break} + if elemsep { + dd.ReadMapElemKey() } d.swallow() - if cr != nil { - cr.sendContainerState(containerMapValue) + if elemsep { + dd.ReadMapElemValue() } d.swallow() } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() case valueTypeArray: - containerLenS := dd.ReadArrayStart() - clenGtEqualZero := containerLenS >= 0 - for j := 0; ; j++ { - if clenGtEqualZero { - if j >= containerLenS { - break - } - } else if dd.CheckBreak() { - break - } - if cr != nil { - cr.sendContainerState(containerArrayElem) + containerLen := dd.ReadArrayStart() + hasLen := containerLen >= 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if elemsep { + dd.ReadArrayElem() } d.swallow() } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } + dd.ReadArrayEnd() case valueTypeBytes: - dd.DecodeBytes(d.b[:], false, true) + dd.DecodeBytes(d.b[:], true) case valueTypeString: - dd.DecodeBytes(d.b[:], true, true) - // dd.DecodeStringAsBytes(d.b[:]) + dd.DecodeStringAsBytes() default: // these are all primitives, which we can get from decodeNaked // if RawExt using Value, complete the processing. + n := d.naked() dd.DecodeNaked() - if n := &d.n; n.v == valueTypeExt && n.l == nil { - l := len(n.is) - n.is = append(n.is, nil) - v2 := &n.is[l] - d.decode(v2) - n.is = n.is[:l] + if n.v == valueTypeExt && n.l == nil { + if n.li < arrayCacheLen { + n.ia[n.li] = nil + n.li++ + d.decode(&n.ia[n.li-1]) + n.ia[n.li-1] = nil + n.li-- + } else { + var v2 interface{} + d.decode(&v2) + } } } } -// MustDecode is like Decode, but panics if unable to Decode. -// This provides insight to the code location that triggered the error. -func (d *Decoder) MustDecode(v interface{}) { - d.decode(v) +func (d *Decoder) setZero(iv interface{}) { + if iv == nil || definitelyNil(iv) { + return + } + var canDecode bool + switch v := iv.(type) { + case *string: + *v = "" + case *bool: + *v = false + case *int: + *v = 0 + case *int8: + *v = 0 + case *int16: + *v = 0 + case *int32: + *v = 0 + case *int64: + *v = 0 + case *uint: + *v = 0 + case *uint8: + *v = 0 + case *uint16: + *v = 0 + case *uint32: + *v = 0 + case *uint64: + *v = 0 + case *float32: + *v = 0 + case *float64: + *v = 0 + case *[]uint8: + *v = nil + case *Raw: + *v = nil + case reflect.Value: + if v, canDecode = isDecodeable(v); canDecode && v.CanSet() { + v.Set(reflect.Zero(v.Type())) + } // TODO: else drain if chan, clear if map, set all to nil if slice??? + default: + if !fastpathDecodeSetZeroTypeSwitch(iv, d) { + v := reflect.ValueOf(iv) + if v, canDecode = isDecodeable(v); canDecode && v.CanSet() { + v.Set(reflect.Zero(v.Type())) + } // TODO: else drain if chan, clear if map, set all to nil if slice??? 
+ } + } } func (d *Decoder) decode(iv interface{}) { - // if ics, ok := iv.(Selfer); ok { - // ics.CodecDecodeSelf(d) - // return - // } - - if d.d.TryDecodeAsNil() { - switch v := iv.(type) { - case nil: - case *string: - *v = "" - case *bool: - *v = false - case *int: - *v = 0 - case *int8: - *v = 0 - case *int16: - *v = 0 - case *int32: - *v = 0 - case *int64: - *v = 0 - case *uint: - *v = 0 - case *uint8: - *v = 0 - case *uint16: - *v = 0 - case *uint32: - *v = 0 - case *uint64: - *v = 0 - case *float32: - *v = 0 - case *float64: - *v = 0 - case *[]uint8: - *v = nil - case *Raw: - *v = nil - case reflect.Value: - if v.Kind() != reflect.Ptr || v.IsNil() { - d.errNotValidPtrValue(v) - } - // d.chkPtrValue(v) - v = v.Elem() - if v.IsValid() { - v.Set(reflect.Zero(v.Type())) - } - default: - rv := reflect.ValueOf(iv) - if rv.Kind() != reflect.Ptr || rv.IsNil() { - d.errNotValidPtrValue(rv) - } - // d.chkPtrValue(rv) - rv = rv.Elem() - if rv.IsValid() { - rv.Set(reflect.Zero(rv.Type())) - } - } + // check nil and interfaces explicitly, + // so that type switches just have a run of constant non-interface types. + if iv == nil { + d.error(cannotDecodeIntoNilErr) + return + } + if v, ok := iv.(Selfer); ok { + v.CodecDecodeSelf(d) return } switch v := iv.(type) { - case nil: - d.error(cannotDecodeIntoNilErr) - return - - case Selfer: - v.CodecDecodeSelf(d) + // case nil: + // case Selfer: case reflect.Value: - if v.Kind() != reflect.Ptr || v.IsNil() { - d.errNotValidPtrValue(v) - } - // d.chkPtrValue(v) - d.decodeValueNotNil(v.Elem(), nil) + v = d.ensureDecodeable(v) + d.decodeValue(v, nil, false, true) // TODO: maybe ask to recognize ... case *string: *v = d.d.DecodeString() @@ -1601,229 +2096,70 @@ func (d *Decoder) decode(iv interface{}) { case *float64: *v = d.d.DecodeFloat(false) case *[]uint8: - *v = d.d.DecodeBytes(*v, false, false) + *v = d.d.DecodeBytes(*v, false) case *Raw: - *v = d.raw() + *v = d.rawBytes() case *interface{}: - d.decodeValueNotNil(reflect.ValueOf(iv).Elem(), nil) + d.decodeValue(reflect.ValueOf(iv).Elem(), nil, false, true) // TODO: consider recognize here + // d.decodeValueNotNil(reflect.ValueOf(iv).Elem()) default: if !fastpathDecodeTypeSwitch(iv, d) { - d.decodeI(iv, true, false, false, false) + v := reflect.ValueOf(iv) + v = d.ensureDecodeable(v) + d.decodeValue(v, nil, false, false) + // d.decodeValueFallback(v) } } } -func (d *Decoder) preDecodeValue(rv reflect.Value, tryNil bool) (rv2 reflect.Value, proceed bool) { - if tryNil && d.d.TryDecodeAsNil() { - // No need to check if a ptr, recursively, to determine - // whether to set value to nil. - // Just always set value to its zero type. - if rv.IsValid() { // rv.CanSet() // always settable, except it's invalid - rv.Set(reflect.Zero(rv.Type())) - } - return - } - +func (d *Decoder) decodeValue(rv reflect.Value, fn *codecFn, tryRecognized, chkAll bool) { // If stream is not containing a nil value, then we can deref to the base // non-pointer value, and decode into that. 
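// Illustrative sketch, not part of this patch: decode() above checks for the
// package's Selfer interface before anything else, so a type can take over its
// own wire representation entirely. The Point type below is invented; it
// delegates to nested MustEncode/MustDecode calls with an alternate shape,
// which is one common way to write a Selfer (exact output formatting and the
// behaviour of nested calls depend on the handle and codec version).
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

type Point struct{ X, Y int }

// CodecEncodeSelf / CodecDecodeSelf satisfy codec.Selfer on *Point.
func (p *Point) CodecEncodeSelf(e *codec.Encoder) {
	e.MustEncode([2]int{p.X, p.Y}) // encode as a 2-element array, not a map
}

func (p *Point) CodecDecodeSelf(d *codec.Decoder) {
	var a [2]int
	d.MustDecode(&a)
	p.X, p.Y = a[0], a[1]
}

func main() {
	var jh codec.JsonHandle
	var buf []byte
	p := Point{1, 2}
	codec.NewEncoderBytes(&buf, &jh).MustEncode(&p)
	var q Point
	codec.NewDecoderBytes(buf, &jh).MustDecode(&q)
	fmt.Println(string(buf), q) // [1,2] {1 2}
}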
- for rv.Kind() == reflect.Ptr { - if rv.IsNil() { - rv.Set(reflect.New(rv.Type().Elem())) - } - rv = rv.Elem() - } - return rv, true -} - -func (d *Decoder) decodeI(iv interface{}, checkPtr, tryNil, checkFastpath, checkCodecSelfer bool) { - rv := reflect.ValueOf(iv) - if checkPtr { - if rv.Kind() != reflect.Ptr || rv.IsNil() { - d.errNotValidPtrValue(rv) - } - // d.chkPtrValue(rv) - } - rv, proceed := d.preDecodeValue(rv, tryNil) - if proceed { - fn := d.getDecFn(rv.Type(), checkFastpath, checkCodecSelfer) - fn.f(&fn.i, rv) - } -} - -func (d *Decoder) decodeValue(rv reflect.Value, fn *decFn) { - if rv, proceed := d.preDecodeValue(rv, true); proceed { - if fn == nil { - fn = d.getDecFn(rv.Type(), true, true) - } - fn.f(&fn.i, rv) - } -} - -func (d *Decoder) decodeValueNotNil(rv reflect.Value, fn *decFn) { - if rv, proceed := d.preDecodeValue(rv, false); proceed { - if fn == nil { - fn = d.getDecFn(rv.Type(), true, true) - } - fn.f(&fn.i, rv) - } -} - -func (d *Decoder) getDecFn(rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn *decFn) { - rtid := reflect.ValueOf(rt).Pointer() - - // retrieve or register a focus'ed function for this type - // to eliminate need to do the retrieval multiple times - - // if d.f == nil && d.s == nil { debugf("---->Creating new dec f map for type: %v\n", rt) } - var ok bool - if useMapForCodecCache { - fn, ok = d.f[rtid] - } else { - for i := range d.s { - v := &(d.s[i]) - if v.rtid == rtid { - fn, ok = &(v.fn), true + var rvp reflect.Value + var rvpValid bool + if rv.Kind() == reflect.Ptr { + rvpValid = true + for { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + rvp = rv + rv = rv.Elem() + if rv.Kind() != reflect.Ptr { break } } } - if ok { - return + + if useLookupRecognizedTypes && tryRecognized && isRecognizedRtid(rv2rtid(rv)) { + if rvpValid { + d.decode(rv2i(rvp)) + return + } else if rv.CanAddr() { + d.decode(rv2i(rv.Addr())) + return + } } - if useMapForCodecCache { - if d.f == nil { - d.f = make(map[uintptr]*decFn, initCollectionCap) + if fn == nil { + // always pass checkCodecSelfer=true, in case T or ****T is passed, where *T is a Selfer + fn = d.cf.get(rv.Type(), chkAll, true) // chkAll, chkAll) + } + if fn.i.addrD { + if rvpValid { + fn.fd(d, &fn.i, rvp) + } else if rv.CanAddr() { + fn.fd(d, &fn.i, rv.Addr()) + } else { + fn.fd(d, &fn.i, rv) } - fn = new(decFn) - d.f[rtid] = fn } else { - if d.s == nil { - d.s = make([]decRtidFn, 0, initCollectionCap) - } - d.s = append(d.s, decRtidFn{rtid: rtid}) - fn = &(d.s[len(d.s)-1]).fn + fn.fd(d, &fn.i, rv) } - - // debugf("\tCreating new dec fn for type: %v\n", rt) - ti := d.h.getTypeInfo(rtid, rt) - fi := &(fn.i) - fi.d = d - fi.ti = ti - - // An extension can be registered for any type, regardless of the Kind - // (e.g. type BitSet int64, type MyStruct { / * unexported fields * / }, type X []int, etc. - // - // We can't check if it's an extension byte here first, because the user may have - // registered a pointer or non-pointer type, meaning we may have to recurse first - // before matching a mapped type, even though the extension byte is already detected. - // - // NOTE: if decoding into a nil interface{}, we return a non-nil - // value except even if the container registers a length of 0. 
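// Illustrative sketch, not part of this patch: decodeValue above walks through
// any chain of pointers, allocating nil links as it goes, so the decoder always
// ends up with a settable non-pointer value (and keeps the last pointer around
// for the addrD fast path). The same loop in isolation:
package main

import (
	"fmt"
	"reflect"
)

// derefAlloc follows pointers from rv, allocating where nil, and returns the
// final non-pointer value plus the last pointer seen (if any).
func derefAlloc(rv reflect.Value) (elem, lastPtr reflect.Value) {
	for rv.Kind() == reflect.Ptr {
		if rv.IsNil() {
			rv.Set(reflect.New(rv.Type().Elem()))
		}
		lastPtr = rv
		rv = rv.Elem()
	}
	return rv, lastPtr
}

func main() {
	var p **int // nil chain of pointers
	elem, _ := derefAlloc(reflect.ValueOf(&p).Elem())
	elem.SetInt(5)
	fmt.Println(**p) // 5
}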
- if checkCodecSelfer && ti.cs { - fn.f = (*decFnInfo).selferUnmarshal - } else if rtid == rawExtTypId { - fn.f = (*decFnInfo).rawExt - } else if rtid == rawTypId { - fn.f = (*decFnInfo).raw - } else if d.d.IsBuiltinType(rtid) { - fn.f = (*decFnInfo).builtin - } else if xfFn := d.h.getExt(rtid); xfFn != nil { - fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext - fn.f = (*decFnInfo).ext - } else if supportMarshalInterfaces && d.be && ti.bunm { - fn.f = (*decFnInfo).binaryUnmarshal - } else if supportMarshalInterfaces && !d.be && d.js && ti.junm { - //If JSON, we should check JSONUnmarshal before textUnmarshal - fn.f = (*decFnInfo).jsonUnmarshal - } else if supportMarshalInterfaces && !d.be && ti.tunm { - fn.f = (*decFnInfo).textUnmarshal - } else { - rk := rt.Kind() - if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) { - if rt.PkgPath() == "" { - if idx := fastpathAV.index(rtid); idx != -1 { - fn.f = fastpathAV[idx].decfn - } - } else { - // use mapping for underlying type if there - ok = false - var rtu reflect.Type - if rk == reflect.Map { - rtu = reflect.MapOf(rt.Key(), rt.Elem()) - } else { - rtu = reflect.SliceOf(rt.Elem()) - } - rtuid := reflect.ValueOf(rtu).Pointer() - if idx := fastpathAV.index(rtuid); idx != -1 { - xfnf := fastpathAV[idx].decfn - xrt := fastpathAV[idx].rt - fn.f = func(xf *decFnInfo, xrv reflect.Value) { - // xfnf(xf, xrv.Convert(xrt)) - xfnf(xf, xrv.Addr().Convert(reflect.PtrTo(xrt)).Elem()) - } - } - } - } - if fn.f == nil { - switch rk { - case reflect.String: - fn.f = (*decFnInfo).kString - case reflect.Bool: - fn.f = (*decFnInfo).kBool - case reflect.Int: - fn.f = (*decFnInfo).kInt - case reflect.Int64: - fn.f = (*decFnInfo).kInt64 - case reflect.Int32: - fn.f = (*decFnInfo).kInt32 - case reflect.Int8: - fn.f = (*decFnInfo).kInt8 - case reflect.Int16: - fn.f = (*decFnInfo).kInt16 - case reflect.Float32: - fn.f = (*decFnInfo).kFloat32 - case reflect.Float64: - fn.f = (*decFnInfo).kFloat64 - case reflect.Uint8: - fn.f = (*decFnInfo).kUint8 - case reflect.Uint64: - fn.f = (*decFnInfo).kUint64 - case reflect.Uint: - fn.f = (*decFnInfo).kUint - case reflect.Uint32: - fn.f = (*decFnInfo).kUint32 - case reflect.Uint16: - fn.f = (*decFnInfo).kUint16 - // case reflect.Ptr: - // fn.f = (*decFnInfo).kPtr - case reflect.Uintptr: - fn.f = (*decFnInfo).kUintptr - case reflect.Interface: - fn.f = (*decFnInfo).kInterface - case reflect.Struct: - fn.f = (*decFnInfo).kStruct - case reflect.Chan: - fi.seq = seqTypeChan - fn.f = (*decFnInfo).kSlice - case reflect.Slice: - fi.seq = seqTypeSlice - fn.f = (*decFnInfo).kSlice - case reflect.Array: - fi.seq = seqTypeArray - fn.f = (*decFnInfo).kArray - case reflect.Map: - fn.f = (*decFnInfo).kMap - default: - fn.f = (*decFnInfo).kErr - } - } - } - - return + // return rv } func (d *Decoder) structFieldNotFound(index int, rvkencname string) { @@ -1846,15 +2182,33 @@ func (d *Decoder) arrayCannotExpand(sliceLen, streamLen int) { } } -func (d *Decoder) chkPtrValue(rv reflect.Value) { - // We can only decode into a non-nil pointer - if rv.Kind() == reflect.Ptr && !rv.IsNil() { - return +func isDecodeable(rv reflect.Value) (rv2 reflect.Value, canDecode bool) { + switch rv.Kind() { + case reflect.Array: + return rv, true + case reflect.Ptr: + if !rv.IsNil() { + return rv.Elem(), true + } + case reflect.Slice, reflect.Chan, reflect.Map: + if !rv.IsNil() { + return rv, true + } } - d.errNotValidPtrValue(rv) + return } -func (d *Decoder) errNotValidPtrValue(rv reflect.Value) { +func (d *Decoder) ensureDecodeable(rv 
reflect.Value) (rv2 reflect.Value) { + // decode can take any reflect.Value that is a inherently addressable i.e. + // - array + // - non-nil chan (we will SEND to it) + // - non-nil slice (we will set its elements) + // - non-nil map (we will put into it) + // - non-nil pointer (we can "update" it) + rv2, canDecode := isDecodeable(rv) + if canDecode { + return + } if !rv.IsValid() { d.error(cannotDecodeIntoNilErr) return @@ -1863,10 +2217,32 @@ func (d *Decoder) errNotValidPtrValue(rv reflect.Value) { d.errorf("cannot decode into a value without an interface: %v", rv) return } - rvi := rv.Interface() - d.errorf("cannot decode into non-pointer or nil pointer. Got: %v, %T, %v", rv.Kind(), rvi, rvi) + rvi := rv2i(rv) + d.errorf("cannot decode into value of kind: %v, type: %T, %v", rv.Kind(), rvi, rvi) + return } +// func (d *Decoder) chkPtrValue(rv reflect.Value) { +// // We can only decode into a non-nil pointer +// if rv.Kind() == reflect.Ptr && !rv.IsNil() { +// return +// } +// d.errNotValidPtrValue(rv) +// } + +// func (d *Decoder) errNotValidPtrValue(rv reflect.Value) { +// if !rv.IsValid() { +// d.error(cannotDecodeIntoNilErr) +// return +// } +// if !rv.CanInterface() { +// d.errorf("cannot decode into a value without an interface: %v", rv) +// return +// } +// rvi := rv2i(rv) +// d.errorf("cannot decode into non-pointer or nil pointer. Got: %v, %T, %v", rv.Kind(), rvi, rvi) +// } + func (d *Decoder) error(err error) { panic(err) } @@ -1881,34 +2257,30 @@ func (d *Decoder) errorf(format string, params ...interface{}) { // Possibly get an interned version of a string // -// This should mostly be used for map keys, where the key type is string +// This should mostly be used for map keys, where the key type is string. +// This is because keys of a map/struct are typically reused across many objects. func (d *Decoder) string(v []byte) (s string) { - if d.is != nil { - s, ok := d.is[string(v)] // no allocation here, per go implementation - if !ok { - s = string(v) // new allocation here - d.is[s] = s - } - return s + if d.is == nil { + return string(v) // don't return stringView, as we need a real string here. } - return string(v) // don't return stringView, as we need a real string here. + s, ok := d.is[string(v)] // no allocation here, per go implementation + if !ok { + s = string(v) // new allocation here + d.is[s] = s + } + return s } -// func (d *Decoder) intern(s string) { -// if d.is != nil { -// d.is[s] = s -// } -// } - // nextValueBytes returns the next value in the stream as a set of bytes. -func (d *Decoder) nextValueBytes() []byte { +func (d *Decoder) nextValueBytes() (bs []byte) { d.d.uncacheRead() d.r.track() d.swallow() - return d.r.stopTrack() + bs = d.r.stopTrack() + return } -func (d *Decoder) raw() []byte { +func (d *Decoder) rawBytes() []byte { // ensure that this is not a view into the bytes // i.e. make new copy always. 
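// Illustrative sketch, not part of this patch: Decoder.string above interns
// map-key strings so repeated keys across many decoded objects share a single
// allocation; the m[string(v)] lookup form is recognized by the Go compiler
// and avoids allocating, so only a miss pays for a real string. A reduced
// version of the same idea:
package main

import "fmt"

type interner map[string]string

// intern returns a string for b, reusing a previously seen identical string.
func (in interner) intern(b []byte) string {
	if s, ok := in[string(b)]; ok { // lookup with string(b): no allocation
		return s
	}
	s := string(b) // first sighting: allocate once and remember it
	in[s] = s
	return s
}

func main() {
	in := interner{}
	k1 := in.intern([]byte("request_id"))
	k2 := in.intern([]byte("request_id"))
	fmt.Println(k1 == k2, len(in)) // true 1 (one stored copy, reused)
}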
bs := d.nextValueBytes() @@ -1944,29 +2316,21 @@ func (d *Decoder) decSliceHelperStart() (x decSliceHelper, clen int) { } func (x decSliceHelper) End() { - cr := x.d.cr - if cr == nil { - return - } if x.array { - cr.sendContainerState(containerArrayEnd) + x.d.d.ReadArrayEnd() } else { - cr.sendContainerState(containerMapEnd) + x.d.d.ReadMapEnd() } } func (x decSliceHelper) ElemContainerState(index int) { - cr := x.d.cr - if cr == nil { - return - } if x.array { - cr.sendContainerState(containerArrayElem) + x.d.d.ReadArrayElem() } else { if index%2 == 0 { - cr.sendContainerState(containerMapKey) + x.d.d.ReadMapElemKey() } else { - cr.sendContainerState(containerMapValue) + x.d.d.ReadMapElemValue() } } } @@ -1983,12 +2347,11 @@ func decByteSlice(r decReader, clen, maxInitLen int, bs []byte) (bsOut []byte) { r.readb(bsOut) } else { // bsOut = make([]byte, clen) - len2, _ := decInferLen(clen, maxInitLen, 1) + len2 := decInferLen(clen, maxInitLen, 1) bsOut = make([]byte, len2) r.readb(bsOut) for len2 < clen { - len3, _ := decInferLen(clen-len2, maxInitLen, 1) - // fmt.Printf(">>>>> TESTING: in loop: clen: %v, maxInitLen: %v, len2: %v, len3: %v\n", clen, maxInitLen, len2, len3) + len3 := decInferLen(clen-len2, maxInitLen, 1) bs3 := bsOut bsOut = make([]byte, len2+len3) copy(bsOut, bs3) @@ -2019,11 +2382,14 @@ func detachZeroCopyBytes(isBytesReader bool, dest []byte, in []byte) (out []byte // - maxlen: max length to be returned. // if <= 0, it is unset, and we infer it based on the unit size // - unit: number of bytes for each element of the collection -func decInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) { +func decInferLen(clen, maxlen, unit int) (rvlen int) { // handle when maxlen is not set i.e. <= 0 if clen <= 0 { return } + if unit == 0 { + return clen + } if maxlen <= 0 { // no maxlen defined. Use maximum of 256K memory, with a floor of 4K items. // maxlen = 256 * 1024 / unit @@ -2038,39 +2404,49 @@ func decInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) { } if clen > maxlen { rvlen = maxlen - truncated = true } else { rvlen = clen } return - // if clen <= 0 { - // rvlen = 0 - // } else if maxlen > 0 && clen > maxlen { - // rvlen = maxlen - // truncated = true - // } else { - // rvlen = clen - // } - // return } -// // implement overall decReader wrapping both, for possible use inline: -// type decReaderT struct { -// bytes bool -// rb *bytesDecReader -// ri *ioDecReader -// } -// -// // implement *Decoder as a decReader. -// // Using decReaderT (defined just above) caused performance degradation -// // possibly because of constant copying the value, -// // and some value->interface conversion causing allocation. -// func (d *Decoder) unreadn1() { -// if d.bytes { -// d.rb.unreadn1() -// } else { -// d.ri.unreadn1() -// } -// } -// ... for other methods of decReader. -// Testing showed that performance improvement was negligible. 
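// Illustrative sketch, not part of this patch: decInferLen above bounds how
// much the decoder pre-allocates for a container whose declared length comes
// from the (untrusted) stream, using the element size and either the handle's
// MaxInitLen or an internal memory budget; decByteSlice then grows in chunks
// as the real data arrives. A simplified version with an assumed 256 KiB
// budget (the real thresholds in decInferLen differ slightly):
package main

import "fmt"

func inferLen(clen, maxlen, unit int) int {
	if clen <= 0 || unit <= 0 {
		return clen
	}
	if maxlen <= 0 {
		maxlen = 256 * 1024 / unit // cap the initial allocation at ~256 KiB
	}
	if clen > maxlen {
		return maxlen
	}
	return clen
}

func main() {
	// A stream claiming one million 16-byte elements only gets a 16384-element
	// initial allocation; the rest is grown as elements actually decode.
	fmt.Println(inferLen(1000000, 0, 16)) // 16384
	fmt.Println(inferLen(100, 0, 16))     // 100
}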
+func decExpandSliceRV(s reflect.Value, st reflect.Type, stElemSize, num, slen, scap int) ( + s2 reflect.Value, scap2 int, changed bool) { + l1 := slen + num // new slice length + if l1 < slen { + panic("expandSlice: slice overflow") + } + if l1 <= scap { + if s.CanSet() { + s.SetLen(l1) + } else { + s2 = s.Slice(0, l1) + scap2 = scap + changed = true + } + return + } + scap2 = growCap(scap, stElemSize, num) + s2 = reflect.MakeSlice(st, l1, scap2) + changed = true + reflect.Copy(s2, s) + return +} + +func decReadFull(r io.Reader, bs []byte) (n int, err error) { + var nn int + for n < len(bs) && err == nil { + nn, err = r.Read(bs[n:]) + if nn > 0 { + if err == io.EOF { + // leave EOF for next time + err = nil + } + n += nn + } + } + + // do not do this - it serves no purpose + // if n != len(bs) && err == io.EOF { err = io.ErrUnexpectedEOF } + return +} diff --git a/vendor/github.com/ugorji/go/codec/encode.go b/vendor/github.com/ugorji/go/codec/encode.go index 268154d24c..132ddf15de 100644 --- a/vendor/github.com/ugorji/go/codec/encode.go +++ b/vendor/github.com/ugorji/go/codec/encode.go @@ -4,6 +4,7 @@ package codec import ( + "bufio" "encoding" "fmt" "io" @@ -12,9 +13,7 @@ import ( "sync" ) -const ( - defEncByteBufSize = 1 << 6 // 4:16, 6:64, 8:256, 10:1024 -) +const defEncByteBufSize = 1 << 6 // 4:16, 6:64, 8:256, 10:1024 // AsSymbolFlag defines what should be encoded as symbols. type AsSymbolFlag uint8 @@ -49,7 +48,7 @@ type encWriter interface { // encDriver abstracts the actual codec (binc vs msgpack, etc) type encDriver interface { - IsBuiltinType(rt uintptr) bool + // IsBuiltinType(rt uintptr) bool EncodeBuiltin(rt uintptr, v interface{}) EncodeNil() EncodeInt(i int64) @@ -60,35 +59,56 @@ type encDriver interface { // encodeExtPreamble(xtag byte, length int) EncodeRawExt(re *RawExt, e *Encoder) EncodeExt(v interface{}, xtag uint64, ext Ext, e *Encoder) - EncodeArrayStart(length int) - EncodeMapStart(length int) + WriteArrayStart(length int) + WriteArrayElem() + WriteArrayEnd() + WriteMapStart(length int) + WriteMapElemKey() + WriteMapElemValue() + WriteMapEnd() EncodeString(c charEncoding, v string) EncodeSymbol(v string) EncodeStringBytes(c charEncoding, v []byte) + //TODO //encBignum(f *big.Int) //encStringRunes(c charEncoding, v []rune) reset() + atEndOfEncode() +} + +type ioEncStringWriter interface { + WriteString(s string) (n int, err error) +} + +type ioEncFlusher interface { + Flush() error } type encDriverAsis interface { EncodeAsis(v []byte) } -type encNoSeparator struct{} +// type encNoSeparator struct{} +// func (_ encNoSeparator) EncodeEnd() {} -func (_ encNoSeparator) EncodeEnd() {} +type encDriverNoopContainerWriter struct{} -type ioEncWriterWriter interface { - WriteByte(c byte) error - WriteString(s string) (n int, err error) - Write(p []byte) (n int, err error) -} +func (_ encDriverNoopContainerWriter) WriteArrayStart(length int) {} +func (_ encDriverNoopContainerWriter) WriteArrayElem() {} +func (_ encDriverNoopContainerWriter) WriteArrayEnd() {} +func (_ encDriverNoopContainerWriter) WriteMapStart(length int) {} +func (_ encDriverNoopContainerWriter) WriteMapElemKey() {} +func (_ encDriverNoopContainerWriter) WriteMapElemValue() {} +func (_ encDriverNoopContainerWriter) WriteMapEnd() {} +func (_ encDriverNoopContainerWriter) atEndOfEncode() {} -type ioEncStringWriter interface { - WriteString(s string) (n int, err error) -} +// type ioEncWriterWriter interface { +// WriteByte(c byte) error +// WriteString(s string) (n int, err error) +// Write(p []byte) (n int, 
err error) +// } type EncodeOptions struct { // Encode a struct as an array, and not as a map @@ -146,83 +166,111 @@ type EncodeOptions struct { // AsSymbolMapStringKeys // AsSymbolMapStringKeysFlag | AsSymbolStructFieldNameFlag AsSymbols AsSymbolFlag + + // WriterBufferSize is the size of the buffer used when writing. + // + // if > 0, we use a smart buffer internally for performance purposes. + WriterBufferSize int } // --------------------------------------------- -type simpleIoEncWriterWriter struct { - w io.Writer - bw io.ByteWriter - sw ioEncStringWriter - bs [1]byte +type simpleIoEncWriter struct { + io.Writer } -func (o *simpleIoEncWriterWriter) WriteByte(c byte) (err error) { - if o.bw != nil { - return o.bw.WriteByte(c) - } - // _, err = o.w.Write([]byte{c}) - o.bs[0] = c - _, err = o.w.Write(o.bs[:]) - return -} +// type bufIoEncWriter struct { +// w io.Writer +// buf []byte +// err error +// } -func (o *simpleIoEncWriterWriter) WriteString(s string) (n int, err error) { - if o.sw != nil { - return o.sw.WriteString(s) - } - // return o.w.Write([]byte(s)) - return o.w.Write(bytesView(s)) -} - -func (o *simpleIoEncWriterWriter) Write(p []byte) (n int, err error) { - return o.w.Write(p) -} - -// ---------------------------------------- +// func (x *bufIoEncWriter) Write(b []byte) (n int, err error) { +// if x.err != nil { +// return 0, x.err +// } +// if cap(x.buf)-len(x.buf) >= len(b) { +// x.buf = append(x.buf, b) +// return len(b), nil +// } +// n, err = x.w.Write(x.buf) +// if err != nil { +// x.err = err +// return 0, x.err +// } +// n, err = x.w.Write(b) +// x.err = err +// return +// } // ioEncWriter implements encWriter and can write to an io.Writer implementation type ioEncWriter struct { - w ioEncWriterWriter - s simpleIoEncWriterWriter - // x [8]byte // temp byte array re-used internally for efficiency + w io.Writer + ww io.Writer + bw io.ByteWriter + sw ioEncStringWriter + fw ioEncFlusher + b [8]byte +} + +func (z *ioEncWriter) WriteByte(b byte) (err error) { + // x.bs[0] = b + // _, err = x.ww.Write(x.bs[:]) + z.b[0] = b + _, err = z.w.Write(z.b[:1]) + return +} + +func (z *ioEncWriter) WriteString(s string) (n int, err error) { + return z.w.Write(bytesView(s)) } func (z *ioEncWriter) writeb(bs []byte) { - if len(bs) == 0 { - return - } - n, err := z.w.Write(bs) - if err != nil { + // if len(bs) == 0 { + // return + // } + if _, err := z.ww.Write(bs); err != nil { panic(err) } - if n != len(bs) { - panic(fmt.Errorf("incorrect num bytes written. Expecting: %v, Wrote: %v", len(bs), n)) - } } func (z *ioEncWriter) writestr(s string) { - n, err := z.w.WriteString(s) - if err != nil { + // if len(s) == 0 { + // return + // } + if _, err := z.sw.WriteString(s); err != nil { panic(err) } - if n != len(s) { - panic(fmt.Errorf("incorrect num bytes written. 
Expecting: %v, Wrote: %v", len(s), n)) - } } func (z *ioEncWriter) writen1(b byte) { - if err := z.w.WriteByte(b); err != nil { + if err := z.bw.WriteByte(b); err != nil { panic(err) } } -func (z *ioEncWriter) writen2(b1 byte, b2 byte) { - z.writen1(b1) - z.writen1(b2) +func (z *ioEncWriter) writen2(b1, b2 byte) { + var err error + if err = z.bw.WriteByte(b1); err == nil { + if err = z.bw.WriteByte(b2); err == nil { + return + } + } + panic(err) } -func (z *ioEncWriter) atEndOfEncode() {} +// func (z *ioEncWriter) writen5(b1, b2, b3, b4, b5 byte) { +// z.b[0], z.b[1], z.b[2], z.b[3], z.b[4] = b1, b2, b3, b4, b5 +// if _, err := z.ww.Write(z.b[:5]); err != nil { +// panic(err) +// } +// } + +func (z *ioEncWriter) atEndOfEncode() { + if z.fw != nil { + z.fw.Flush() + } +} // ---------------------------------------- @@ -235,9 +283,6 @@ type bytesEncWriter struct { } func (z *bytesEncWriter) writeb(s []byte) { - if len(s) == 0 { - return - } oc, a := z.growNoAlloc(len(s)) if a { z.growAlloc(len(s), oc) @@ -246,9 +291,6 @@ func (z *bytesEncWriter) writeb(s []byte) { } func (z *bytesEncWriter) writestr(s string) { - if len(s) == 0 { - return - } oc, a := z.growNoAlloc(len(s)) if a { z.growAlloc(len(s), oc) @@ -264,7 +306,7 @@ func (z *bytesEncWriter) writen1(b1 byte) { z.b[oc] = b1 } -func (z *bytesEncWriter) writen2(b1 byte, b2 byte) { +func (z *bytesEncWriter) writen2(b1, b2 byte) { oc, a := z.growNoAlloc(2) if a { z.growAlloc(2, oc) @@ -304,158 +346,127 @@ func (z *bytesEncWriter) growAlloc(n int, oldcursor int) { // --------------------------------------------- -type encFnInfo struct { - e *Encoder - ti *typeInfo - xfFn Ext - xfTag uint64 - seq seqType +func (e *Encoder) builtin(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeBuiltin(f.ti.rtid, rv2i(rv)) } -func (f *encFnInfo) builtin(rv reflect.Value) { - f.e.e.EncodeBuiltin(f.ti.rtid, rv.Interface()) +func (e *Encoder) raw(f *codecFnInfo, rv reflect.Value) { + e.rawBytes(rv2i(rv).(Raw)) } -func (f *encFnInfo) raw(rv reflect.Value) { - f.e.raw(rv.Interface().(Raw)) +func (e *Encoder) rawExt(f *codecFnInfo, rv reflect.Value) { + // rev := rv2i(rv).(RawExt) + // e.e.EncodeRawExt(&rev, e) + // var re *RawExt + // if rv.CanAddr() { + // re = rv2i(rv.Addr()).(*RawExt) + // } else { + // rev := rv2i(rv).(RawExt) + // re = &rev + // } + // e.e.EncodeRawExt(re, e) + e.e.EncodeRawExt(rv2i(rv).(*RawExt), e) } -func (f *encFnInfo) rawExt(rv reflect.Value) { - // rev := rv.Interface().(RawExt) - // f.e.e.EncodeRawExt(&rev, f.e) - var re *RawExt - if rv.CanAddr() { - re = rv.Addr().Interface().(*RawExt) - } else { - rev := rv.Interface().(RawExt) - re = &rev - } - f.e.e.EncodeRawExt(re, f.e) -} - -func (f *encFnInfo) ext(rv reflect.Value) { +func (e *Encoder) ext(f *codecFnInfo, rv reflect.Value) { // if this is a struct|array and it was addressable, then pass the address directly (not the value) - if k := rv.Kind(); (k == reflect.Struct || k == reflect.Array) && rv.CanAddr() { - rv = rv.Addr() - } - f.e.e.EncodeExt(rv.Interface(), f.xfTag, f.xfFn, f.e) + // if k := rv.Kind(); (k == reflect.Struct || k == reflect.Array) && rv.CanAddr() { + // rv = rv.Addr() + // } + e.e.EncodeExt(rv2i(rv), f.xfTag, f.xfFn, e) } -func (f *encFnInfo) getValueForMarshalInterface(rv reflect.Value, indir int8) (v interface{}, proceed bool) { - if indir == 0 { - v = rv.Interface() - } else if indir == -1 { - // If a non-pointer was passed to Encode(), then that value is not addressable. - // Take addr if addressable, else copy value to an addressable value. 
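A small sketch of the addressability pattern described in the comment above (and relied on by the addrE handling later in this diff): when the value passed to Encode is not addressable, copy it into a freshly allocated pointer so methods with pointer receivers can still be invoked. The helper name is ours.

package main

import (
	"fmt"
	"reflect"
)

// addressable returns an addressable reflect.Value holding the same data
// as rv, copying into a new *T when rv itself cannot be addressed.
func addressable(rv reflect.Value) reflect.Value {
	if rv.CanAddr() {
		return rv.Addr()
	}
	rv2 := reflect.New(rv.Type()) // *T, always addressable
	rv2.Elem().Set(rv)
	return rv2
}

func main() {
	v := reflect.ValueOf(42)              // not addressable
	p := addressable(v)                   // *int wrapping a copy
	fmt.Println(p.Kind(), p.Elem().Int()) // ptr 42
}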
- if rv.CanAddr() { - v = rv.Addr().Interface() - } else { - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - v = rv2.Interface() - // fmt.Printf("rv.Type: %v, rv2.Type: %v, v: %v\n", rv.Type(), rv2.Type(), v) - } - } else { - for j := int8(0); j < indir; j++ { - if rv.IsNil() { - f.e.e.EncodeNil() - return - } - rv = rv.Elem() - } - v = rv.Interface() - } - return v, true +// func rviptr(rv reflect.Value) (v interface{}) { +// // If a non-pointer was passed to Encode(), then that value is not addressable. +// // Take addr if addressable, else copy value to an addressable value. +// if rv.CanAddr() { +// v = rv2i(rv.Addr()) +// } else { +// rv2 := reflect.New(rv.Type()) +// rv2.Elem().Set(rv) +// v = rv2i(rv2) +// } +// return v +// } + +func (e *Encoder) selferMarshal(f *codecFnInfo, rv reflect.Value) { + rv2i(rv).(Selfer).CodecEncodeSelf(e) } -func (f *encFnInfo) selferMarshal(rv reflect.Value) { - if v, proceed := f.getValueForMarshalInterface(rv, f.ti.csIndir); proceed { - v.(Selfer).CodecEncodeSelf(f.e) - } +func (e *Encoder) binaryMarshal(f *codecFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(encoding.BinaryMarshaler).MarshalBinary() + e.marshal(bs, fnerr, false, c_RAW) } -func (f *encFnInfo) binaryMarshal(rv reflect.Value) { - if v, proceed := f.getValueForMarshalInterface(rv, f.ti.bmIndir); proceed { - bs, fnerr := v.(encoding.BinaryMarshaler).MarshalBinary() - f.e.marshal(bs, fnerr, false, c_RAW) - } +func (e *Encoder) textMarshal(f *codecFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(encoding.TextMarshaler).MarshalText() + e.marshal(bs, fnerr, false, c_UTF8) } -func (f *encFnInfo) textMarshal(rv reflect.Value) { - if v, proceed := f.getValueForMarshalInterface(rv, f.ti.tmIndir); proceed { - // debugf(">>>> encoding.TextMarshaler: %T", rv.Interface()) - bs, fnerr := v.(encoding.TextMarshaler).MarshalText() - f.e.marshal(bs, fnerr, false, c_UTF8) - } +func (e *Encoder) jsonMarshal(f *codecFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(jsonMarshaler).MarshalJSON() + e.marshal(bs, fnerr, true, c_UTF8) } -func (f *encFnInfo) jsonMarshal(rv reflect.Value) { - if v, proceed := f.getValueForMarshalInterface(rv, f.ti.jmIndir); proceed { - bs, fnerr := v.(jsonMarshaler).MarshalJSON() - f.e.marshal(bs, fnerr, true, c_UTF8) - } +func (e *Encoder) kBool(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeBool(rv.Bool()) } -func (f *encFnInfo) kBool(rv reflect.Value) { - f.e.e.EncodeBool(rv.Bool()) +func (e *Encoder) kString(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeString(c_UTF8, rv.String()) } -func (f *encFnInfo) kString(rv reflect.Value) { - f.e.e.EncodeString(c_UTF8, rv.String()) +func (e *Encoder) kFloat64(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeFloat64(rv.Float()) } -func (f *encFnInfo) kFloat64(rv reflect.Value) { - f.e.e.EncodeFloat64(rv.Float()) +func (e *Encoder) kFloat32(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeFloat32(float32(rv.Float())) } -func (f *encFnInfo) kFloat32(rv reflect.Value) { - f.e.e.EncodeFloat32(float32(rv.Float())) +func (e *Encoder) kInt(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeInt(rv.Int()) } -func (f *encFnInfo) kInt(rv reflect.Value) { - f.e.e.EncodeInt(rv.Int()) +func (e *Encoder) kUint(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeUint(rv.Uint()) } -func (f *encFnInfo) kUint(rv reflect.Value) { - f.e.e.EncodeUint(rv.Uint()) +func (e *Encoder) kInvalid(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeNil() } -func (f *encFnInfo) kInvalid(rv reflect.Value) { - f.e.e.EncodeNil() +func (e *Encoder) kErr(f 
*codecFnInfo, rv reflect.Value) { + e.errorf("unsupported kind %s, for %#v", rv.Kind(), rv) } -func (f *encFnInfo) kErr(rv reflect.Value) { - f.e.errorf("unsupported kind %s, for %#v", rv.Kind(), rv) -} - -func (f *encFnInfo) kSlice(rv reflect.Value) { +func (e *Encoder) kSlice(f *codecFnInfo, rv reflect.Value) { ti := f.ti + ee := e.e // array may be non-addressable, so we have to manage with care // (don't call rv.Bytes, rv.Slice, etc). // E.g. type struct S{B [2]byte}; // Encode(S{}) will bomb on "panic: slice of unaddressable array". - e := f.e if f.seq != seqTypeArray { if rv.IsNil() { - e.e.EncodeNil() + ee.EncodeNil() return } // If in this method, then there was no extension function defined. // So it's okay to treat as []byte. if ti.rtid == uint8SliceTypId { - e.e.EncodeStringBytes(c_RAW, rv.Bytes()) + ee.EncodeStringBytes(c_RAW, rv.Bytes()) return } } - cr := e.cr + elemsep := e.hh.hasElemSeparators() rtelem := ti.rt.Elem() l := rv.Len() if ti.rtid == uint8SliceTypId || rtelem.Kind() == reflect.Uint8 { switch f.seq { case seqTypeArray: - // if l == 0 { e.e.encodeStringBytes(c_RAW, nil) } else if rv.CanAddr() { - e.e.EncodeStringBytes(c_RAW, rv.Slice(0, l).Bytes()) + ee.EncodeStringBytes(c_RAW, rv.Slice(0, l).Bytes()) } else { var bs []byte if l <= cap(e.b) { @@ -464,27 +475,24 @@ func (f *encFnInfo) kSlice(rv reflect.Value) { bs = make([]byte, l) } reflect.Copy(reflect.ValueOf(bs), rv) - // TODO: Test that reflect.Copy works instead of manual one-by-one - // for i := 0; i < l; i++ { - // bs[i] = byte(rv.Index(i).Uint()) - // } - e.e.EncodeStringBytes(c_RAW, bs) + ee.EncodeStringBytes(c_RAW, bs) } + return case seqTypeSlice: - e.e.EncodeStringBytes(c_RAW, rv.Bytes()) - case seqTypeChan: - bs := e.b[:0] - // do not use range, so that the number of elements encoded - // does not change, and encoding does not hang waiting on someone to close chan. - // for b := range rv.Interface().(<-chan byte) { - // bs = append(bs, b) - // } - ch := rv.Interface().(<-chan byte) - for i := 0; i < l; i++ { - bs = append(bs, <-ch) - } - e.e.EncodeStringBytes(c_RAW, bs) + ee.EncodeStringBytes(c_RAW, rv.Bytes()) + return } + } + if ti.rtid == uint8SliceTypId && f.seq == seqTypeChan { + bs := e.b[:0] + // do not use range, so that the number of elements encoded + // does not change, and encoding does not hang waiting on someone to close chan. + // for b := range rv2i(rv).(<-chan byte) { bs = append(bs, b) } + ch := rv2i(rv).(<-chan byte) + for i := 0; i < l; i++ { + bs = append(bs, <-ch) + } + ee.EncodeStringBytes(c_RAW, bs) return } @@ -493,78 +501,174 @@ func (f *encFnInfo) kSlice(rv reflect.Value) { e.errorf("mapBySlice requires even slice length, but got %v", l) return } - e.e.EncodeMapStart(l / 2) + ee.WriteMapStart(l / 2) } else { - e.e.EncodeArrayStart(l) + ee.WriteArrayStart(l) } if l > 0 { - for rtelem.Kind() == reflect.Ptr { - rtelem = rtelem.Elem() + var fn *codecFn + var recognizedVtyp bool + if useLookupRecognizedTypes { + recognizedVtyp = isRecognizedRtidOrPtr(rt2id(rtelem)) } - // if kind is reflect.Interface, do not pre-determine the - // encoding type, because preEncodeValue may break it down to - // a concrete type and kInterface will bomb. 
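The seqTypeChan branch above drains a fixed number of bytes from the channel instead of ranging over it: a range would block until the channel is closed and could change the element count mid-encode. A minimal sketch of that pattern (helper name ours; the real code appends into the encoder's scratch buffer e.b):

package main

import "fmt"

// drainN reads exactly n bytes from ch without using range, so the number
// of encoded elements stays fixed and the encoder never waits for close.
func drainN(ch <-chan byte, n int) []byte {
	bs := make([]byte, 0, n)
	for i := 0; i < n; i++ {
		bs = append(bs, <-ch)
	}
	return bs
}

func main() {
	ch := make(chan byte, 3)
	ch <- 'a'
	ch <- 'b'
	ch <- 'c'
	fmt.Println(string(drainN(ch, 3))) // "abc", even though ch is never closed
}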
- var fn *encFn - if rtelem.Kind() != reflect.Interface { - rtelemid := reflect.ValueOf(rtelem).Pointer() - fn = e.getEncFn(rtelemid, rtelem, true, true) + if !(useLookupRecognizedTypes && recognizedVtyp) { + for rtelem.Kind() == reflect.Ptr { + rtelem = rtelem.Elem() + } + // if kind is reflect.Interface, do not pre-determine the + // encoding type, because preEncodeValue may break it down to + // a concrete type and kInterface will bomb. + if rtelem.Kind() != reflect.Interface { + fn = e.cf.get(rtelem, true, true) + } } // TODO: Consider perf implication of encoding odd index values as symbols if type is string for j := 0; j < l; j++ { - if cr != nil { + if elemsep { if ti.mbs { if j%2 == 0 { - cr.sendContainerState(containerMapKey) + ee.WriteMapElemKey() } else { - cr.sendContainerState(containerMapValue) + ee.WriteMapElemValue() } } else { - cr.sendContainerState(containerArrayElem) + ee.WriteArrayElem() } } if f.seq == seqTypeChan { if rv2, ok2 := rv.Recv(); ok2 { - e.encodeValue(rv2, fn) + if useLookupRecognizedTypes && recognizedVtyp { + e.encode(rv2i(rv2)) + } else { + e.encodeValue(rv2, fn, true) + } } else { - e.encode(nil) // WE HAVE TO DO SOMETHING, so nil if nothing received. + ee.EncodeNil() // WE HAVE TO DO SOMETHING, so nil if nothing received. } } else { - e.encodeValue(rv.Index(j), fn) + if useLookupRecognizedTypes && recognizedVtyp { + e.encode(rv2i(rv.Index(j))) + } else { + e.encodeValue(rv.Index(j), fn, true) + } } } } - if cr != nil { - if ti.mbs { - cr.sendContainerState(containerMapEnd) - } else { - cr.sendContainerState(containerArrayEnd) - } + if ti.mbs { + ee.WriteMapEnd() + } else { + ee.WriteArrayEnd() } } -func (f *encFnInfo) kStruct(rv reflect.Value) { +func (e *Encoder) kStructNoOmitempty(f *codecFnInfo, rv reflect.Value) { fti := f.ti - e := f.e - cr := e.cr + elemsep := e.hh.hasElemSeparators() tisfi := fti.sfip toMap := !(fti.toArray || e.h.StructToArray) - newlen := len(fti.sfi) + if toMap { + tisfi = fti.sfi + } + ee := e.e - // Use sync.Pool to reduce allocating slices unnecessarily. - // The cost of sync.Pool is less than the cost of new allocation. - pool, poolv, fkvs := encStructPoolGet(newlen) + sfn := structFieldNode{v: rv, update: false} + if toMap { + ee.WriteMapStart(len(tisfi)) + // asSymbols := e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 + asSymbols := e.h.AsSymbols == AsSymbolDefault || e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 + if !elemsep { + for _, si := range tisfi { + if asSymbols { + ee.EncodeSymbol(si.encName) + } else { + ee.EncodeString(c_UTF8, si.encName) + } + e.encodeValue(sfn.field(si), nil, true) + } + } else { + for _, si := range tisfi { + ee.WriteMapElemKey() + if asSymbols { + ee.EncodeSymbol(si.encName) + } else { + ee.EncodeString(c_UTF8, si.encName) + } + ee.WriteMapElemValue() + e.encodeValue(sfn.field(si), nil, true) + } + } + ee.WriteMapEnd() + } else { + ee.WriteArrayStart(len(tisfi)) + if !elemsep { + for _, si := range tisfi { + e.encodeValue(sfn.field(si), nil, true) + } + } else { + for _, si := range tisfi { + ee.WriteArrayElem() + e.encodeValue(sfn.field(si), nil, true) + } + } + ee.WriteArrayEnd() + } +} +func (e *Encoder) kStruct(f *codecFnInfo, rv reflect.Value) { + fti := f.ti + elemsep := e.hh.hasElemSeparators() + tisfi := fti.sfip + toMap := !(fti.toArray || e.h.StructToArray) // if toMap, use the sorted array. 
If toArray, use unsorted array (to match sequence in struct) if toMap { tisfi = fti.sfi } + newlen := len(fti.sfi) + ee := e.e + + // Use sync.Pool to reduce allocating slices unnecessarily. + // The cost of sync.Pool is less than the cost of new allocation. + // + // Each element of the array pools one of encStructPool(8|16|32|64). + // It allows the re-use of slices up to 64 in length. + // A performance cost of encoding structs was collecting + // which values were empty and should be omitted. + // We needed slices of reflect.Value and string to collect them. + // This shared pool reduces the amount of unnecessary creation we do. + // The cost is that of locking sometimes, but sync.Pool is efficient + // enough to reduce thread contention. + + var spool *sync.Pool + var poolv interface{} + var fkvs []stringRv + if newlen <= 8 { + spool, poolv = pool.stringRv8() + fkvs = poolv.(*[8]stringRv)[:newlen] + } else if newlen <= 16 { + spool, poolv = pool.stringRv16() + fkvs = poolv.(*[16]stringRv)[:newlen] + } else if newlen <= 32 { + spool, poolv = pool.stringRv32() + fkvs = poolv.(*[32]stringRv)[:newlen] + } else if newlen <= 64 { + spool, poolv = pool.stringRv64() + fkvs = poolv.(*[64]stringRv)[:newlen] + } else if newlen <= 128 { + spool, poolv = pool.stringRv128() + fkvs = poolv.(*[128]stringRv)[:newlen] + } else { + fkvs = make([]stringRv, newlen) + } + newlen = 0 var kv stringRv recur := e.h.RecursiveEmptyCheck + sfn := structFieldNode{v: rv, update: false} for _, si := range tisfi { - kv.r = si.field(rv, false) + // kv.r = si.field(rv, false) + kv.r = sfn.field(si) if toMap { if si.omitEmpty && isEmptyValue(kv.r, recur, recur) { continue @@ -584,88 +688,69 @@ func (f *encFnInfo) kStruct(rv reflect.Value) { newlen++ } - // debugf(">>>> kStruct: newlen: %v", newlen) - // sep := !e.be - ee := e.e //don't dereference every time - if toMap { - ee.EncodeMapStart(newlen) + ee.WriteMapStart(newlen) // asSymbols := e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 asSymbols := e.h.AsSymbols == AsSymbolDefault || e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 - for j := 0; j < newlen; j++ { - kv = fkvs[j] - if cr != nil { - cr.sendContainerState(containerMapKey) + if !elemsep { + for j := 0; j < newlen; j++ { + kv = fkvs[j] + if asSymbols { + ee.EncodeSymbol(kv.v) + } else { + ee.EncodeString(c_UTF8, kv.v) + } + e.encodeValue(kv.r, nil, true) } - if asSymbols { - ee.EncodeSymbol(kv.v) - } else { - ee.EncodeString(c_UTF8, kv.v) + } else { + for j := 0; j < newlen; j++ { + kv = fkvs[j] + ee.WriteMapElemKey() + if asSymbols { + ee.EncodeSymbol(kv.v) + } else { + ee.EncodeString(c_UTF8, kv.v) + } + ee.WriteMapElemValue() + e.encodeValue(kv.r, nil, true) } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encodeValue(kv.r, nil) - } - if cr != nil { - cr.sendContainerState(containerMapEnd) } + ee.WriteMapEnd() } else { - ee.EncodeArrayStart(newlen) - for j := 0; j < newlen; j++ { - kv = fkvs[j] - if cr != nil { - cr.sendContainerState(containerArrayElem) + ee.WriteArrayStart(newlen) + if !elemsep { + for j := 0; j < newlen; j++ { + e.encodeValue(fkvs[j].r, nil, true) + } + } else { + for j := 0; j < newlen; j++ { + ee.WriteArrayElem() + e.encodeValue(fkvs[j].r, nil, true) } - e.encodeValue(kv.r, nil) - } - if cr != nil { - cr.sendContainerState(containerArrayEnd) } + ee.WriteArrayEnd() } // do not use defer. Instead, use explicit pool return at end of function. // defer has a cost we are trying to avoid. // If there is a panic and these slices are not returned, it is ok. 
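kStruct above borrows fixed-size scratch arrays from sync.Pool and returns them explicitly rather than via defer, trading a missed Put on panic (the buffer is then simply garbage-collected) for lower per-call overhead. A stripped-down sketch of the same pattern, with simplified element types:

package main

import (
	"fmt"
	"sync"
)

// scratch pools fixed-size arrays and hands out a slice header over them,
// mirroring the stringRv pools above.
var scratch = sync.Pool{New: func() interface{} { return new([8]string) }}

func encodeNames(names []string) {
	arr := scratch.Get().(*[8]string)
	buf := arr[:0]
	for _, n := range names {
		buf = append(buf, n) // stays within the pooled backing array for <=8 names
	}
	fmt.Println(buf)
	scratch.Put(arr) // explicit return, no defer
}

func main() {
	encodeNames([]string{"a", "b", "c"})
}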
- if pool != nil { - pool.Put(poolv) + if spool != nil { + spool.Put(poolv) } } -// func (f *encFnInfo) kPtr(rv reflect.Value) { -// debugf(">>>>>>> ??? encode kPtr called - shouldn't get called") -// if rv.IsNil() { -// f.e.e.encodeNil() -// return -// } -// f.e.encodeValue(rv.Elem()) -// } - -// func (f *encFnInfo) kInterface(rv reflect.Value) { -// println("kInterface called") -// debug.PrintStack() -// if rv.IsNil() { -// f.e.e.EncodeNil() -// return -// } -// f.e.encodeValue(rv.Elem(), nil) -// } - -func (f *encFnInfo) kMap(rv reflect.Value) { - ee := f.e.e +func (e *Encoder) kMap(f *codecFnInfo, rv reflect.Value) { + ee := e.e if rv.IsNil() { ee.EncodeNil() return } l := rv.Len() - ee.EncodeMapStart(l) - e := f.e - cr := e.cr + ee.WriteMapStart(l) + elemsep := e.hh.hasElemSeparators() if l == 0 { - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() return } var asSymbols bool @@ -676,245 +761,249 @@ func (f *encFnInfo) kMap(rv reflect.Value) { // However, if kind is reflect.Interface, do not pre-determine the // encoding type, because preEncodeValue may break it down to // a concrete type and kInterface will bomb. - var keyFn, valFn *encFn + var keyFn, valFn *codecFn ti := f.ti - rtkey := ti.rt.Key() - rtval := ti.rt.Elem() - rtkeyid := reflect.ValueOf(rtkey).Pointer() - // keyTypeIsString := f.ti.rt.Key().Kind() == reflect.String - var keyTypeIsString = rtkeyid == stringTypId - if keyTypeIsString { - asSymbols = e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 - } else { - for rtkey.Kind() == reflect.Ptr { - rtkey = rtkey.Elem() - } - if rtkey.Kind() != reflect.Interface { - rtkeyid = reflect.ValueOf(rtkey).Pointer() - keyFn = e.getEncFn(rtkeyid, rtkey, true, true) - } - } + rtkey0 := ti.rt.Key() + rtkey := rtkey0 + rtval0 := ti.rt.Elem() + rtval := rtval0 + rtkeyid := rt2id(rtkey0) + rtvalid := rt2id(rtval0) for rtval.Kind() == reflect.Ptr { rtval = rtval.Elem() } if rtval.Kind() != reflect.Interface { - rtvalid := reflect.ValueOf(rtval).Pointer() - valFn = e.getEncFn(rtvalid, rtval, true, true) + valFn = e.cf.get(rtval, true, true) } mks := rv.MapKeys() - // for j, lmks := 0, len(mks); j < lmks; j++ { if e.h.Canonical { - e.kMapCanonical(rtkeyid, rtkey, rv, mks, valFn, asSymbols) + e.kMapCanonical(rtkey, rv, mks, valFn, asSymbols) + ee.WriteMapEnd() + return + } + + var recognizedKtyp, recognizedVtyp bool + var keyTypeIsString = rtkeyid == stringTypId + if keyTypeIsString { + asSymbols = e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 } else { - for j := range mks { - if cr != nil { - cr.sendContainerState(containerMapKey) + if useLookupRecognizedTypes { + recognizedKtyp = isRecognizedRtidOrPtr(rtkeyid) + if recognizedKtyp { + goto LABEL1 } - if keyTypeIsString { - if asSymbols { - ee.EncodeSymbol(mks[j].String()) - } else { - ee.EncodeString(c_UTF8, mks[j].String()) - } - } else { - e.encodeValue(mks[j], keyFn) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encodeValue(rv.MapIndex(mks[j]), valFn) + } + for rtkey.Kind() == reflect.Ptr { + rtkey = rtkey.Elem() + } + if rtkey.Kind() != reflect.Interface { + rtkeyid = rt2id(rtkey) + keyFn = e.cf.get(rtkey, true, true) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) + + // for j, lmks := 0, len(mks); j < lmks; j++ { +LABEL1: + if useLookupRecognizedTypes { + recognizedVtyp = isRecognizedRtidOrPtr(rtvalid) } + for j := range mks { + if elemsep { + ee.WriteMapElemKey() + } + if keyTypeIsString { + if asSymbols { + ee.EncodeSymbol(mks[j].String()) + } else { + 
ee.EncodeString(c_UTF8, mks[j].String()) + } + } else if useLookupRecognizedTypes && recognizedKtyp { + e.encode(rv2i(mks[j])) + } else { + e.encodeValue(mks[j], keyFn, true) + } + if elemsep { + ee.WriteMapElemValue() + } + if useLookupRecognizedTypes && recognizedVtyp { + e.encode(rv2i(rv.MapIndex(mks[j]))) + } else { + e.encodeValue(rv.MapIndex(mks[j]), valFn, true) + } + } + ee.WriteMapEnd() } -func (e *Encoder) kMapCanonical(rtkeyid uintptr, rtkey reflect.Type, rv reflect.Value, mks []reflect.Value, valFn *encFn, asSymbols bool) { +func (e *Encoder) kMapCanonical(rtkey reflect.Type, rv reflect.Value, mks []reflect.Value, valFn *codecFn, asSymbols bool) { ee := e.e - cr := e.cr + elemsep := e.hh.hasElemSeparators() // we previously did out-of-band if an extension was registered. // This is not necessary, as the natural kind is sufficient for ordering. - if rtkeyid == uint8SliceTypId { - mksv := make([]bytesRv, len(mks)) + // WHAT IS THIS? rtkeyid can never be a []uint8, per spec + // if rtkeyid == uint8SliceTypId { + // mksv := make([]bytesRv, len(mks)) + // for i, k := range mks { + // v := &mksv[i] + // v.r = k + // v.v = k.Bytes() + // } + // sort.Sort(bytesRvSlice(mksv)) + // for i := range mksv { + // if elemsep { + // ee.WriteMapElemKey() + // } + // ee.EncodeStringBytes(c_RAW, mksv[i].v) + // if elemsep { + // ee.WriteMapElemValue() + // } + // e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + // } + // return + // } + + switch rtkey.Kind() { + case reflect.Bool: + mksv := make([]boolRv, len(mks)) for i, k := range mks { v := &mksv[i] v.r = k - v.v = k.Bytes() + v.v = k.Bool() } - sort.Sort(bytesRvSlice(mksv)) + sort.Sort(boolRvSlice(mksv)) for i := range mksv { - if cr != nil { - cr.sendContainerState(containerMapKey) + if elemsep { + ee.WriteMapElemKey() } - ee.EncodeStringBytes(c_RAW, mksv[i].v) - if cr != nil { - cr.sendContainerState(containerMapValue) + ee.EncodeBool(mksv[i].v) + if elemsep { + ee.WriteMapElemValue() } - e.encodeValue(rv.MapIndex(mksv[i].r), valFn) + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) } - } else { - switch rtkey.Kind() { - case reflect.Bool: - mksv := make([]boolRv, len(mks)) - for i, k := range mks { - v := &mksv[i] - v.r = k - v.v = k.Bool() + case reflect.String: + mksv := make([]stringRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.String() + } + sort.Sort(stringRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() } - sort.Sort(boolRvSlice(mksv)) - for i := range mksv { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeBool(mksv[i].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encodeValue(rv.MapIndex(mksv[i].r), valFn) + if asSymbols { + ee.EncodeSymbol(mksv[i].v) + } else { + ee.EncodeString(c_UTF8, mksv[i].v) } - case reflect.String: - mksv := make([]stringRv, len(mks)) - for i, k := range mks { - v := &mksv[i] - v.r = k - v.v = k.String() + if elemsep { + ee.WriteMapElemValue() } - sort.Sort(stringRvSlice(mksv)) - for i := range mksv { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - if asSymbols { - ee.EncodeSymbol(mksv[i].v) - } else { - ee.EncodeString(c_UTF8, mksv[i].v) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encodeValue(rv.MapIndex(mksv[i].r), valFn) + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr: + mksv := make([]uintRv, len(mks)) + for i, k := range mks { + v := &mksv[i] 
+ v.r = k + v.v = k.Uint() + } + sort.Sort(uintRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() } - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr: - mksv := make([]uintRv, len(mks)) - for i, k := range mks { - v := &mksv[i] - v.r = k - v.v = k.Uint() + ee.EncodeUint(mksv[i].v) + if elemsep { + ee.WriteMapElemValue() } - sort.Sort(uintRvSlice(mksv)) - for i := range mksv { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeUint(mksv[i].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encodeValue(rv.MapIndex(mksv[i].r), valFn) + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + mksv := make([]intRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Int() + } + sort.Sort(intRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() } - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - mksv := make([]intRv, len(mks)) - for i, k := range mks { - v := &mksv[i] - v.r = k - v.v = k.Int() + ee.EncodeInt(mksv[i].v) + if elemsep { + ee.WriteMapElemValue() } - sort.Sort(intRvSlice(mksv)) - for i := range mksv { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeInt(mksv[i].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encodeValue(rv.MapIndex(mksv[i].r), valFn) + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.Float32: + mksv := make([]floatRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + sort.Sort(floatRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() } - case reflect.Float32: - mksv := make([]floatRv, len(mks)) - for i, k := range mks { - v := &mksv[i] - v.r = k - v.v = k.Float() + ee.EncodeFloat32(float32(mksv[i].v)) + if elemsep { + ee.WriteMapElemValue() } - sort.Sort(floatRvSlice(mksv)) - for i := range mksv { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat32(float32(mksv[i].v)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encodeValue(rv.MapIndex(mksv[i].r), valFn) + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.Float64: + mksv := make([]floatRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + sort.Sort(floatRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() } - case reflect.Float64: - mksv := make([]floatRv, len(mks)) - for i, k := range mks { - v := &mksv[i] - v.r = k - v.v = k.Float() + ee.EncodeFloat64(mksv[i].v) + if elemsep { + ee.WriteMapElemValue() } - sort.Sort(floatRvSlice(mksv)) - for i := range mksv { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - ee.EncodeFloat64(mksv[i].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encodeValue(rv.MapIndex(mksv[i].r), valFn) + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + default: + // out-of-band + // first encode each key to a []byte first, then sort them, then record + var mksv []byte = make([]byte, 0, len(mks)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + mksbv := make([]bytesRv, len(mks)) + for i, k := range mks { + v := &mksbv[i] + l := len(mksv) + e2.MustEncode(k) + v.r = k + v.v = mksv[l:] + } + sort.Sort(bytesRvSlice(mksbv)) + for j := range mksbv { + if elemsep { + ee.WriteMapElemKey() } - default: - // 
out-of-band - // first encode each key to a []byte first, then sort them, then record - var mksv []byte = make([]byte, 0, len(mks)*16) // temporary byte slice for the encoding - e2 := NewEncoderBytes(&mksv, e.hh) - mksbv := make([]bytesRv, len(mks)) - for i, k := range mks { - v := &mksbv[i] - l := len(mksv) - e2.MustEncode(k) - v.r = k - v.v = mksv[l:] - // fmt.Printf(">>>>> %s\n", mksv[l:]) - } - sort.Sort(bytesRvSlice(mksbv)) - for j := range mksbv { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - e.asis(mksbv[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - e.encodeValue(rv.MapIndex(mksbv[j].r), valFn) + e.asis(mksbv[j].v) + if elemsep { + ee.WriteMapElemValue() } + e.encodeValue(rv.MapIndex(mksbv[j].r), valFn, true) } } } -// -------------------------------------------------- - -// encFn encapsulates the captured variables and the encode function. -// This way, we only do some calculations one times, and pass to the -// code block that should be called (encapsulated in a function) -// instead of executing the checks every time. -type encFn struct { - i encFnInfo - f func(*encFnInfo, reflect.Value) -} - -// -------------------------------------------------- - -type encRtidFn struct { - rtid uintptr - fn encFn -} +// // -------------------------------------------------- // An Encoder writes an object to an output stream in the codec format. type Encoder struct { @@ -922,23 +1011,26 @@ type Encoder struct { e encDriver // NOTE: Encoder shouldn't call it's write methods, // as the handler MAY need to do some coordination. - w encWriter - s []encRtidFn - ci set - be bool // is binary encoding - js bool // is json handle + w encWriter + + hh Handle + h *BasicHandle + + // ---- cpu cache line boundary? wi ioEncWriter wb bytesEncWriter + bw bufio.Writer - h *BasicHandle - hh Handle - - cr containerStateRecv + // cr containerStateRecv as encDriverAsis + // ---- cpu cache line boundary? - f map[uintptr]*encFn - b [scratchByteArrayLen]byte + ci set + err error + + b [scratchByteArrayLen]byte + cf codecFner } // NewEncoder returns an Encoder for encoding into an io.Writer. @@ -963,11 +1055,10 @@ func NewEncoderBytes(out *[]byte, h Handle) *Encoder { } func newEncoder(h Handle) *Encoder { - e := &Encoder{hh: h, h: h.getBasicHandle(), be: h.isBinary()} - _, e.js = h.(*JsonHandle) + e := &Encoder{hh: h, h: h.getBasicHandle()} e.e = h.newEncDriver(e) e.as, _ = e.e.(encDriverAsis) - e.cr, _ = e.e.(containerStateRecv) + // e.cr, _ = e.e.(containerStateRecv) return e } @@ -976,19 +1067,29 @@ func newEncoder(h Handle) *Encoder { // This accommodates using the state of the Encoder, // where it has "cached" information about sub-engines. 
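The new EncodeOptions.WriterBufferSize option (together with the bw bufio.Writer field and the ioEncFlusher hook above) amounts to wrapping the destination in a bufio.Writer and flushing it when the encode finishes. A minimal sketch of that wiring, with our own helper name:

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
)

// wrapWriter buffers writes when a buffer size is configured and returns
// the flush hook that must run at the end of the encode.
func wrapWriter(w io.Writer, size int) (out io.Writer, flush func() error) {
	if size <= 0 {
		return w, func() error { return nil }
	}
	bw := bufio.NewWriterSize(w, size)
	return bw, bw.Flush
}

func main() {
	var dst bytes.Buffer
	w, flush := wrapWriter(&dst, 4096)
	fmt.Fprint(w, "payload")
	_ = flush() // nothing reaches dst until the buffered writer is flushed
	fmt.Println(dst.String())
}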
func (e *Encoder) Reset(w io.Writer) { - ww, ok := w.(ioEncWriterWriter) - if ok { - e.wi.w = ww + var ok bool + e.wi.w = w + if e.h.WriterBufferSize > 0 { + bw := bufio.NewWriterSize(w, e.h.WriterBufferSize) + e.bw = *bw + e.wi.bw = &e.bw + e.wi.sw = &e.bw + e.wi.fw = &e.bw + e.wi.ww = &e.bw } else { - sww := &e.wi.s - sww.w = w - sww.bw, _ = w.(io.ByteWriter) - sww.sw, _ = w.(ioEncStringWriter) - e.wi.w = sww - //ww = bufio.NewWriterSize(w, defEncByteBufSize) + if e.wi.bw, ok = w.(io.ByteWriter); !ok { + e.wi.bw = &e.wi + } + if e.wi.sw, ok = w.(ioEncStringWriter); !ok { + e.wi.sw = &e.wi + } + e.wi.fw, _ = w.(ioEncFlusher) + e.wi.ww = w } e.w = &e.wi e.e.reset() + e.cf.reset(e.hh) + e.err = nil } func (e *Encoder) ResetBytes(out *[]byte) { @@ -999,14 +1100,10 @@ func (e *Encoder) ResetBytes(out *[]byte) { e.wb.b, e.wb.out, e.wb.c = in, out, 0 e.w = &e.wb e.e.reset() + e.cf.reset(e.hh) + e.err = nil } -// func (e *Encoder) sendContainerState(c containerState) { -// if e.cr != nil { -// e.cr.sendContainerState(c) -// } -// } - // Encode writes an object into a stream. // // Encoding can be configured via the struct tag for the fields. @@ -1068,34 +1165,41 @@ func (e *Encoder) ResetBytes(out *[]byte) { // Some formats support symbols (e.g. binc) and will properly encode the string // only once in the stream, and use a tag to refer to it thereafter. func (e *Encoder) Encode(v interface{}) (err error) { - defer panicToErr(&err) - e.encode(v) - e.w.atEndOfEncode() + defer panicToErrs2(&e.err, &err) + e.MustEncode(v) return } // MustEncode is like Encode, but panics if unable to Encode. // This provides insight to the code location that triggered the error. func (e *Encoder) MustEncode(v interface{}) { + if e.err != nil { + panic(e.err) + } e.encode(v) + e.e.atEndOfEncode() e.w.atEndOfEncode() } func (e *Encoder) encode(iv interface{}) { - // if ics, ok := iv.(Selfer); ok { - // ics.CodecEncodeSelf(e) - // return - // } + if iv == nil || definitelyNil(iv) { + e.e.EncodeNil() + return + } + if v, ok := iv.(Selfer); ok { + v.CodecEncodeSelf(e) + return + } switch v := iv.(type) { - case nil: - e.e.EncodeNil() - case Selfer: - v.CodecEncodeSelf(e) + // case nil: + // e.e.EncodeNil() + // case Selfer: + // v.CodecEncodeSelf(e) case Raw: - e.raw(v) + e.rawBytes(v) case reflect.Value: - e.encodeValue(v, nil) + e.encodeValue(v, nil, true) case string: e.e.EncodeString(c_UTF8, v) @@ -1121,6 +1225,8 @@ func (e *Encoder) encode(iv interface{}) { e.e.EncodeUint(uint64(v)) case uint64: e.e.EncodeUint(v) + case uintptr: + e.e.EncodeUint(uint64(v)) case float32: e.e.EncodeFloat32(v) case float64: @@ -1153,6 +1259,8 @@ func (e *Encoder) encode(iv interface{}) { e.e.EncodeUint(uint64(*v)) case *uint64: e.e.EncodeUint(*v) + case *uintptr: + e.e.EncodeUint(uint64(*v)) case *float32: e.e.EncodeFloat32(*v) case *float64: @@ -1162,15 +1270,18 @@ func (e *Encoder) encode(iv interface{}) { e.e.EncodeStringBytes(c_RAW, *v) default: - const checkCodecSelfer1 = true // in case T is passed, where *T is a Selfer, still checkCodecSelfer if !fastpathEncodeTypeSwitch(iv, e) { - e.encodeI(iv, false, checkCodecSelfer1) + // checkfastpath=true (not false), as underlying slice/map type may be fast-path + e.encodeValue(reflect.ValueOf(iv), nil, true) } } } -func (e *Encoder) preEncodeValue(rv reflect.Value) (rv2 reflect.Value, sptr uintptr, proceed bool) { - // use a goto statement instead of a recursive function for ptr/interface. 
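Encode above now routes panics raised during encoding into both the returned error and the Encoder's sticky err field via panicToErrs2, and MustEncode refuses to run once that sticky error is set. A simplified, self-contained sketch of the panic-to-error half (helper name ours; the real helper also normalizes string and runtime panics):

package main

import (
	"errors"
	"fmt"
)

// panicToErr converts a panic raised deep inside an encode call back into
// a returned error.
func panicToErr(err *error) {
	if r := recover(); r != nil {
		if e, ok := r.(error); ok {
			*err = e
		} else {
			*err = fmt.Errorf("%v", r)
		}
	}
}

func encode(fail bool) (err error) {
	defer panicToErr(&err)
	if fail {
		panic(errors.New("unsupported value"))
	}
	return nil
}

func main() {
	fmt.Println(encode(false)) // <nil>
	fmt.Println(encode(true))  // unsupported value
}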
+func (e *Encoder) encodeValue(rv reflect.Value, fn *codecFn, checkFastpath bool) { + // if a valid fn is passed, it MUST BE for the dereferenced type of rv + var sptr uintptr + var rvp reflect.Value + var rvpValid bool TOP: switch rv.Kind() { case reflect.Ptr: @@ -1178,6 +1289,8 @@ TOP: e.e.EncodeNil() return } + rvpValid = true + rvp = rv rv = rv.Elem() if e.h.CheckCircularRef && rv.Kind() == reflect.Struct { // TODO: Movable pointers will be an issue here. Future problem. @@ -1202,167 +1315,38 @@ TOP: return } - proceed = true - rv2 = rv - return -} - -func (e *Encoder) doEncodeValue(rv reflect.Value, fn *encFn, sptr uintptr, - checkFastpath, checkCodecSelfer bool) { - if sptr != 0 { - if (&e.ci).add(sptr) { - e.errorf("circular reference found: # %d", sptr) - } + if sptr != 0 && (&e.ci).add(sptr) { + e.errorf("circular reference found: # %d", sptr) } + if fn == nil { rt := rv.Type() - rtid := reflect.ValueOf(rt).Pointer() - // fn = e.getEncFn(rtid, rt, true, true) - fn = e.getEncFn(rtid, rt, checkFastpath, checkCodecSelfer) + // TODO: calling isRecognizedRtid here is a major slowdown + if false && useLookupRecognizedTypes && isRecognizedRtidOrPtr(rt2id(rt)) { + e.encode(rv2i(rv)) + return + } + // always pass checkCodecSelfer=true, in case T or ****T is passed, where *T is a Selfer + fn = e.cf.get(rt, checkFastpath, true) + } + if fn.i.addrE { + if rvpValid { + fn.fe(e, &fn.i, rvp) + } else if rv.CanAddr() { + fn.fe(e, &fn.i, rv.Addr()) + } else { + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + fn.fe(e, &fn.i, rv2) + } + } else { + fn.fe(e, &fn.i, rv) } - fn.f(&fn.i, rv) if sptr != 0 { (&e.ci).remove(sptr) } } -func (e *Encoder) encodeI(iv interface{}, checkFastpath, checkCodecSelfer bool) { - if rv, sptr, proceed := e.preEncodeValue(reflect.ValueOf(iv)); proceed { - e.doEncodeValue(rv, nil, sptr, checkFastpath, checkCodecSelfer) - } -} - -func (e *Encoder) encodeValue(rv reflect.Value, fn *encFn) { - // if a valid fn is passed, it MUST BE for the dereferenced type of rv - if rv, sptr, proceed := e.preEncodeValue(rv); proceed { - e.doEncodeValue(rv, fn, sptr, true, true) - } -} - -func (e *Encoder) getEncFn(rtid uintptr, rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn *encFn) { - // rtid := reflect.ValueOf(rt).Pointer() - var ok bool - if useMapForCodecCache { - fn, ok = e.f[rtid] - } else { - for i := range e.s { - v := &(e.s[i]) - if v.rtid == rtid { - fn, ok = &(v.fn), true - break - } - } - } - if ok { - return - } - - if useMapForCodecCache { - if e.f == nil { - e.f = make(map[uintptr]*encFn, initCollectionCap) - } - fn = new(encFn) - e.f[rtid] = fn - } else { - if e.s == nil { - e.s = make([]encRtidFn, 0, initCollectionCap) - } - e.s = append(e.s, encRtidFn{rtid: rtid}) - fn = &(e.s[len(e.s)-1]).fn - } - - ti := e.h.getTypeInfo(rtid, rt) - fi := &(fn.i) - fi.e = e - fi.ti = ti - - if checkCodecSelfer && ti.cs { - fn.f = (*encFnInfo).selferMarshal - } else if rtid == rawTypId { - fn.f = (*encFnInfo).raw - } else if rtid == rawExtTypId { - fn.f = (*encFnInfo).rawExt - } else if e.e.IsBuiltinType(rtid) { - fn.f = (*encFnInfo).builtin - } else if xfFn := e.h.getExt(rtid); xfFn != nil { - fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext - fn.f = (*encFnInfo).ext - } else if supportMarshalInterfaces && e.be && ti.bm { - fn.f = (*encFnInfo).binaryMarshal - } else if supportMarshalInterfaces && !e.be && e.js && ti.jm { - //If JSON, we should check JSONMarshal before textMarshal - fn.f = (*encFnInfo).jsonMarshal - } else if supportMarshalInterfaces && !e.be && ti.tm { - fn.f = 
(*encFnInfo).textMarshal - } else { - rk := rt.Kind() - if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) { - if rt.PkgPath() == "" { // un-named slice or map - if idx := fastpathAV.index(rtid); idx != -1 { - fn.f = fastpathAV[idx].encfn - } - } else { - ok = false - // use mapping for underlying type if there - var rtu reflect.Type - if rk == reflect.Map { - rtu = reflect.MapOf(rt.Key(), rt.Elem()) - } else { - rtu = reflect.SliceOf(rt.Elem()) - } - rtuid := reflect.ValueOf(rtu).Pointer() - if idx := fastpathAV.index(rtuid); idx != -1 { - xfnf := fastpathAV[idx].encfn - xrt := fastpathAV[idx].rt - fn.f = func(xf *encFnInfo, xrv reflect.Value) { - xfnf(xf, xrv.Convert(xrt)) - } - } - } - } - if fn.f == nil { - switch rk { - case reflect.Bool: - fn.f = (*encFnInfo).kBool - case reflect.String: - fn.f = (*encFnInfo).kString - case reflect.Float64: - fn.f = (*encFnInfo).kFloat64 - case reflect.Float32: - fn.f = (*encFnInfo).kFloat32 - case reflect.Int, reflect.Int8, reflect.Int64, reflect.Int32, reflect.Int16: - fn.f = (*encFnInfo).kInt - case reflect.Uint8, reflect.Uint64, reflect.Uint, reflect.Uint32, reflect.Uint16, reflect.Uintptr: - fn.f = (*encFnInfo).kUint - case reflect.Invalid: - fn.f = (*encFnInfo).kInvalid - case reflect.Chan: - fi.seq = seqTypeChan - fn.f = (*encFnInfo).kSlice - case reflect.Slice: - fi.seq = seqTypeSlice - fn.f = (*encFnInfo).kSlice - case reflect.Array: - fi.seq = seqTypeArray - fn.f = (*encFnInfo).kSlice - case reflect.Struct: - fn.f = (*encFnInfo).kStruct - // reflect.Ptr and reflect.Interface are handled already by preEncodeValue - // case reflect.Ptr: - // fn.f = (*encFnInfo).kPtr - // case reflect.Interface: - // fn.f = (*encFnInfo).kInterface - case reflect.Map: - fn.f = (*encFnInfo).kMap - default: - fn.f = (*encFnInfo).kErr - } - } - } - - return -} - func (e *Encoder) marshal(bs []byte, fnerr error, asis bool, c charEncoding) { if fnerr != nil { panic(fnerr) @@ -1384,7 +1368,7 @@ func (e *Encoder) asis(v []byte) { } } -func (e *Encoder) raw(vv Raw) { +func (e *Encoder) rawBytes(vv Raw) { v := []byte(vv) if !e.h.Raw { e.errorf("Raw values cannot be encoded: %v", v) @@ -1400,63 +1384,3 @@ func (e *Encoder) errorf(format string, params ...interface{}) { err := fmt.Errorf(format, params...) panic(err) } - -// ---------------------------------------- - -const encStructPoolLen = 5 - -// encStructPool is an array of sync.Pool. -// Each element of the array pools one of encStructPool(8|16|32|64). -// It allows the re-use of slices up to 64 in length. -// A performance cost of encoding structs was collecting -// which values were empty and should be omitted. -// We needed slices of reflect.Value and string to collect them. -// This shared pool reduces the amount of unnecessary creation we do. -// The cost is that of locking sometimes, but sync.Pool is efficient -// enough to reduce thread contention. -var encStructPool [encStructPoolLen]sync.Pool - -func init() { - encStructPool[0].New = func() interface{} { return new([8]stringRv) } - encStructPool[1].New = func() interface{} { return new([16]stringRv) } - encStructPool[2].New = func() interface{} { return new([32]stringRv) } - encStructPool[3].New = func() interface{} { return new([64]stringRv) } - encStructPool[4].New = func() interface{} { return new([128]stringRv) } -} - -func encStructPoolGet(newlen int) (p *sync.Pool, v interface{}, s []stringRv) { - // if encStructPoolLen != 5 { // constant chec, so removed at build time. 
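The removed getEncFn above handled named slice and map types by converting the value to its unnamed underlying type (the xrv.Convert(xrt) call) so a fast-path routine registered for the underlying type could encode it; that lookup now goes through e.cf.get. A small sketch of the conversion trick, with our own type and helper names:

package main

import (
	"fmt"
	"reflect"
)

// Labels is a named map type standing in for any user-defined map/slice type.
type Labels map[string]string

// underlying converts a named map or slice value to its unnamed underlying
// type so a routine written for map[string]string (or []T) can handle it.
func underlying(v interface{}) interface{} {
	rv := reflect.ValueOf(v)
	switch rv.Kind() {
	case reflect.Map:
		u := reflect.MapOf(rv.Type().Key(), rv.Type().Elem())
		return rv.Convert(u).Interface()
	case reflect.Slice:
		u := reflect.SliceOf(rv.Type().Elem())
		return rv.Convert(u).Interface()
	}
	return v
}

func main() {
	l := Labels{"app": "vault"}
	u := underlying(l)
	fmt.Printf("%T -> %T\n", l, u) // main.Labels -> map[string]string
}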
- // panic(errors.New("encStructPoolLen must be equal to 4")) // defensive, in case it is changed - // } - // idxpool := newlen / 8 - if newlen <= 8 { - p = &encStructPool[0] - v = p.Get() - s = v.(*[8]stringRv)[:newlen] - } else if newlen <= 16 { - p = &encStructPool[1] - v = p.Get() - s = v.(*[16]stringRv)[:newlen] - } else if newlen <= 32 { - p = &encStructPool[2] - v = p.Get() - s = v.(*[32]stringRv)[:newlen] - } else if newlen <= 64 { - p = &encStructPool[3] - v = p.Get() - s = v.(*[64]stringRv)[:newlen] - } else if newlen <= 128 { - p = &encStructPool[4] - v = p.Get() - s = v.(*[128]stringRv)[:newlen] - } else { - s = make([]stringRv, newlen) - } - return -} - -// ---------------------------------------- - -// func encErr(format string, params ...interface{}) { -// doPanic(msgTagEnc, format, params...) -// } diff --git a/vendor/github.com/ugorji/go/codec/fast-path.generated.go b/vendor/github.com/ugorji/go/codec/fast-path.generated.go index f2e5d2dcf6..ffe0c6d59d 100644 --- a/vendor/github.com/ugorji/go/codec/fast-path.generated.go +++ b/vendor/github.com/ugorji/go/codec/fast-path.generated.go @@ -40,9 +40,6 @@ import ( const fastpathEnabled = true -const fastpathCheckNilFalse = false // for reflect -const fastpathCheckNilTrue = true // for type switch - type fastpathT struct{} var fastpathTV fastpathT @@ -50,8 +47,8 @@ var fastpathTV fastpathT type fastpathE struct { rtid uintptr rt reflect.Type - encfn func(*encFnInfo, reflect.Value) - decfn func(*decFnInfo, reflect.Value) + encfn func(*Encoder, *codecFnInfo, reflect.Value) + decfn func(*Decoder, *codecFnInfo, reflect.Value) } type fastpathA [271]fastpathE @@ -83,287 +80,296 @@ var fastpathAV fastpathA // due to possible initialization loop error, make fastpath in an init() func init() { + if useLookupRecognizedTypes && recognizedRtidsLoaded { + panic("recognizedRtidsLoaded = true - cannot happen") + } i := 0 - fn := func(v interface{}, fe func(*encFnInfo, reflect.Value), fd func(*decFnInfo, reflect.Value)) (f fastpathE) { + fn := func(v interface{}, + fe func(*Encoder, *codecFnInfo, reflect.Value), + fd func(*Decoder, *codecFnInfo, reflect.Value)) (f fastpathE) { xrt := reflect.TypeOf(v) - xptr := reflect.ValueOf(xrt).Pointer() + xptr := rt2id(xrt) + if useLookupRecognizedTypes { + recognizedRtids = append(recognizedRtids, xptr) + recognizedRtidPtrs = append(recognizedRtidPtrs, rt2id(reflect.PtrTo(xrt))) + } fastpathAV[i] = fastpathE{xptr, xrt, fe, fd} i++ return } - fn([]interface{}(nil), (*encFnInfo).fastpathEncSliceIntfR, (*decFnInfo).fastpathDecSliceIntfR) - fn([]string(nil), (*encFnInfo).fastpathEncSliceStringR, (*decFnInfo).fastpathDecSliceStringR) - fn([]float32(nil), (*encFnInfo).fastpathEncSliceFloat32R, (*decFnInfo).fastpathDecSliceFloat32R) - fn([]float64(nil), (*encFnInfo).fastpathEncSliceFloat64R, (*decFnInfo).fastpathDecSliceFloat64R) - fn([]uint(nil), (*encFnInfo).fastpathEncSliceUintR, (*decFnInfo).fastpathDecSliceUintR) - fn([]uint16(nil), (*encFnInfo).fastpathEncSliceUint16R, (*decFnInfo).fastpathDecSliceUint16R) - fn([]uint32(nil), (*encFnInfo).fastpathEncSliceUint32R, (*decFnInfo).fastpathDecSliceUint32R) - fn([]uint64(nil), (*encFnInfo).fastpathEncSliceUint64R, (*decFnInfo).fastpathDecSliceUint64R) - fn([]uintptr(nil), (*encFnInfo).fastpathEncSliceUintptrR, (*decFnInfo).fastpathDecSliceUintptrR) - fn([]int(nil), (*encFnInfo).fastpathEncSliceIntR, (*decFnInfo).fastpathDecSliceIntR) - fn([]int8(nil), (*encFnInfo).fastpathEncSliceInt8R, (*decFnInfo).fastpathDecSliceInt8R) - fn([]int16(nil), 
(*encFnInfo).fastpathEncSliceInt16R, (*decFnInfo).fastpathDecSliceInt16R) - fn([]int32(nil), (*encFnInfo).fastpathEncSliceInt32R, (*decFnInfo).fastpathDecSliceInt32R) - fn([]int64(nil), (*encFnInfo).fastpathEncSliceInt64R, (*decFnInfo).fastpathDecSliceInt64R) - fn([]bool(nil), (*encFnInfo).fastpathEncSliceBoolR, (*decFnInfo).fastpathDecSliceBoolR) + fn([]interface{}(nil), (*Encoder).fastpathEncSliceIntfR, (*Decoder).fastpathDecSliceIntfR) + fn([]string(nil), (*Encoder).fastpathEncSliceStringR, (*Decoder).fastpathDecSliceStringR) + fn([]float32(nil), (*Encoder).fastpathEncSliceFloat32R, (*Decoder).fastpathDecSliceFloat32R) + fn([]float64(nil), (*Encoder).fastpathEncSliceFloat64R, (*Decoder).fastpathDecSliceFloat64R) + fn([]uint(nil), (*Encoder).fastpathEncSliceUintR, (*Decoder).fastpathDecSliceUintR) + fn([]uint16(nil), (*Encoder).fastpathEncSliceUint16R, (*Decoder).fastpathDecSliceUint16R) + fn([]uint32(nil), (*Encoder).fastpathEncSliceUint32R, (*Decoder).fastpathDecSliceUint32R) + fn([]uint64(nil), (*Encoder).fastpathEncSliceUint64R, (*Decoder).fastpathDecSliceUint64R) + fn([]uintptr(nil), (*Encoder).fastpathEncSliceUintptrR, (*Decoder).fastpathDecSliceUintptrR) + fn([]int(nil), (*Encoder).fastpathEncSliceIntR, (*Decoder).fastpathDecSliceIntR) + fn([]int8(nil), (*Encoder).fastpathEncSliceInt8R, (*Decoder).fastpathDecSliceInt8R) + fn([]int16(nil), (*Encoder).fastpathEncSliceInt16R, (*Decoder).fastpathDecSliceInt16R) + fn([]int32(nil), (*Encoder).fastpathEncSliceInt32R, (*Decoder).fastpathDecSliceInt32R) + fn([]int64(nil), (*Encoder).fastpathEncSliceInt64R, (*Decoder).fastpathDecSliceInt64R) + fn([]bool(nil), (*Encoder).fastpathEncSliceBoolR, (*Decoder).fastpathDecSliceBoolR) - fn(map[interface{}]interface{}(nil), (*encFnInfo).fastpathEncMapIntfIntfR, (*decFnInfo).fastpathDecMapIntfIntfR) - fn(map[interface{}]string(nil), (*encFnInfo).fastpathEncMapIntfStringR, (*decFnInfo).fastpathDecMapIntfStringR) - fn(map[interface{}]uint(nil), (*encFnInfo).fastpathEncMapIntfUintR, (*decFnInfo).fastpathDecMapIntfUintR) - fn(map[interface{}]uint8(nil), (*encFnInfo).fastpathEncMapIntfUint8R, (*decFnInfo).fastpathDecMapIntfUint8R) - fn(map[interface{}]uint16(nil), (*encFnInfo).fastpathEncMapIntfUint16R, (*decFnInfo).fastpathDecMapIntfUint16R) - fn(map[interface{}]uint32(nil), (*encFnInfo).fastpathEncMapIntfUint32R, (*decFnInfo).fastpathDecMapIntfUint32R) - fn(map[interface{}]uint64(nil), (*encFnInfo).fastpathEncMapIntfUint64R, (*decFnInfo).fastpathDecMapIntfUint64R) - fn(map[interface{}]uintptr(nil), (*encFnInfo).fastpathEncMapIntfUintptrR, (*decFnInfo).fastpathDecMapIntfUintptrR) - fn(map[interface{}]int(nil), (*encFnInfo).fastpathEncMapIntfIntR, (*decFnInfo).fastpathDecMapIntfIntR) - fn(map[interface{}]int8(nil), (*encFnInfo).fastpathEncMapIntfInt8R, (*decFnInfo).fastpathDecMapIntfInt8R) - fn(map[interface{}]int16(nil), (*encFnInfo).fastpathEncMapIntfInt16R, (*decFnInfo).fastpathDecMapIntfInt16R) - fn(map[interface{}]int32(nil), (*encFnInfo).fastpathEncMapIntfInt32R, (*decFnInfo).fastpathDecMapIntfInt32R) - fn(map[interface{}]int64(nil), (*encFnInfo).fastpathEncMapIntfInt64R, (*decFnInfo).fastpathDecMapIntfInt64R) - fn(map[interface{}]float32(nil), (*encFnInfo).fastpathEncMapIntfFloat32R, (*decFnInfo).fastpathDecMapIntfFloat32R) - fn(map[interface{}]float64(nil), (*encFnInfo).fastpathEncMapIntfFloat64R, (*decFnInfo).fastpathDecMapIntfFloat64R) - fn(map[interface{}]bool(nil), (*encFnInfo).fastpathEncMapIntfBoolR, (*decFnInfo).fastpathDecMapIntfBoolR) - fn(map[string]interface{}(nil), 
(*encFnInfo).fastpathEncMapStringIntfR, (*decFnInfo).fastpathDecMapStringIntfR) - fn(map[string]string(nil), (*encFnInfo).fastpathEncMapStringStringR, (*decFnInfo).fastpathDecMapStringStringR) - fn(map[string]uint(nil), (*encFnInfo).fastpathEncMapStringUintR, (*decFnInfo).fastpathDecMapStringUintR) - fn(map[string]uint8(nil), (*encFnInfo).fastpathEncMapStringUint8R, (*decFnInfo).fastpathDecMapStringUint8R) - fn(map[string]uint16(nil), (*encFnInfo).fastpathEncMapStringUint16R, (*decFnInfo).fastpathDecMapStringUint16R) - fn(map[string]uint32(nil), (*encFnInfo).fastpathEncMapStringUint32R, (*decFnInfo).fastpathDecMapStringUint32R) - fn(map[string]uint64(nil), (*encFnInfo).fastpathEncMapStringUint64R, (*decFnInfo).fastpathDecMapStringUint64R) - fn(map[string]uintptr(nil), (*encFnInfo).fastpathEncMapStringUintptrR, (*decFnInfo).fastpathDecMapStringUintptrR) - fn(map[string]int(nil), (*encFnInfo).fastpathEncMapStringIntR, (*decFnInfo).fastpathDecMapStringIntR) - fn(map[string]int8(nil), (*encFnInfo).fastpathEncMapStringInt8R, (*decFnInfo).fastpathDecMapStringInt8R) - fn(map[string]int16(nil), (*encFnInfo).fastpathEncMapStringInt16R, (*decFnInfo).fastpathDecMapStringInt16R) - fn(map[string]int32(nil), (*encFnInfo).fastpathEncMapStringInt32R, (*decFnInfo).fastpathDecMapStringInt32R) - fn(map[string]int64(nil), (*encFnInfo).fastpathEncMapStringInt64R, (*decFnInfo).fastpathDecMapStringInt64R) - fn(map[string]float32(nil), (*encFnInfo).fastpathEncMapStringFloat32R, (*decFnInfo).fastpathDecMapStringFloat32R) - fn(map[string]float64(nil), (*encFnInfo).fastpathEncMapStringFloat64R, (*decFnInfo).fastpathDecMapStringFloat64R) - fn(map[string]bool(nil), (*encFnInfo).fastpathEncMapStringBoolR, (*decFnInfo).fastpathDecMapStringBoolR) - fn(map[float32]interface{}(nil), (*encFnInfo).fastpathEncMapFloat32IntfR, (*decFnInfo).fastpathDecMapFloat32IntfR) - fn(map[float32]string(nil), (*encFnInfo).fastpathEncMapFloat32StringR, (*decFnInfo).fastpathDecMapFloat32StringR) - fn(map[float32]uint(nil), (*encFnInfo).fastpathEncMapFloat32UintR, (*decFnInfo).fastpathDecMapFloat32UintR) - fn(map[float32]uint8(nil), (*encFnInfo).fastpathEncMapFloat32Uint8R, (*decFnInfo).fastpathDecMapFloat32Uint8R) - fn(map[float32]uint16(nil), (*encFnInfo).fastpathEncMapFloat32Uint16R, (*decFnInfo).fastpathDecMapFloat32Uint16R) - fn(map[float32]uint32(nil), (*encFnInfo).fastpathEncMapFloat32Uint32R, (*decFnInfo).fastpathDecMapFloat32Uint32R) - fn(map[float32]uint64(nil), (*encFnInfo).fastpathEncMapFloat32Uint64R, (*decFnInfo).fastpathDecMapFloat32Uint64R) - fn(map[float32]uintptr(nil), (*encFnInfo).fastpathEncMapFloat32UintptrR, (*decFnInfo).fastpathDecMapFloat32UintptrR) - fn(map[float32]int(nil), (*encFnInfo).fastpathEncMapFloat32IntR, (*decFnInfo).fastpathDecMapFloat32IntR) - fn(map[float32]int8(nil), (*encFnInfo).fastpathEncMapFloat32Int8R, (*decFnInfo).fastpathDecMapFloat32Int8R) - fn(map[float32]int16(nil), (*encFnInfo).fastpathEncMapFloat32Int16R, (*decFnInfo).fastpathDecMapFloat32Int16R) - fn(map[float32]int32(nil), (*encFnInfo).fastpathEncMapFloat32Int32R, (*decFnInfo).fastpathDecMapFloat32Int32R) - fn(map[float32]int64(nil), (*encFnInfo).fastpathEncMapFloat32Int64R, (*decFnInfo).fastpathDecMapFloat32Int64R) - fn(map[float32]float32(nil), (*encFnInfo).fastpathEncMapFloat32Float32R, (*decFnInfo).fastpathDecMapFloat32Float32R) - fn(map[float32]float64(nil), (*encFnInfo).fastpathEncMapFloat32Float64R, (*decFnInfo).fastpathDecMapFloat32Float64R) - fn(map[float32]bool(nil), (*encFnInfo).fastpathEncMapFloat32BoolR, 
(*decFnInfo).fastpathDecMapFloat32BoolR) - fn(map[float64]interface{}(nil), (*encFnInfo).fastpathEncMapFloat64IntfR, (*decFnInfo).fastpathDecMapFloat64IntfR) - fn(map[float64]string(nil), (*encFnInfo).fastpathEncMapFloat64StringR, (*decFnInfo).fastpathDecMapFloat64StringR) - fn(map[float64]uint(nil), (*encFnInfo).fastpathEncMapFloat64UintR, (*decFnInfo).fastpathDecMapFloat64UintR) - fn(map[float64]uint8(nil), (*encFnInfo).fastpathEncMapFloat64Uint8R, (*decFnInfo).fastpathDecMapFloat64Uint8R) - fn(map[float64]uint16(nil), (*encFnInfo).fastpathEncMapFloat64Uint16R, (*decFnInfo).fastpathDecMapFloat64Uint16R) - fn(map[float64]uint32(nil), (*encFnInfo).fastpathEncMapFloat64Uint32R, (*decFnInfo).fastpathDecMapFloat64Uint32R) - fn(map[float64]uint64(nil), (*encFnInfo).fastpathEncMapFloat64Uint64R, (*decFnInfo).fastpathDecMapFloat64Uint64R) - fn(map[float64]uintptr(nil), (*encFnInfo).fastpathEncMapFloat64UintptrR, (*decFnInfo).fastpathDecMapFloat64UintptrR) - fn(map[float64]int(nil), (*encFnInfo).fastpathEncMapFloat64IntR, (*decFnInfo).fastpathDecMapFloat64IntR) - fn(map[float64]int8(nil), (*encFnInfo).fastpathEncMapFloat64Int8R, (*decFnInfo).fastpathDecMapFloat64Int8R) - fn(map[float64]int16(nil), (*encFnInfo).fastpathEncMapFloat64Int16R, (*decFnInfo).fastpathDecMapFloat64Int16R) - fn(map[float64]int32(nil), (*encFnInfo).fastpathEncMapFloat64Int32R, (*decFnInfo).fastpathDecMapFloat64Int32R) - fn(map[float64]int64(nil), (*encFnInfo).fastpathEncMapFloat64Int64R, (*decFnInfo).fastpathDecMapFloat64Int64R) - fn(map[float64]float32(nil), (*encFnInfo).fastpathEncMapFloat64Float32R, (*decFnInfo).fastpathDecMapFloat64Float32R) - fn(map[float64]float64(nil), (*encFnInfo).fastpathEncMapFloat64Float64R, (*decFnInfo).fastpathDecMapFloat64Float64R) - fn(map[float64]bool(nil), (*encFnInfo).fastpathEncMapFloat64BoolR, (*decFnInfo).fastpathDecMapFloat64BoolR) - fn(map[uint]interface{}(nil), (*encFnInfo).fastpathEncMapUintIntfR, (*decFnInfo).fastpathDecMapUintIntfR) - fn(map[uint]string(nil), (*encFnInfo).fastpathEncMapUintStringR, (*decFnInfo).fastpathDecMapUintStringR) - fn(map[uint]uint(nil), (*encFnInfo).fastpathEncMapUintUintR, (*decFnInfo).fastpathDecMapUintUintR) - fn(map[uint]uint8(nil), (*encFnInfo).fastpathEncMapUintUint8R, (*decFnInfo).fastpathDecMapUintUint8R) - fn(map[uint]uint16(nil), (*encFnInfo).fastpathEncMapUintUint16R, (*decFnInfo).fastpathDecMapUintUint16R) - fn(map[uint]uint32(nil), (*encFnInfo).fastpathEncMapUintUint32R, (*decFnInfo).fastpathDecMapUintUint32R) - fn(map[uint]uint64(nil), (*encFnInfo).fastpathEncMapUintUint64R, (*decFnInfo).fastpathDecMapUintUint64R) - fn(map[uint]uintptr(nil), (*encFnInfo).fastpathEncMapUintUintptrR, (*decFnInfo).fastpathDecMapUintUintptrR) - fn(map[uint]int(nil), (*encFnInfo).fastpathEncMapUintIntR, (*decFnInfo).fastpathDecMapUintIntR) - fn(map[uint]int8(nil), (*encFnInfo).fastpathEncMapUintInt8R, (*decFnInfo).fastpathDecMapUintInt8R) - fn(map[uint]int16(nil), (*encFnInfo).fastpathEncMapUintInt16R, (*decFnInfo).fastpathDecMapUintInt16R) - fn(map[uint]int32(nil), (*encFnInfo).fastpathEncMapUintInt32R, (*decFnInfo).fastpathDecMapUintInt32R) - fn(map[uint]int64(nil), (*encFnInfo).fastpathEncMapUintInt64R, (*decFnInfo).fastpathDecMapUintInt64R) - fn(map[uint]float32(nil), (*encFnInfo).fastpathEncMapUintFloat32R, (*decFnInfo).fastpathDecMapUintFloat32R) - fn(map[uint]float64(nil), (*encFnInfo).fastpathEncMapUintFloat64R, (*decFnInfo).fastpathDecMapUintFloat64R) - fn(map[uint]bool(nil), (*encFnInfo).fastpathEncMapUintBoolR, (*decFnInfo).fastpathDecMapUintBoolR) - 
fn(map[uint8]interface{}(nil), (*encFnInfo).fastpathEncMapUint8IntfR, (*decFnInfo).fastpathDecMapUint8IntfR) - fn(map[uint8]string(nil), (*encFnInfo).fastpathEncMapUint8StringR, (*decFnInfo).fastpathDecMapUint8StringR) - fn(map[uint8]uint(nil), (*encFnInfo).fastpathEncMapUint8UintR, (*decFnInfo).fastpathDecMapUint8UintR) - fn(map[uint8]uint8(nil), (*encFnInfo).fastpathEncMapUint8Uint8R, (*decFnInfo).fastpathDecMapUint8Uint8R) - fn(map[uint8]uint16(nil), (*encFnInfo).fastpathEncMapUint8Uint16R, (*decFnInfo).fastpathDecMapUint8Uint16R) - fn(map[uint8]uint32(nil), (*encFnInfo).fastpathEncMapUint8Uint32R, (*decFnInfo).fastpathDecMapUint8Uint32R) - fn(map[uint8]uint64(nil), (*encFnInfo).fastpathEncMapUint8Uint64R, (*decFnInfo).fastpathDecMapUint8Uint64R) - fn(map[uint8]uintptr(nil), (*encFnInfo).fastpathEncMapUint8UintptrR, (*decFnInfo).fastpathDecMapUint8UintptrR) - fn(map[uint8]int(nil), (*encFnInfo).fastpathEncMapUint8IntR, (*decFnInfo).fastpathDecMapUint8IntR) - fn(map[uint8]int8(nil), (*encFnInfo).fastpathEncMapUint8Int8R, (*decFnInfo).fastpathDecMapUint8Int8R) - fn(map[uint8]int16(nil), (*encFnInfo).fastpathEncMapUint8Int16R, (*decFnInfo).fastpathDecMapUint8Int16R) - fn(map[uint8]int32(nil), (*encFnInfo).fastpathEncMapUint8Int32R, (*decFnInfo).fastpathDecMapUint8Int32R) - fn(map[uint8]int64(nil), (*encFnInfo).fastpathEncMapUint8Int64R, (*decFnInfo).fastpathDecMapUint8Int64R) - fn(map[uint8]float32(nil), (*encFnInfo).fastpathEncMapUint8Float32R, (*decFnInfo).fastpathDecMapUint8Float32R) - fn(map[uint8]float64(nil), (*encFnInfo).fastpathEncMapUint8Float64R, (*decFnInfo).fastpathDecMapUint8Float64R) - fn(map[uint8]bool(nil), (*encFnInfo).fastpathEncMapUint8BoolR, (*decFnInfo).fastpathDecMapUint8BoolR) - fn(map[uint16]interface{}(nil), (*encFnInfo).fastpathEncMapUint16IntfR, (*decFnInfo).fastpathDecMapUint16IntfR) - fn(map[uint16]string(nil), (*encFnInfo).fastpathEncMapUint16StringR, (*decFnInfo).fastpathDecMapUint16StringR) - fn(map[uint16]uint(nil), (*encFnInfo).fastpathEncMapUint16UintR, (*decFnInfo).fastpathDecMapUint16UintR) - fn(map[uint16]uint8(nil), (*encFnInfo).fastpathEncMapUint16Uint8R, (*decFnInfo).fastpathDecMapUint16Uint8R) - fn(map[uint16]uint16(nil), (*encFnInfo).fastpathEncMapUint16Uint16R, (*decFnInfo).fastpathDecMapUint16Uint16R) - fn(map[uint16]uint32(nil), (*encFnInfo).fastpathEncMapUint16Uint32R, (*decFnInfo).fastpathDecMapUint16Uint32R) - fn(map[uint16]uint64(nil), (*encFnInfo).fastpathEncMapUint16Uint64R, (*decFnInfo).fastpathDecMapUint16Uint64R) - fn(map[uint16]uintptr(nil), (*encFnInfo).fastpathEncMapUint16UintptrR, (*decFnInfo).fastpathDecMapUint16UintptrR) - fn(map[uint16]int(nil), (*encFnInfo).fastpathEncMapUint16IntR, (*decFnInfo).fastpathDecMapUint16IntR) - fn(map[uint16]int8(nil), (*encFnInfo).fastpathEncMapUint16Int8R, (*decFnInfo).fastpathDecMapUint16Int8R) - fn(map[uint16]int16(nil), (*encFnInfo).fastpathEncMapUint16Int16R, (*decFnInfo).fastpathDecMapUint16Int16R) - fn(map[uint16]int32(nil), (*encFnInfo).fastpathEncMapUint16Int32R, (*decFnInfo).fastpathDecMapUint16Int32R) - fn(map[uint16]int64(nil), (*encFnInfo).fastpathEncMapUint16Int64R, (*decFnInfo).fastpathDecMapUint16Int64R) - fn(map[uint16]float32(nil), (*encFnInfo).fastpathEncMapUint16Float32R, (*decFnInfo).fastpathDecMapUint16Float32R) - fn(map[uint16]float64(nil), (*encFnInfo).fastpathEncMapUint16Float64R, (*decFnInfo).fastpathDecMapUint16Float64R) - fn(map[uint16]bool(nil), (*encFnInfo).fastpathEncMapUint16BoolR, (*decFnInfo).fastpathDecMapUint16BoolR) - fn(map[uint32]interface{}(nil), 
(*encFnInfo).fastpathEncMapUint32IntfR, (*decFnInfo).fastpathDecMapUint32IntfR) - fn(map[uint32]string(nil), (*encFnInfo).fastpathEncMapUint32StringR, (*decFnInfo).fastpathDecMapUint32StringR) - fn(map[uint32]uint(nil), (*encFnInfo).fastpathEncMapUint32UintR, (*decFnInfo).fastpathDecMapUint32UintR) - fn(map[uint32]uint8(nil), (*encFnInfo).fastpathEncMapUint32Uint8R, (*decFnInfo).fastpathDecMapUint32Uint8R) - fn(map[uint32]uint16(nil), (*encFnInfo).fastpathEncMapUint32Uint16R, (*decFnInfo).fastpathDecMapUint32Uint16R) - fn(map[uint32]uint32(nil), (*encFnInfo).fastpathEncMapUint32Uint32R, (*decFnInfo).fastpathDecMapUint32Uint32R) - fn(map[uint32]uint64(nil), (*encFnInfo).fastpathEncMapUint32Uint64R, (*decFnInfo).fastpathDecMapUint32Uint64R) - fn(map[uint32]uintptr(nil), (*encFnInfo).fastpathEncMapUint32UintptrR, (*decFnInfo).fastpathDecMapUint32UintptrR) - fn(map[uint32]int(nil), (*encFnInfo).fastpathEncMapUint32IntR, (*decFnInfo).fastpathDecMapUint32IntR) - fn(map[uint32]int8(nil), (*encFnInfo).fastpathEncMapUint32Int8R, (*decFnInfo).fastpathDecMapUint32Int8R) - fn(map[uint32]int16(nil), (*encFnInfo).fastpathEncMapUint32Int16R, (*decFnInfo).fastpathDecMapUint32Int16R) - fn(map[uint32]int32(nil), (*encFnInfo).fastpathEncMapUint32Int32R, (*decFnInfo).fastpathDecMapUint32Int32R) - fn(map[uint32]int64(nil), (*encFnInfo).fastpathEncMapUint32Int64R, (*decFnInfo).fastpathDecMapUint32Int64R) - fn(map[uint32]float32(nil), (*encFnInfo).fastpathEncMapUint32Float32R, (*decFnInfo).fastpathDecMapUint32Float32R) - fn(map[uint32]float64(nil), (*encFnInfo).fastpathEncMapUint32Float64R, (*decFnInfo).fastpathDecMapUint32Float64R) - fn(map[uint32]bool(nil), (*encFnInfo).fastpathEncMapUint32BoolR, (*decFnInfo).fastpathDecMapUint32BoolR) - fn(map[uint64]interface{}(nil), (*encFnInfo).fastpathEncMapUint64IntfR, (*decFnInfo).fastpathDecMapUint64IntfR) - fn(map[uint64]string(nil), (*encFnInfo).fastpathEncMapUint64StringR, (*decFnInfo).fastpathDecMapUint64StringR) - fn(map[uint64]uint(nil), (*encFnInfo).fastpathEncMapUint64UintR, (*decFnInfo).fastpathDecMapUint64UintR) - fn(map[uint64]uint8(nil), (*encFnInfo).fastpathEncMapUint64Uint8R, (*decFnInfo).fastpathDecMapUint64Uint8R) - fn(map[uint64]uint16(nil), (*encFnInfo).fastpathEncMapUint64Uint16R, (*decFnInfo).fastpathDecMapUint64Uint16R) - fn(map[uint64]uint32(nil), (*encFnInfo).fastpathEncMapUint64Uint32R, (*decFnInfo).fastpathDecMapUint64Uint32R) - fn(map[uint64]uint64(nil), (*encFnInfo).fastpathEncMapUint64Uint64R, (*decFnInfo).fastpathDecMapUint64Uint64R) - fn(map[uint64]uintptr(nil), (*encFnInfo).fastpathEncMapUint64UintptrR, (*decFnInfo).fastpathDecMapUint64UintptrR) - fn(map[uint64]int(nil), (*encFnInfo).fastpathEncMapUint64IntR, (*decFnInfo).fastpathDecMapUint64IntR) - fn(map[uint64]int8(nil), (*encFnInfo).fastpathEncMapUint64Int8R, (*decFnInfo).fastpathDecMapUint64Int8R) - fn(map[uint64]int16(nil), (*encFnInfo).fastpathEncMapUint64Int16R, (*decFnInfo).fastpathDecMapUint64Int16R) - fn(map[uint64]int32(nil), (*encFnInfo).fastpathEncMapUint64Int32R, (*decFnInfo).fastpathDecMapUint64Int32R) - fn(map[uint64]int64(nil), (*encFnInfo).fastpathEncMapUint64Int64R, (*decFnInfo).fastpathDecMapUint64Int64R) - fn(map[uint64]float32(nil), (*encFnInfo).fastpathEncMapUint64Float32R, (*decFnInfo).fastpathDecMapUint64Float32R) - fn(map[uint64]float64(nil), (*encFnInfo).fastpathEncMapUint64Float64R, (*decFnInfo).fastpathDecMapUint64Float64R) - fn(map[uint64]bool(nil), (*encFnInfo).fastpathEncMapUint64BoolR, (*decFnInfo).fastpathDecMapUint64BoolR) - 
fn(map[uintptr]interface{}(nil), (*encFnInfo).fastpathEncMapUintptrIntfR, (*decFnInfo).fastpathDecMapUintptrIntfR) - fn(map[uintptr]string(nil), (*encFnInfo).fastpathEncMapUintptrStringR, (*decFnInfo).fastpathDecMapUintptrStringR) - fn(map[uintptr]uint(nil), (*encFnInfo).fastpathEncMapUintptrUintR, (*decFnInfo).fastpathDecMapUintptrUintR) - fn(map[uintptr]uint8(nil), (*encFnInfo).fastpathEncMapUintptrUint8R, (*decFnInfo).fastpathDecMapUintptrUint8R) - fn(map[uintptr]uint16(nil), (*encFnInfo).fastpathEncMapUintptrUint16R, (*decFnInfo).fastpathDecMapUintptrUint16R) - fn(map[uintptr]uint32(nil), (*encFnInfo).fastpathEncMapUintptrUint32R, (*decFnInfo).fastpathDecMapUintptrUint32R) - fn(map[uintptr]uint64(nil), (*encFnInfo).fastpathEncMapUintptrUint64R, (*decFnInfo).fastpathDecMapUintptrUint64R) - fn(map[uintptr]uintptr(nil), (*encFnInfo).fastpathEncMapUintptrUintptrR, (*decFnInfo).fastpathDecMapUintptrUintptrR) - fn(map[uintptr]int(nil), (*encFnInfo).fastpathEncMapUintptrIntR, (*decFnInfo).fastpathDecMapUintptrIntR) - fn(map[uintptr]int8(nil), (*encFnInfo).fastpathEncMapUintptrInt8R, (*decFnInfo).fastpathDecMapUintptrInt8R) - fn(map[uintptr]int16(nil), (*encFnInfo).fastpathEncMapUintptrInt16R, (*decFnInfo).fastpathDecMapUintptrInt16R) - fn(map[uintptr]int32(nil), (*encFnInfo).fastpathEncMapUintptrInt32R, (*decFnInfo).fastpathDecMapUintptrInt32R) - fn(map[uintptr]int64(nil), (*encFnInfo).fastpathEncMapUintptrInt64R, (*decFnInfo).fastpathDecMapUintptrInt64R) - fn(map[uintptr]float32(nil), (*encFnInfo).fastpathEncMapUintptrFloat32R, (*decFnInfo).fastpathDecMapUintptrFloat32R) - fn(map[uintptr]float64(nil), (*encFnInfo).fastpathEncMapUintptrFloat64R, (*decFnInfo).fastpathDecMapUintptrFloat64R) - fn(map[uintptr]bool(nil), (*encFnInfo).fastpathEncMapUintptrBoolR, (*decFnInfo).fastpathDecMapUintptrBoolR) - fn(map[int]interface{}(nil), (*encFnInfo).fastpathEncMapIntIntfR, (*decFnInfo).fastpathDecMapIntIntfR) - fn(map[int]string(nil), (*encFnInfo).fastpathEncMapIntStringR, (*decFnInfo).fastpathDecMapIntStringR) - fn(map[int]uint(nil), (*encFnInfo).fastpathEncMapIntUintR, (*decFnInfo).fastpathDecMapIntUintR) - fn(map[int]uint8(nil), (*encFnInfo).fastpathEncMapIntUint8R, (*decFnInfo).fastpathDecMapIntUint8R) - fn(map[int]uint16(nil), (*encFnInfo).fastpathEncMapIntUint16R, (*decFnInfo).fastpathDecMapIntUint16R) - fn(map[int]uint32(nil), (*encFnInfo).fastpathEncMapIntUint32R, (*decFnInfo).fastpathDecMapIntUint32R) - fn(map[int]uint64(nil), (*encFnInfo).fastpathEncMapIntUint64R, (*decFnInfo).fastpathDecMapIntUint64R) - fn(map[int]uintptr(nil), (*encFnInfo).fastpathEncMapIntUintptrR, (*decFnInfo).fastpathDecMapIntUintptrR) - fn(map[int]int(nil), (*encFnInfo).fastpathEncMapIntIntR, (*decFnInfo).fastpathDecMapIntIntR) - fn(map[int]int8(nil), (*encFnInfo).fastpathEncMapIntInt8R, (*decFnInfo).fastpathDecMapIntInt8R) - fn(map[int]int16(nil), (*encFnInfo).fastpathEncMapIntInt16R, (*decFnInfo).fastpathDecMapIntInt16R) - fn(map[int]int32(nil), (*encFnInfo).fastpathEncMapIntInt32R, (*decFnInfo).fastpathDecMapIntInt32R) - fn(map[int]int64(nil), (*encFnInfo).fastpathEncMapIntInt64R, (*decFnInfo).fastpathDecMapIntInt64R) - fn(map[int]float32(nil), (*encFnInfo).fastpathEncMapIntFloat32R, (*decFnInfo).fastpathDecMapIntFloat32R) - fn(map[int]float64(nil), (*encFnInfo).fastpathEncMapIntFloat64R, (*decFnInfo).fastpathDecMapIntFloat64R) - fn(map[int]bool(nil), (*encFnInfo).fastpathEncMapIntBoolR, (*decFnInfo).fastpathDecMapIntBoolR) - fn(map[int8]interface{}(nil), (*encFnInfo).fastpathEncMapInt8IntfR, 
(*decFnInfo).fastpathDecMapInt8IntfR) - fn(map[int8]string(nil), (*encFnInfo).fastpathEncMapInt8StringR, (*decFnInfo).fastpathDecMapInt8StringR) - fn(map[int8]uint(nil), (*encFnInfo).fastpathEncMapInt8UintR, (*decFnInfo).fastpathDecMapInt8UintR) - fn(map[int8]uint8(nil), (*encFnInfo).fastpathEncMapInt8Uint8R, (*decFnInfo).fastpathDecMapInt8Uint8R) - fn(map[int8]uint16(nil), (*encFnInfo).fastpathEncMapInt8Uint16R, (*decFnInfo).fastpathDecMapInt8Uint16R) - fn(map[int8]uint32(nil), (*encFnInfo).fastpathEncMapInt8Uint32R, (*decFnInfo).fastpathDecMapInt8Uint32R) - fn(map[int8]uint64(nil), (*encFnInfo).fastpathEncMapInt8Uint64R, (*decFnInfo).fastpathDecMapInt8Uint64R) - fn(map[int8]uintptr(nil), (*encFnInfo).fastpathEncMapInt8UintptrR, (*decFnInfo).fastpathDecMapInt8UintptrR) - fn(map[int8]int(nil), (*encFnInfo).fastpathEncMapInt8IntR, (*decFnInfo).fastpathDecMapInt8IntR) - fn(map[int8]int8(nil), (*encFnInfo).fastpathEncMapInt8Int8R, (*decFnInfo).fastpathDecMapInt8Int8R) - fn(map[int8]int16(nil), (*encFnInfo).fastpathEncMapInt8Int16R, (*decFnInfo).fastpathDecMapInt8Int16R) - fn(map[int8]int32(nil), (*encFnInfo).fastpathEncMapInt8Int32R, (*decFnInfo).fastpathDecMapInt8Int32R) - fn(map[int8]int64(nil), (*encFnInfo).fastpathEncMapInt8Int64R, (*decFnInfo).fastpathDecMapInt8Int64R) - fn(map[int8]float32(nil), (*encFnInfo).fastpathEncMapInt8Float32R, (*decFnInfo).fastpathDecMapInt8Float32R) - fn(map[int8]float64(nil), (*encFnInfo).fastpathEncMapInt8Float64R, (*decFnInfo).fastpathDecMapInt8Float64R) - fn(map[int8]bool(nil), (*encFnInfo).fastpathEncMapInt8BoolR, (*decFnInfo).fastpathDecMapInt8BoolR) - fn(map[int16]interface{}(nil), (*encFnInfo).fastpathEncMapInt16IntfR, (*decFnInfo).fastpathDecMapInt16IntfR) - fn(map[int16]string(nil), (*encFnInfo).fastpathEncMapInt16StringR, (*decFnInfo).fastpathDecMapInt16StringR) - fn(map[int16]uint(nil), (*encFnInfo).fastpathEncMapInt16UintR, (*decFnInfo).fastpathDecMapInt16UintR) - fn(map[int16]uint8(nil), (*encFnInfo).fastpathEncMapInt16Uint8R, (*decFnInfo).fastpathDecMapInt16Uint8R) - fn(map[int16]uint16(nil), (*encFnInfo).fastpathEncMapInt16Uint16R, (*decFnInfo).fastpathDecMapInt16Uint16R) - fn(map[int16]uint32(nil), (*encFnInfo).fastpathEncMapInt16Uint32R, (*decFnInfo).fastpathDecMapInt16Uint32R) - fn(map[int16]uint64(nil), (*encFnInfo).fastpathEncMapInt16Uint64R, (*decFnInfo).fastpathDecMapInt16Uint64R) - fn(map[int16]uintptr(nil), (*encFnInfo).fastpathEncMapInt16UintptrR, (*decFnInfo).fastpathDecMapInt16UintptrR) - fn(map[int16]int(nil), (*encFnInfo).fastpathEncMapInt16IntR, (*decFnInfo).fastpathDecMapInt16IntR) - fn(map[int16]int8(nil), (*encFnInfo).fastpathEncMapInt16Int8R, (*decFnInfo).fastpathDecMapInt16Int8R) - fn(map[int16]int16(nil), (*encFnInfo).fastpathEncMapInt16Int16R, (*decFnInfo).fastpathDecMapInt16Int16R) - fn(map[int16]int32(nil), (*encFnInfo).fastpathEncMapInt16Int32R, (*decFnInfo).fastpathDecMapInt16Int32R) - fn(map[int16]int64(nil), (*encFnInfo).fastpathEncMapInt16Int64R, (*decFnInfo).fastpathDecMapInt16Int64R) - fn(map[int16]float32(nil), (*encFnInfo).fastpathEncMapInt16Float32R, (*decFnInfo).fastpathDecMapInt16Float32R) - fn(map[int16]float64(nil), (*encFnInfo).fastpathEncMapInt16Float64R, (*decFnInfo).fastpathDecMapInt16Float64R) - fn(map[int16]bool(nil), (*encFnInfo).fastpathEncMapInt16BoolR, (*decFnInfo).fastpathDecMapInt16BoolR) - fn(map[int32]interface{}(nil), (*encFnInfo).fastpathEncMapInt32IntfR, (*decFnInfo).fastpathDecMapInt32IntfR) - fn(map[int32]string(nil), (*encFnInfo).fastpathEncMapInt32StringR, 
(*decFnInfo).fastpathDecMapInt32StringR) - fn(map[int32]uint(nil), (*encFnInfo).fastpathEncMapInt32UintR, (*decFnInfo).fastpathDecMapInt32UintR) - fn(map[int32]uint8(nil), (*encFnInfo).fastpathEncMapInt32Uint8R, (*decFnInfo).fastpathDecMapInt32Uint8R) - fn(map[int32]uint16(nil), (*encFnInfo).fastpathEncMapInt32Uint16R, (*decFnInfo).fastpathDecMapInt32Uint16R) - fn(map[int32]uint32(nil), (*encFnInfo).fastpathEncMapInt32Uint32R, (*decFnInfo).fastpathDecMapInt32Uint32R) - fn(map[int32]uint64(nil), (*encFnInfo).fastpathEncMapInt32Uint64R, (*decFnInfo).fastpathDecMapInt32Uint64R) - fn(map[int32]uintptr(nil), (*encFnInfo).fastpathEncMapInt32UintptrR, (*decFnInfo).fastpathDecMapInt32UintptrR) - fn(map[int32]int(nil), (*encFnInfo).fastpathEncMapInt32IntR, (*decFnInfo).fastpathDecMapInt32IntR) - fn(map[int32]int8(nil), (*encFnInfo).fastpathEncMapInt32Int8R, (*decFnInfo).fastpathDecMapInt32Int8R) - fn(map[int32]int16(nil), (*encFnInfo).fastpathEncMapInt32Int16R, (*decFnInfo).fastpathDecMapInt32Int16R) - fn(map[int32]int32(nil), (*encFnInfo).fastpathEncMapInt32Int32R, (*decFnInfo).fastpathDecMapInt32Int32R) - fn(map[int32]int64(nil), (*encFnInfo).fastpathEncMapInt32Int64R, (*decFnInfo).fastpathDecMapInt32Int64R) - fn(map[int32]float32(nil), (*encFnInfo).fastpathEncMapInt32Float32R, (*decFnInfo).fastpathDecMapInt32Float32R) - fn(map[int32]float64(nil), (*encFnInfo).fastpathEncMapInt32Float64R, (*decFnInfo).fastpathDecMapInt32Float64R) - fn(map[int32]bool(nil), (*encFnInfo).fastpathEncMapInt32BoolR, (*decFnInfo).fastpathDecMapInt32BoolR) - fn(map[int64]interface{}(nil), (*encFnInfo).fastpathEncMapInt64IntfR, (*decFnInfo).fastpathDecMapInt64IntfR) - fn(map[int64]string(nil), (*encFnInfo).fastpathEncMapInt64StringR, (*decFnInfo).fastpathDecMapInt64StringR) - fn(map[int64]uint(nil), (*encFnInfo).fastpathEncMapInt64UintR, (*decFnInfo).fastpathDecMapInt64UintR) - fn(map[int64]uint8(nil), (*encFnInfo).fastpathEncMapInt64Uint8R, (*decFnInfo).fastpathDecMapInt64Uint8R) - fn(map[int64]uint16(nil), (*encFnInfo).fastpathEncMapInt64Uint16R, (*decFnInfo).fastpathDecMapInt64Uint16R) - fn(map[int64]uint32(nil), (*encFnInfo).fastpathEncMapInt64Uint32R, (*decFnInfo).fastpathDecMapInt64Uint32R) - fn(map[int64]uint64(nil), (*encFnInfo).fastpathEncMapInt64Uint64R, (*decFnInfo).fastpathDecMapInt64Uint64R) - fn(map[int64]uintptr(nil), (*encFnInfo).fastpathEncMapInt64UintptrR, (*decFnInfo).fastpathDecMapInt64UintptrR) - fn(map[int64]int(nil), (*encFnInfo).fastpathEncMapInt64IntR, (*decFnInfo).fastpathDecMapInt64IntR) - fn(map[int64]int8(nil), (*encFnInfo).fastpathEncMapInt64Int8R, (*decFnInfo).fastpathDecMapInt64Int8R) - fn(map[int64]int16(nil), (*encFnInfo).fastpathEncMapInt64Int16R, (*decFnInfo).fastpathDecMapInt64Int16R) - fn(map[int64]int32(nil), (*encFnInfo).fastpathEncMapInt64Int32R, (*decFnInfo).fastpathDecMapInt64Int32R) - fn(map[int64]int64(nil), (*encFnInfo).fastpathEncMapInt64Int64R, (*decFnInfo).fastpathDecMapInt64Int64R) - fn(map[int64]float32(nil), (*encFnInfo).fastpathEncMapInt64Float32R, (*decFnInfo).fastpathDecMapInt64Float32R) - fn(map[int64]float64(nil), (*encFnInfo).fastpathEncMapInt64Float64R, (*decFnInfo).fastpathDecMapInt64Float64R) - fn(map[int64]bool(nil), (*encFnInfo).fastpathEncMapInt64BoolR, (*decFnInfo).fastpathDecMapInt64BoolR) - fn(map[bool]interface{}(nil), (*encFnInfo).fastpathEncMapBoolIntfR, (*decFnInfo).fastpathDecMapBoolIntfR) - fn(map[bool]string(nil), (*encFnInfo).fastpathEncMapBoolStringR, (*decFnInfo).fastpathDecMapBoolStringR) - fn(map[bool]uint(nil), 
(*encFnInfo).fastpathEncMapBoolUintR, (*decFnInfo).fastpathDecMapBoolUintR) - fn(map[bool]uint8(nil), (*encFnInfo).fastpathEncMapBoolUint8R, (*decFnInfo).fastpathDecMapBoolUint8R) - fn(map[bool]uint16(nil), (*encFnInfo).fastpathEncMapBoolUint16R, (*decFnInfo).fastpathDecMapBoolUint16R) - fn(map[bool]uint32(nil), (*encFnInfo).fastpathEncMapBoolUint32R, (*decFnInfo).fastpathDecMapBoolUint32R) - fn(map[bool]uint64(nil), (*encFnInfo).fastpathEncMapBoolUint64R, (*decFnInfo).fastpathDecMapBoolUint64R) - fn(map[bool]uintptr(nil), (*encFnInfo).fastpathEncMapBoolUintptrR, (*decFnInfo).fastpathDecMapBoolUintptrR) - fn(map[bool]int(nil), (*encFnInfo).fastpathEncMapBoolIntR, (*decFnInfo).fastpathDecMapBoolIntR) - fn(map[bool]int8(nil), (*encFnInfo).fastpathEncMapBoolInt8R, (*decFnInfo).fastpathDecMapBoolInt8R) - fn(map[bool]int16(nil), (*encFnInfo).fastpathEncMapBoolInt16R, (*decFnInfo).fastpathDecMapBoolInt16R) - fn(map[bool]int32(nil), (*encFnInfo).fastpathEncMapBoolInt32R, (*decFnInfo).fastpathDecMapBoolInt32R) - fn(map[bool]int64(nil), (*encFnInfo).fastpathEncMapBoolInt64R, (*decFnInfo).fastpathDecMapBoolInt64R) - fn(map[bool]float32(nil), (*encFnInfo).fastpathEncMapBoolFloat32R, (*decFnInfo).fastpathDecMapBoolFloat32R) - fn(map[bool]float64(nil), (*encFnInfo).fastpathEncMapBoolFloat64R, (*decFnInfo).fastpathDecMapBoolFloat64R) - fn(map[bool]bool(nil), (*encFnInfo).fastpathEncMapBoolBoolR, (*decFnInfo).fastpathDecMapBoolBoolR) + fn(map[interface{}]interface{}(nil), (*Encoder).fastpathEncMapIntfIntfR, (*Decoder).fastpathDecMapIntfIntfR) + fn(map[interface{}]string(nil), (*Encoder).fastpathEncMapIntfStringR, (*Decoder).fastpathDecMapIntfStringR) + fn(map[interface{}]uint(nil), (*Encoder).fastpathEncMapIntfUintR, (*Decoder).fastpathDecMapIntfUintR) + fn(map[interface{}]uint8(nil), (*Encoder).fastpathEncMapIntfUint8R, (*Decoder).fastpathDecMapIntfUint8R) + fn(map[interface{}]uint16(nil), (*Encoder).fastpathEncMapIntfUint16R, (*Decoder).fastpathDecMapIntfUint16R) + fn(map[interface{}]uint32(nil), (*Encoder).fastpathEncMapIntfUint32R, (*Decoder).fastpathDecMapIntfUint32R) + fn(map[interface{}]uint64(nil), (*Encoder).fastpathEncMapIntfUint64R, (*Decoder).fastpathDecMapIntfUint64R) + fn(map[interface{}]uintptr(nil), (*Encoder).fastpathEncMapIntfUintptrR, (*Decoder).fastpathDecMapIntfUintptrR) + fn(map[interface{}]int(nil), (*Encoder).fastpathEncMapIntfIntR, (*Decoder).fastpathDecMapIntfIntR) + fn(map[interface{}]int8(nil), (*Encoder).fastpathEncMapIntfInt8R, (*Decoder).fastpathDecMapIntfInt8R) + fn(map[interface{}]int16(nil), (*Encoder).fastpathEncMapIntfInt16R, (*Decoder).fastpathDecMapIntfInt16R) + fn(map[interface{}]int32(nil), (*Encoder).fastpathEncMapIntfInt32R, (*Decoder).fastpathDecMapIntfInt32R) + fn(map[interface{}]int64(nil), (*Encoder).fastpathEncMapIntfInt64R, (*Decoder).fastpathDecMapIntfInt64R) + fn(map[interface{}]float32(nil), (*Encoder).fastpathEncMapIntfFloat32R, (*Decoder).fastpathDecMapIntfFloat32R) + fn(map[interface{}]float64(nil), (*Encoder).fastpathEncMapIntfFloat64R, (*Decoder).fastpathDecMapIntfFloat64R) + fn(map[interface{}]bool(nil), (*Encoder).fastpathEncMapIntfBoolR, (*Decoder).fastpathDecMapIntfBoolR) + fn(map[string]interface{}(nil), (*Encoder).fastpathEncMapStringIntfR, (*Decoder).fastpathDecMapStringIntfR) + fn(map[string]string(nil), (*Encoder).fastpathEncMapStringStringR, (*Decoder).fastpathDecMapStringStringR) + fn(map[string]uint(nil), (*Encoder).fastpathEncMapStringUintR, (*Decoder).fastpathDecMapStringUintR) + fn(map[string]uint8(nil), 
(*Encoder).fastpathEncMapStringUint8R, (*Decoder).fastpathDecMapStringUint8R) + fn(map[string]uint16(nil), (*Encoder).fastpathEncMapStringUint16R, (*Decoder).fastpathDecMapStringUint16R) + fn(map[string]uint32(nil), (*Encoder).fastpathEncMapStringUint32R, (*Decoder).fastpathDecMapStringUint32R) + fn(map[string]uint64(nil), (*Encoder).fastpathEncMapStringUint64R, (*Decoder).fastpathDecMapStringUint64R) + fn(map[string]uintptr(nil), (*Encoder).fastpathEncMapStringUintptrR, (*Decoder).fastpathDecMapStringUintptrR) + fn(map[string]int(nil), (*Encoder).fastpathEncMapStringIntR, (*Decoder).fastpathDecMapStringIntR) + fn(map[string]int8(nil), (*Encoder).fastpathEncMapStringInt8R, (*Decoder).fastpathDecMapStringInt8R) + fn(map[string]int16(nil), (*Encoder).fastpathEncMapStringInt16R, (*Decoder).fastpathDecMapStringInt16R) + fn(map[string]int32(nil), (*Encoder).fastpathEncMapStringInt32R, (*Decoder).fastpathDecMapStringInt32R) + fn(map[string]int64(nil), (*Encoder).fastpathEncMapStringInt64R, (*Decoder).fastpathDecMapStringInt64R) + fn(map[string]float32(nil), (*Encoder).fastpathEncMapStringFloat32R, (*Decoder).fastpathDecMapStringFloat32R) + fn(map[string]float64(nil), (*Encoder).fastpathEncMapStringFloat64R, (*Decoder).fastpathDecMapStringFloat64R) + fn(map[string]bool(nil), (*Encoder).fastpathEncMapStringBoolR, (*Decoder).fastpathDecMapStringBoolR) + fn(map[float32]interface{}(nil), (*Encoder).fastpathEncMapFloat32IntfR, (*Decoder).fastpathDecMapFloat32IntfR) + fn(map[float32]string(nil), (*Encoder).fastpathEncMapFloat32StringR, (*Decoder).fastpathDecMapFloat32StringR) + fn(map[float32]uint(nil), (*Encoder).fastpathEncMapFloat32UintR, (*Decoder).fastpathDecMapFloat32UintR) + fn(map[float32]uint8(nil), (*Encoder).fastpathEncMapFloat32Uint8R, (*Decoder).fastpathDecMapFloat32Uint8R) + fn(map[float32]uint16(nil), (*Encoder).fastpathEncMapFloat32Uint16R, (*Decoder).fastpathDecMapFloat32Uint16R) + fn(map[float32]uint32(nil), (*Encoder).fastpathEncMapFloat32Uint32R, (*Decoder).fastpathDecMapFloat32Uint32R) + fn(map[float32]uint64(nil), (*Encoder).fastpathEncMapFloat32Uint64R, (*Decoder).fastpathDecMapFloat32Uint64R) + fn(map[float32]uintptr(nil), (*Encoder).fastpathEncMapFloat32UintptrR, (*Decoder).fastpathDecMapFloat32UintptrR) + fn(map[float32]int(nil), (*Encoder).fastpathEncMapFloat32IntR, (*Decoder).fastpathDecMapFloat32IntR) + fn(map[float32]int8(nil), (*Encoder).fastpathEncMapFloat32Int8R, (*Decoder).fastpathDecMapFloat32Int8R) + fn(map[float32]int16(nil), (*Encoder).fastpathEncMapFloat32Int16R, (*Decoder).fastpathDecMapFloat32Int16R) + fn(map[float32]int32(nil), (*Encoder).fastpathEncMapFloat32Int32R, (*Decoder).fastpathDecMapFloat32Int32R) + fn(map[float32]int64(nil), (*Encoder).fastpathEncMapFloat32Int64R, (*Decoder).fastpathDecMapFloat32Int64R) + fn(map[float32]float32(nil), (*Encoder).fastpathEncMapFloat32Float32R, (*Decoder).fastpathDecMapFloat32Float32R) + fn(map[float32]float64(nil), (*Encoder).fastpathEncMapFloat32Float64R, (*Decoder).fastpathDecMapFloat32Float64R) + fn(map[float32]bool(nil), (*Encoder).fastpathEncMapFloat32BoolR, (*Decoder).fastpathDecMapFloat32BoolR) + fn(map[float64]interface{}(nil), (*Encoder).fastpathEncMapFloat64IntfR, (*Decoder).fastpathDecMapFloat64IntfR) + fn(map[float64]string(nil), (*Encoder).fastpathEncMapFloat64StringR, (*Decoder).fastpathDecMapFloat64StringR) + fn(map[float64]uint(nil), (*Encoder).fastpathEncMapFloat64UintR, (*Decoder).fastpathDecMapFloat64UintR) + fn(map[float64]uint8(nil), (*Encoder).fastpathEncMapFloat64Uint8R, 
(*Decoder).fastpathDecMapFloat64Uint8R) + fn(map[float64]uint16(nil), (*Encoder).fastpathEncMapFloat64Uint16R, (*Decoder).fastpathDecMapFloat64Uint16R) + fn(map[float64]uint32(nil), (*Encoder).fastpathEncMapFloat64Uint32R, (*Decoder).fastpathDecMapFloat64Uint32R) + fn(map[float64]uint64(nil), (*Encoder).fastpathEncMapFloat64Uint64R, (*Decoder).fastpathDecMapFloat64Uint64R) + fn(map[float64]uintptr(nil), (*Encoder).fastpathEncMapFloat64UintptrR, (*Decoder).fastpathDecMapFloat64UintptrR) + fn(map[float64]int(nil), (*Encoder).fastpathEncMapFloat64IntR, (*Decoder).fastpathDecMapFloat64IntR) + fn(map[float64]int8(nil), (*Encoder).fastpathEncMapFloat64Int8R, (*Decoder).fastpathDecMapFloat64Int8R) + fn(map[float64]int16(nil), (*Encoder).fastpathEncMapFloat64Int16R, (*Decoder).fastpathDecMapFloat64Int16R) + fn(map[float64]int32(nil), (*Encoder).fastpathEncMapFloat64Int32R, (*Decoder).fastpathDecMapFloat64Int32R) + fn(map[float64]int64(nil), (*Encoder).fastpathEncMapFloat64Int64R, (*Decoder).fastpathDecMapFloat64Int64R) + fn(map[float64]float32(nil), (*Encoder).fastpathEncMapFloat64Float32R, (*Decoder).fastpathDecMapFloat64Float32R) + fn(map[float64]float64(nil), (*Encoder).fastpathEncMapFloat64Float64R, (*Decoder).fastpathDecMapFloat64Float64R) + fn(map[float64]bool(nil), (*Encoder).fastpathEncMapFloat64BoolR, (*Decoder).fastpathDecMapFloat64BoolR) + fn(map[uint]interface{}(nil), (*Encoder).fastpathEncMapUintIntfR, (*Decoder).fastpathDecMapUintIntfR) + fn(map[uint]string(nil), (*Encoder).fastpathEncMapUintStringR, (*Decoder).fastpathDecMapUintStringR) + fn(map[uint]uint(nil), (*Encoder).fastpathEncMapUintUintR, (*Decoder).fastpathDecMapUintUintR) + fn(map[uint]uint8(nil), (*Encoder).fastpathEncMapUintUint8R, (*Decoder).fastpathDecMapUintUint8R) + fn(map[uint]uint16(nil), (*Encoder).fastpathEncMapUintUint16R, (*Decoder).fastpathDecMapUintUint16R) + fn(map[uint]uint32(nil), (*Encoder).fastpathEncMapUintUint32R, (*Decoder).fastpathDecMapUintUint32R) + fn(map[uint]uint64(nil), (*Encoder).fastpathEncMapUintUint64R, (*Decoder).fastpathDecMapUintUint64R) + fn(map[uint]uintptr(nil), (*Encoder).fastpathEncMapUintUintptrR, (*Decoder).fastpathDecMapUintUintptrR) + fn(map[uint]int(nil), (*Encoder).fastpathEncMapUintIntR, (*Decoder).fastpathDecMapUintIntR) + fn(map[uint]int8(nil), (*Encoder).fastpathEncMapUintInt8R, (*Decoder).fastpathDecMapUintInt8R) + fn(map[uint]int16(nil), (*Encoder).fastpathEncMapUintInt16R, (*Decoder).fastpathDecMapUintInt16R) + fn(map[uint]int32(nil), (*Encoder).fastpathEncMapUintInt32R, (*Decoder).fastpathDecMapUintInt32R) + fn(map[uint]int64(nil), (*Encoder).fastpathEncMapUintInt64R, (*Decoder).fastpathDecMapUintInt64R) + fn(map[uint]float32(nil), (*Encoder).fastpathEncMapUintFloat32R, (*Decoder).fastpathDecMapUintFloat32R) + fn(map[uint]float64(nil), (*Encoder).fastpathEncMapUintFloat64R, (*Decoder).fastpathDecMapUintFloat64R) + fn(map[uint]bool(nil), (*Encoder).fastpathEncMapUintBoolR, (*Decoder).fastpathDecMapUintBoolR) + fn(map[uint8]interface{}(nil), (*Encoder).fastpathEncMapUint8IntfR, (*Decoder).fastpathDecMapUint8IntfR) + fn(map[uint8]string(nil), (*Encoder).fastpathEncMapUint8StringR, (*Decoder).fastpathDecMapUint8StringR) + fn(map[uint8]uint(nil), (*Encoder).fastpathEncMapUint8UintR, (*Decoder).fastpathDecMapUint8UintR) + fn(map[uint8]uint8(nil), (*Encoder).fastpathEncMapUint8Uint8R, (*Decoder).fastpathDecMapUint8Uint8R) + fn(map[uint8]uint16(nil), (*Encoder).fastpathEncMapUint8Uint16R, (*Decoder).fastpathDecMapUint8Uint16R) + fn(map[uint8]uint32(nil), 
(*Encoder).fastpathEncMapUint8Uint32R, (*Decoder).fastpathDecMapUint8Uint32R) + fn(map[uint8]uint64(nil), (*Encoder).fastpathEncMapUint8Uint64R, (*Decoder).fastpathDecMapUint8Uint64R) + fn(map[uint8]uintptr(nil), (*Encoder).fastpathEncMapUint8UintptrR, (*Decoder).fastpathDecMapUint8UintptrR) + fn(map[uint8]int(nil), (*Encoder).fastpathEncMapUint8IntR, (*Decoder).fastpathDecMapUint8IntR) + fn(map[uint8]int8(nil), (*Encoder).fastpathEncMapUint8Int8R, (*Decoder).fastpathDecMapUint8Int8R) + fn(map[uint8]int16(nil), (*Encoder).fastpathEncMapUint8Int16R, (*Decoder).fastpathDecMapUint8Int16R) + fn(map[uint8]int32(nil), (*Encoder).fastpathEncMapUint8Int32R, (*Decoder).fastpathDecMapUint8Int32R) + fn(map[uint8]int64(nil), (*Encoder).fastpathEncMapUint8Int64R, (*Decoder).fastpathDecMapUint8Int64R) + fn(map[uint8]float32(nil), (*Encoder).fastpathEncMapUint8Float32R, (*Decoder).fastpathDecMapUint8Float32R) + fn(map[uint8]float64(nil), (*Encoder).fastpathEncMapUint8Float64R, (*Decoder).fastpathDecMapUint8Float64R) + fn(map[uint8]bool(nil), (*Encoder).fastpathEncMapUint8BoolR, (*Decoder).fastpathDecMapUint8BoolR) + fn(map[uint16]interface{}(nil), (*Encoder).fastpathEncMapUint16IntfR, (*Decoder).fastpathDecMapUint16IntfR) + fn(map[uint16]string(nil), (*Encoder).fastpathEncMapUint16StringR, (*Decoder).fastpathDecMapUint16StringR) + fn(map[uint16]uint(nil), (*Encoder).fastpathEncMapUint16UintR, (*Decoder).fastpathDecMapUint16UintR) + fn(map[uint16]uint8(nil), (*Encoder).fastpathEncMapUint16Uint8R, (*Decoder).fastpathDecMapUint16Uint8R) + fn(map[uint16]uint16(nil), (*Encoder).fastpathEncMapUint16Uint16R, (*Decoder).fastpathDecMapUint16Uint16R) + fn(map[uint16]uint32(nil), (*Encoder).fastpathEncMapUint16Uint32R, (*Decoder).fastpathDecMapUint16Uint32R) + fn(map[uint16]uint64(nil), (*Encoder).fastpathEncMapUint16Uint64R, (*Decoder).fastpathDecMapUint16Uint64R) + fn(map[uint16]uintptr(nil), (*Encoder).fastpathEncMapUint16UintptrR, (*Decoder).fastpathDecMapUint16UintptrR) + fn(map[uint16]int(nil), (*Encoder).fastpathEncMapUint16IntR, (*Decoder).fastpathDecMapUint16IntR) + fn(map[uint16]int8(nil), (*Encoder).fastpathEncMapUint16Int8R, (*Decoder).fastpathDecMapUint16Int8R) + fn(map[uint16]int16(nil), (*Encoder).fastpathEncMapUint16Int16R, (*Decoder).fastpathDecMapUint16Int16R) + fn(map[uint16]int32(nil), (*Encoder).fastpathEncMapUint16Int32R, (*Decoder).fastpathDecMapUint16Int32R) + fn(map[uint16]int64(nil), (*Encoder).fastpathEncMapUint16Int64R, (*Decoder).fastpathDecMapUint16Int64R) + fn(map[uint16]float32(nil), (*Encoder).fastpathEncMapUint16Float32R, (*Decoder).fastpathDecMapUint16Float32R) + fn(map[uint16]float64(nil), (*Encoder).fastpathEncMapUint16Float64R, (*Decoder).fastpathDecMapUint16Float64R) + fn(map[uint16]bool(nil), (*Encoder).fastpathEncMapUint16BoolR, (*Decoder).fastpathDecMapUint16BoolR) + fn(map[uint32]interface{}(nil), (*Encoder).fastpathEncMapUint32IntfR, (*Decoder).fastpathDecMapUint32IntfR) + fn(map[uint32]string(nil), (*Encoder).fastpathEncMapUint32StringR, (*Decoder).fastpathDecMapUint32StringR) + fn(map[uint32]uint(nil), (*Encoder).fastpathEncMapUint32UintR, (*Decoder).fastpathDecMapUint32UintR) + fn(map[uint32]uint8(nil), (*Encoder).fastpathEncMapUint32Uint8R, (*Decoder).fastpathDecMapUint32Uint8R) + fn(map[uint32]uint16(nil), (*Encoder).fastpathEncMapUint32Uint16R, (*Decoder).fastpathDecMapUint32Uint16R) + fn(map[uint32]uint32(nil), (*Encoder).fastpathEncMapUint32Uint32R, (*Decoder).fastpathDecMapUint32Uint32R) + fn(map[uint32]uint64(nil), (*Encoder).fastpathEncMapUint32Uint64R, 
(*Decoder).fastpathDecMapUint32Uint64R) + fn(map[uint32]uintptr(nil), (*Encoder).fastpathEncMapUint32UintptrR, (*Decoder).fastpathDecMapUint32UintptrR) + fn(map[uint32]int(nil), (*Encoder).fastpathEncMapUint32IntR, (*Decoder).fastpathDecMapUint32IntR) + fn(map[uint32]int8(nil), (*Encoder).fastpathEncMapUint32Int8R, (*Decoder).fastpathDecMapUint32Int8R) + fn(map[uint32]int16(nil), (*Encoder).fastpathEncMapUint32Int16R, (*Decoder).fastpathDecMapUint32Int16R) + fn(map[uint32]int32(nil), (*Encoder).fastpathEncMapUint32Int32R, (*Decoder).fastpathDecMapUint32Int32R) + fn(map[uint32]int64(nil), (*Encoder).fastpathEncMapUint32Int64R, (*Decoder).fastpathDecMapUint32Int64R) + fn(map[uint32]float32(nil), (*Encoder).fastpathEncMapUint32Float32R, (*Decoder).fastpathDecMapUint32Float32R) + fn(map[uint32]float64(nil), (*Encoder).fastpathEncMapUint32Float64R, (*Decoder).fastpathDecMapUint32Float64R) + fn(map[uint32]bool(nil), (*Encoder).fastpathEncMapUint32BoolR, (*Decoder).fastpathDecMapUint32BoolR) + fn(map[uint64]interface{}(nil), (*Encoder).fastpathEncMapUint64IntfR, (*Decoder).fastpathDecMapUint64IntfR) + fn(map[uint64]string(nil), (*Encoder).fastpathEncMapUint64StringR, (*Decoder).fastpathDecMapUint64StringR) + fn(map[uint64]uint(nil), (*Encoder).fastpathEncMapUint64UintR, (*Decoder).fastpathDecMapUint64UintR) + fn(map[uint64]uint8(nil), (*Encoder).fastpathEncMapUint64Uint8R, (*Decoder).fastpathDecMapUint64Uint8R) + fn(map[uint64]uint16(nil), (*Encoder).fastpathEncMapUint64Uint16R, (*Decoder).fastpathDecMapUint64Uint16R) + fn(map[uint64]uint32(nil), (*Encoder).fastpathEncMapUint64Uint32R, (*Decoder).fastpathDecMapUint64Uint32R) + fn(map[uint64]uint64(nil), (*Encoder).fastpathEncMapUint64Uint64R, (*Decoder).fastpathDecMapUint64Uint64R) + fn(map[uint64]uintptr(nil), (*Encoder).fastpathEncMapUint64UintptrR, (*Decoder).fastpathDecMapUint64UintptrR) + fn(map[uint64]int(nil), (*Encoder).fastpathEncMapUint64IntR, (*Decoder).fastpathDecMapUint64IntR) + fn(map[uint64]int8(nil), (*Encoder).fastpathEncMapUint64Int8R, (*Decoder).fastpathDecMapUint64Int8R) + fn(map[uint64]int16(nil), (*Encoder).fastpathEncMapUint64Int16R, (*Decoder).fastpathDecMapUint64Int16R) + fn(map[uint64]int32(nil), (*Encoder).fastpathEncMapUint64Int32R, (*Decoder).fastpathDecMapUint64Int32R) + fn(map[uint64]int64(nil), (*Encoder).fastpathEncMapUint64Int64R, (*Decoder).fastpathDecMapUint64Int64R) + fn(map[uint64]float32(nil), (*Encoder).fastpathEncMapUint64Float32R, (*Decoder).fastpathDecMapUint64Float32R) + fn(map[uint64]float64(nil), (*Encoder).fastpathEncMapUint64Float64R, (*Decoder).fastpathDecMapUint64Float64R) + fn(map[uint64]bool(nil), (*Encoder).fastpathEncMapUint64BoolR, (*Decoder).fastpathDecMapUint64BoolR) + fn(map[uintptr]interface{}(nil), (*Encoder).fastpathEncMapUintptrIntfR, (*Decoder).fastpathDecMapUintptrIntfR) + fn(map[uintptr]string(nil), (*Encoder).fastpathEncMapUintptrStringR, (*Decoder).fastpathDecMapUintptrStringR) + fn(map[uintptr]uint(nil), (*Encoder).fastpathEncMapUintptrUintR, (*Decoder).fastpathDecMapUintptrUintR) + fn(map[uintptr]uint8(nil), (*Encoder).fastpathEncMapUintptrUint8R, (*Decoder).fastpathDecMapUintptrUint8R) + fn(map[uintptr]uint16(nil), (*Encoder).fastpathEncMapUintptrUint16R, (*Decoder).fastpathDecMapUintptrUint16R) + fn(map[uintptr]uint32(nil), (*Encoder).fastpathEncMapUintptrUint32R, (*Decoder).fastpathDecMapUintptrUint32R) + fn(map[uintptr]uint64(nil), (*Encoder).fastpathEncMapUintptrUint64R, (*Decoder).fastpathDecMapUintptrUint64R) + fn(map[uintptr]uintptr(nil), 
(*Encoder).fastpathEncMapUintptrUintptrR, (*Decoder).fastpathDecMapUintptrUintptrR) + fn(map[uintptr]int(nil), (*Encoder).fastpathEncMapUintptrIntR, (*Decoder).fastpathDecMapUintptrIntR) + fn(map[uintptr]int8(nil), (*Encoder).fastpathEncMapUintptrInt8R, (*Decoder).fastpathDecMapUintptrInt8R) + fn(map[uintptr]int16(nil), (*Encoder).fastpathEncMapUintptrInt16R, (*Decoder).fastpathDecMapUintptrInt16R) + fn(map[uintptr]int32(nil), (*Encoder).fastpathEncMapUintptrInt32R, (*Decoder).fastpathDecMapUintptrInt32R) + fn(map[uintptr]int64(nil), (*Encoder).fastpathEncMapUintptrInt64R, (*Decoder).fastpathDecMapUintptrInt64R) + fn(map[uintptr]float32(nil), (*Encoder).fastpathEncMapUintptrFloat32R, (*Decoder).fastpathDecMapUintptrFloat32R) + fn(map[uintptr]float64(nil), (*Encoder).fastpathEncMapUintptrFloat64R, (*Decoder).fastpathDecMapUintptrFloat64R) + fn(map[uintptr]bool(nil), (*Encoder).fastpathEncMapUintptrBoolR, (*Decoder).fastpathDecMapUintptrBoolR) + fn(map[int]interface{}(nil), (*Encoder).fastpathEncMapIntIntfR, (*Decoder).fastpathDecMapIntIntfR) + fn(map[int]string(nil), (*Encoder).fastpathEncMapIntStringR, (*Decoder).fastpathDecMapIntStringR) + fn(map[int]uint(nil), (*Encoder).fastpathEncMapIntUintR, (*Decoder).fastpathDecMapIntUintR) + fn(map[int]uint8(nil), (*Encoder).fastpathEncMapIntUint8R, (*Decoder).fastpathDecMapIntUint8R) + fn(map[int]uint16(nil), (*Encoder).fastpathEncMapIntUint16R, (*Decoder).fastpathDecMapIntUint16R) + fn(map[int]uint32(nil), (*Encoder).fastpathEncMapIntUint32R, (*Decoder).fastpathDecMapIntUint32R) + fn(map[int]uint64(nil), (*Encoder).fastpathEncMapIntUint64R, (*Decoder).fastpathDecMapIntUint64R) + fn(map[int]uintptr(nil), (*Encoder).fastpathEncMapIntUintptrR, (*Decoder).fastpathDecMapIntUintptrR) + fn(map[int]int(nil), (*Encoder).fastpathEncMapIntIntR, (*Decoder).fastpathDecMapIntIntR) + fn(map[int]int8(nil), (*Encoder).fastpathEncMapIntInt8R, (*Decoder).fastpathDecMapIntInt8R) + fn(map[int]int16(nil), (*Encoder).fastpathEncMapIntInt16R, (*Decoder).fastpathDecMapIntInt16R) + fn(map[int]int32(nil), (*Encoder).fastpathEncMapIntInt32R, (*Decoder).fastpathDecMapIntInt32R) + fn(map[int]int64(nil), (*Encoder).fastpathEncMapIntInt64R, (*Decoder).fastpathDecMapIntInt64R) + fn(map[int]float32(nil), (*Encoder).fastpathEncMapIntFloat32R, (*Decoder).fastpathDecMapIntFloat32R) + fn(map[int]float64(nil), (*Encoder).fastpathEncMapIntFloat64R, (*Decoder).fastpathDecMapIntFloat64R) + fn(map[int]bool(nil), (*Encoder).fastpathEncMapIntBoolR, (*Decoder).fastpathDecMapIntBoolR) + fn(map[int8]interface{}(nil), (*Encoder).fastpathEncMapInt8IntfR, (*Decoder).fastpathDecMapInt8IntfR) + fn(map[int8]string(nil), (*Encoder).fastpathEncMapInt8StringR, (*Decoder).fastpathDecMapInt8StringR) + fn(map[int8]uint(nil), (*Encoder).fastpathEncMapInt8UintR, (*Decoder).fastpathDecMapInt8UintR) + fn(map[int8]uint8(nil), (*Encoder).fastpathEncMapInt8Uint8R, (*Decoder).fastpathDecMapInt8Uint8R) + fn(map[int8]uint16(nil), (*Encoder).fastpathEncMapInt8Uint16R, (*Decoder).fastpathDecMapInt8Uint16R) + fn(map[int8]uint32(nil), (*Encoder).fastpathEncMapInt8Uint32R, (*Decoder).fastpathDecMapInt8Uint32R) + fn(map[int8]uint64(nil), (*Encoder).fastpathEncMapInt8Uint64R, (*Decoder).fastpathDecMapInt8Uint64R) + fn(map[int8]uintptr(nil), (*Encoder).fastpathEncMapInt8UintptrR, (*Decoder).fastpathDecMapInt8UintptrR) + fn(map[int8]int(nil), (*Encoder).fastpathEncMapInt8IntR, (*Decoder).fastpathDecMapInt8IntR) + fn(map[int8]int8(nil), (*Encoder).fastpathEncMapInt8Int8R, (*Decoder).fastpathDecMapInt8Int8R) + 
fn(map[int8]int16(nil), (*Encoder).fastpathEncMapInt8Int16R, (*Decoder).fastpathDecMapInt8Int16R) + fn(map[int8]int32(nil), (*Encoder).fastpathEncMapInt8Int32R, (*Decoder).fastpathDecMapInt8Int32R) + fn(map[int8]int64(nil), (*Encoder).fastpathEncMapInt8Int64R, (*Decoder).fastpathDecMapInt8Int64R) + fn(map[int8]float32(nil), (*Encoder).fastpathEncMapInt8Float32R, (*Decoder).fastpathDecMapInt8Float32R) + fn(map[int8]float64(nil), (*Encoder).fastpathEncMapInt8Float64R, (*Decoder).fastpathDecMapInt8Float64R) + fn(map[int8]bool(nil), (*Encoder).fastpathEncMapInt8BoolR, (*Decoder).fastpathDecMapInt8BoolR) + fn(map[int16]interface{}(nil), (*Encoder).fastpathEncMapInt16IntfR, (*Decoder).fastpathDecMapInt16IntfR) + fn(map[int16]string(nil), (*Encoder).fastpathEncMapInt16StringR, (*Decoder).fastpathDecMapInt16StringR) + fn(map[int16]uint(nil), (*Encoder).fastpathEncMapInt16UintR, (*Decoder).fastpathDecMapInt16UintR) + fn(map[int16]uint8(nil), (*Encoder).fastpathEncMapInt16Uint8R, (*Decoder).fastpathDecMapInt16Uint8R) + fn(map[int16]uint16(nil), (*Encoder).fastpathEncMapInt16Uint16R, (*Decoder).fastpathDecMapInt16Uint16R) + fn(map[int16]uint32(nil), (*Encoder).fastpathEncMapInt16Uint32R, (*Decoder).fastpathDecMapInt16Uint32R) + fn(map[int16]uint64(nil), (*Encoder).fastpathEncMapInt16Uint64R, (*Decoder).fastpathDecMapInt16Uint64R) + fn(map[int16]uintptr(nil), (*Encoder).fastpathEncMapInt16UintptrR, (*Decoder).fastpathDecMapInt16UintptrR) + fn(map[int16]int(nil), (*Encoder).fastpathEncMapInt16IntR, (*Decoder).fastpathDecMapInt16IntR) + fn(map[int16]int8(nil), (*Encoder).fastpathEncMapInt16Int8R, (*Decoder).fastpathDecMapInt16Int8R) + fn(map[int16]int16(nil), (*Encoder).fastpathEncMapInt16Int16R, (*Decoder).fastpathDecMapInt16Int16R) + fn(map[int16]int32(nil), (*Encoder).fastpathEncMapInt16Int32R, (*Decoder).fastpathDecMapInt16Int32R) + fn(map[int16]int64(nil), (*Encoder).fastpathEncMapInt16Int64R, (*Decoder).fastpathDecMapInt16Int64R) + fn(map[int16]float32(nil), (*Encoder).fastpathEncMapInt16Float32R, (*Decoder).fastpathDecMapInt16Float32R) + fn(map[int16]float64(nil), (*Encoder).fastpathEncMapInt16Float64R, (*Decoder).fastpathDecMapInt16Float64R) + fn(map[int16]bool(nil), (*Encoder).fastpathEncMapInt16BoolR, (*Decoder).fastpathDecMapInt16BoolR) + fn(map[int32]interface{}(nil), (*Encoder).fastpathEncMapInt32IntfR, (*Decoder).fastpathDecMapInt32IntfR) + fn(map[int32]string(nil), (*Encoder).fastpathEncMapInt32StringR, (*Decoder).fastpathDecMapInt32StringR) + fn(map[int32]uint(nil), (*Encoder).fastpathEncMapInt32UintR, (*Decoder).fastpathDecMapInt32UintR) + fn(map[int32]uint8(nil), (*Encoder).fastpathEncMapInt32Uint8R, (*Decoder).fastpathDecMapInt32Uint8R) + fn(map[int32]uint16(nil), (*Encoder).fastpathEncMapInt32Uint16R, (*Decoder).fastpathDecMapInt32Uint16R) + fn(map[int32]uint32(nil), (*Encoder).fastpathEncMapInt32Uint32R, (*Decoder).fastpathDecMapInt32Uint32R) + fn(map[int32]uint64(nil), (*Encoder).fastpathEncMapInt32Uint64R, (*Decoder).fastpathDecMapInt32Uint64R) + fn(map[int32]uintptr(nil), (*Encoder).fastpathEncMapInt32UintptrR, (*Decoder).fastpathDecMapInt32UintptrR) + fn(map[int32]int(nil), (*Encoder).fastpathEncMapInt32IntR, (*Decoder).fastpathDecMapInt32IntR) + fn(map[int32]int8(nil), (*Encoder).fastpathEncMapInt32Int8R, (*Decoder).fastpathDecMapInt32Int8R) + fn(map[int32]int16(nil), (*Encoder).fastpathEncMapInt32Int16R, (*Decoder).fastpathDecMapInt32Int16R) + fn(map[int32]int32(nil), (*Encoder).fastpathEncMapInt32Int32R, (*Decoder).fastpathDecMapInt32Int32R) + fn(map[int32]int64(nil), 
(*Encoder).fastpathEncMapInt32Int64R, (*Decoder).fastpathDecMapInt32Int64R) + fn(map[int32]float32(nil), (*Encoder).fastpathEncMapInt32Float32R, (*Decoder).fastpathDecMapInt32Float32R) + fn(map[int32]float64(nil), (*Encoder).fastpathEncMapInt32Float64R, (*Decoder).fastpathDecMapInt32Float64R) + fn(map[int32]bool(nil), (*Encoder).fastpathEncMapInt32BoolR, (*Decoder).fastpathDecMapInt32BoolR) + fn(map[int64]interface{}(nil), (*Encoder).fastpathEncMapInt64IntfR, (*Decoder).fastpathDecMapInt64IntfR) + fn(map[int64]string(nil), (*Encoder).fastpathEncMapInt64StringR, (*Decoder).fastpathDecMapInt64StringR) + fn(map[int64]uint(nil), (*Encoder).fastpathEncMapInt64UintR, (*Decoder).fastpathDecMapInt64UintR) + fn(map[int64]uint8(nil), (*Encoder).fastpathEncMapInt64Uint8R, (*Decoder).fastpathDecMapInt64Uint8R) + fn(map[int64]uint16(nil), (*Encoder).fastpathEncMapInt64Uint16R, (*Decoder).fastpathDecMapInt64Uint16R) + fn(map[int64]uint32(nil), (*Encoder).fastpathEncMapInt64Uint32R, (*Decoder).fastpathDecMapInt64Uint32R) + fn(map[int64]uint64(nil), (*Encoder).fastpathEncMapInt64Uint64R, (*Decoder).fastpathDecMapInt64Uint64R) + fn(map[int64]uintptr(nil), (*Encoder).fastpathEncMapInt64UintptrR, (*Decoder).fastpathDecMapInt64UintptrR) + fn(map[int64]int(nil), (*Encoder).fastpathEncMapInt64IntR, (*Decoder).fastpathDecMapInt64IntR) + fn(map[int64]int8(nil), (*Encoder).fastpathEncMapInt64Int8R, (*Decoder).fastpathDecMapInt64Int8R) + fn(map[int64]int16(nil), (*Encoder).fastpathEncMapInt64Int16R, (*Decoder).fastpathDecMapInt64Int16R) + fn(map[int64]int32(nil), (*Encoder).fastpathEncMapInt64Int32R, (*Decoder).fastpathDecMapInt64Int32R) + fn(map[int64]int64(nil), (*Encoder).fastpathEncMapInt64Int64R, (*Decoder).fastpathDecMapInt64Int64R) + fn(map[int64]float32(nil), (*Encoder).fastpathEncMapInt64Float32R, (*Decoder).fastpathDecMapInt64Float32R) + fn(map[int64]float64(nil), (*Encoder).fastpathEncMapInt64Float64R, (*Decoder).fastpathDecMapInt64Float64R) + fn(map[int64]bool(nil), (*Encoder).fastpathEncMapInt64BoolR, (*Decoder).fastpathDecMapInt64BoolR) + fn(map[bool]interface{}(nil), (*Encoder).fastpathEncMapBoolIntfR, (*Decoder).fastpathDecMapBoolIntfR) + fn(map[bool]string(nil), (*Encoder).fastpathEncMapBoolStringR, (*Decoder).fastpathDecMapBoolStringR) + fn(map[bool]uint(nil), (*Encoder).fastpathEncMapBoolUintR, (*Decoder).fastpathDecMapBoolUintR) + fn(map[bool]uint8(nil), (*Encoder).fastpathEncMapBoolUint8R, (*Decoder).fastpathDecMapBoolUint8R) + fn(map[bool]uint16(nil), (*Encoder).fastpathEncMapBoolUint16R, (*Decoder).fastpathDecMapBoolUint16R) + fn(map[bool]uint32(nil), (*Encoder).fastpathEncMapBoolUint32R, (*Decoder).fastpathDecMapBoolUint32R) + fn(map[bool]uint64(nil), (*Encoder).fastpathEncMapBoolUint64R, (*Decoder).fastpathDecMapBoolUint64R) + fn(map[bool]uintptr(nil), (*Encoder).fastpathEncMapBoolUintptrR, (*Decoder).fastpathDecMapBoolUintptrR) + fn(map[bool]int(nil), (*Encoder).fastpathEncMapBoolIntR, (*Decoder).fastpathDecMapBoolIntR) + fn(map[bool]int8(nil), (*Encoder).fastpathEncMapBoolInt8R, (*Decoder).fastpathDecMapBoolInt8R) + fn(map[bool]int16(nil), (*Encoder).fastpathEncMapBoolInt16R, (*Decoder).fastpathDecMapBoolInt16R) + fn(map[bool]int32(nil), (*Encoder).fastpathEncMapBoolInt32R, (*Decoder).fastpathDecMapBoolInt32R) + fn(map[bool]int64(nil), (*Encoder).fastpathEncMapBoolInt64R, (*Decoder).fastpathDecMapBoolInt64R) + fn(map[bool]float32(nil), (*Encoder).fastpathEncMapBoolFloat32R, (*Decoder).fastpathDecMapBoolFloat32R) + fn(map[bool]float64(nil), (*Encoder).fastpathEncMapBoolFloat64R, 
(*Decoder).fastpathDecMapBoolFloat64R) + fn(map[bool]bool(nil), (*Encoder).fastpathEncMapBoolBoolR, (*Decoder).fastpathDecMapBoolBoolR) sort.Sort(fastpathAslice(fastpathAV[:])) } @@ -375,2734 +381,1359 @@ func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { switch v := iv.(type) { case []interface{}: - fastpathTV.EncSliceIntfV(v, fastpathCheckNilTrue, e) + fastpathTV.EncSliceIntfV(v, e) case *[]interface{}: - fastpathTV.EncSliceIntfV(*v, fastpathCheckNilTrue, e) + fastpathTV.EncSliceIntfV(*v, e) case map[interface{}]interface{}: - fastpathTV.EncMapIntfIntfV(v, fastpathCheckNilTrue, e) + fastpathTV.EncMapIntfIntfV(v, e) case *map[interface{}]interface{}: - fastpathTV.EncMapIntfIntfV(*v, fastpathCheckNilTrue, e) + fastpathTV.EncMapIntfIntfV(*v, e) case map[interface{}]string: - fastpathTV.EncMapIntfStringV(v, fastpathCheckNilTrue, e) + fastpathTV.EncMapIntfStringV(v, e) case *map[interface{}]string: - fastpathTV.EncMapIntfStringV(*v, fastpathCheckNilTrue, e) + fastpathTV.EncMapIntfStringV(*v, e) case map[interface{}]uint: - fastpathTV.EncMapIntfUintV(v, fastpathCheckNilTrue, e) + fastpathTV.EncMapIntfUintV(v, e) case *map[interface{}]uint: - fastpathTV.EncMapIntfUintV(*v, fastpathCheckNilTrue, e) + fastpathTV.EncMapIntfUintV(*v, e) case map[interface{}]uint8: - fastpathTV.EncMapIntfUint8V(v, fastpathCheckNilTrue, e) + fastpathTV.EncMapIntfUint8V(v, e) case *map[interface{}]uint8: - fastpathTV.EncMapIntfUint8V(*v, fastpathCheckNilTrue, e) + fastpathTV.EncMapIntfUint8V(*v, e) case map[interface{}]uint16: - fastpathTV.EncMapIntfUint16V(v, fastpathCheckNilTrue, e) + fastpathTV.EncMapIntfUint16V(v, e) case *map[interface{}]uint16: - fastpathTV.EncMapIntfUint16V(*v, fastpathCheckNilTrue, e) + fastpathTV.EncMapIntfUint16V(*v, e) case map[interface{}]uint32: - fastpathTV.EncMapIntfUint32V(v, fastpathCheckNilTrue, e) + fastpathTV.EncMapIntfUint32V(v, e) case *map[interface{}]uint32: - fastpathTV.EncMapIntfUint32V(*v, fastpathCheckNilTrue, e) + fastpathTV.EncMapIntfUint32V(*v, e) case map[interface{}]uint64: - fastpathTV.EncMapIntfUint64V(v, fastpathCheckNilTrue, e) + fastpathTV.EncMapIntfUint64V(v, e) case *map[interface{}]uint64: - fastpathTV.EncMapIntfUint64V(*v, fastpathCheckNilTrue, e) + fastpathTV.EncMapIntfUint64V(*v, e) case map[interface{}]uintptr: - fastpathTV.EncMapIntfUintptrV(v, fastpathCheckNilTrue, e) + fastpathTV.EncMapIntfUintptrV(v, e) case *map[interface{}]uintptr: - fastpathTV.EncMapIntfUintptrV(*v, fastpathCheckNilTrue, e) + fastpathTV.EncMapIntfUintptrV(*v, e) case map[interface{}]int: - fastpathTV.EncMapIntfIntV(v, fastpathCheckNilTrue, e) + fastpathTV.EncMapIntfIntV(v, e) case *map[interface{}]int: - fastpathTV.EncMapIntfIntV(*v, fastpathCheckNilTrue, e) + fastpathTV.EncMapIntfIntV(*v, e) case map[interface{}]int8: - fastpathTV.EncMapIntfInt8V(v, fastpathCheckNilTrue, e) + fastpathTV.EncMapIntfInt8V(v, e) case *map[interface{}]int8: - fastpathTV.EncMapIntfInt8V(*v, fastpathCheckNilTrue, e) + fastpathTV.EncMapIntfInt8V(*v, e) case map[interface{}]int16: - fastpathTV.EncMapIntfInt16V(v, fastpathCheckNilTrue, e) + fastpathTV.EncMapIntfInt16V(v, e) case *map[interface{}]int16: - fastpathTV.EncMapIntfInt16V(*v, fastpathCheckNilTrue, e) + fastpathTV.EncMapIntfInt16V(*v, e) case map[interface{}]int32: - fastpathTV.EncMapIntfInt32V(v, fastpathCheckNilTrue, e) + fastpathTV.EncMapIntfInt32V(v, e) case *map[interface{}]int32: - fastpathTV.EncMapIntfInt32V(*v, fastpathCheckNilTrue, e) + fastpathTV.EncMapIntfInt32V(*v, e) case map[interface{}]int64: - 
[flattened diff hunk, codec fast-path generated code — too large to restore line-for-line here. The visible change is mechanical and uniform: every fastpathTV.EncSlice*V and fastpathTV.EncMap*V call in the encode type switch drops its fastpathCheckNilTrue argument, e.g.

-			fastpathTV.EncMapIntfInt64V(v, fastpathCheckNilTrue, e)
+			fastpathTV.EncMapIntfInt64V(v, e)

repeated for every supported slice and map key/value type combination (interface{}, string, float32/64, uint/uint8/16/32/64, uintptr, int/int8/16/32/64, bool), in both the value and pointer cases. The hunk then deletes the separate fastpathEncodeTypeSwitchSlice and fastpathEncodeTypeSwitchMap functions, which dispatched the same slice and map types with the fastpathCheckNilTrue argument.]
fastpathCheckNilTrue, e) - - case map[float64]uint16: - fastpathTV.EncMapFloat64Uint16V(v, fastpathCheckNilTrue, e) - case *map[float64]uint16: - fastpathTV.EncMapFloat64Uint16V(*v, fastpathCheckNilTrue, e) - - case map[float64]uint32: - fastpathTV.EncMapFloat64Uint32V(v, fastpathCheckNilTrue, e) - case *map[float64]uint32: - fastpathTV.EncMapFloat64Uint32V(*v, fastpathCheckNilTrue, e) - - case map[float64]uint64: - fastpathTV.EncMapFloat64Uint64V(v, fastpathCheckNilTrue, e) - case *map[float64]uint64: - fastpathTV.EncMapFloat64Uint64V(*v, fastpathCheckNilTrue, e) - - case map[float64]uintptr: - fastpathTV.EncMapFloat64UintptrV(v, fastpathCheckNilTrue, e) - case *map[float64]uintptr: - fastpathTV.EncMapFloat64UintptrV(*v, fastpathCheckNilTrue, e) - - case map[float64]int: - fastpathTV.EncMapFloat64IntV(v, fastpathCheckNilTrue, e) - case *map[float64]int: - fastpathTV.EncMapFloat64IntV(*v, fastpathCheckNilTrue, e) - - case map[float64]int8: - fastpathTV.EncMapFloat64Int8V(v, fastpathCheckNilTrue, e) - case *map[float64]int8: - fastpathTV.EncMapFloat64Int8V(*v, fastpathCheckNilTrue, e) - - case map[float64]int16: - fastpathTV.EncMapFloat64Int16V(v, fastpathCheckNilTrue, e) - case *map[float64]int16: - fastpathTV.EncMapFloat64Int16V(*v, fastpathCheckNilTrue, e) - - case map[float64]int32: - fastpathTV.EncMapFloat64Int32V(v, fastpathCheckNilTrue, e) - case *map[float64]int32: - fastpathTV.EncMapFloat64Int32V(*v, fastpathCheckNilTrue, e) - - case map[float64]int64: - fastpathTV.EncMapFloat64Int64V(v, fastpathCheckNilTrue, e) - case *map[float64]int64: - fastpathTV.EncMapFloat64Int64V(*v, fastpathCheckNilTrue, e) - - case map[float64]float32: - fastpathTV.EncMapFloat64Float32V(v, fastpathCheckNilTrue, e) - case *map[float64]float32: - fastpathTV.EncMapFloat64Float32V(*v, fastpathCheckNilTrue, e) - - case map[float64]float64: - fastpathTV.EncMapFloat64Float64V(v, fastpathCheckNilTrue, e) - case *map[float64]float64: - fastpathTV.EncMapFloat64Float64V(*v, fastpathCheckNilTrue, e) - - case map[float64]bool: - fastpathTV.EncMapFloat64BoolV(v, fastpathCheckNilTrue, e) - case *map[float64]bool: - fastpathTV.EncMapFloat64BoolV(*v, fastpathCheckNilTrue, e) - - case map[uint]interface{}: - fastpathTV.EncMapUintIntfV(v, fastpathCheckNilTrue, e) - case *map[uint]interface{}: - fastpathTV.EncMapUintIntfV(*v, fastpathCheckNilTrue, e) - - case map[uint]string: - fastpathTV.EncMapUintStringV(v, fastpathCheckNilTrue, e) - case *map[uint]string: - fastpathTV.EncMapUintStringV(*v, fastpathCheckNilTrue, e) - - case map[uint]uint: - fastpathTV.EncMapUintUintV(v, fastpathCheckNilTrue, e) - case *map[uint]uint: - fastpathTV.EncMapUintUintV(*v, fastpathCheckNilTrue, e) - - case map[uint]uint8: - fastpathTV.EncMapUintUint8V(v, fastpathCheckNilTrue, e) - case *map[uint]uint8: - fastpathTV.EncMapUintUint8V(*v, fastpathCheckNilTrue, e) - - case map[uint]uint16: - fastpathTV.EncMapUintUint16V(v, fastpathCheckNilTrue, e) - case *map[uint]uint16: - fastpathTV.EncMapUintUint16V(*v, fastpathCheckNilTrue, e) - - case map[uint]uint32: - fastpathTV.EncMapUintUint32V(v, fastpathCheckNilTrue, e) - case *map[uint]uint32: - fastpathTV.EncMapUintUint32V(*v, fastpathCheckNilTrue, e) - - case map[uint]uint64: - fastpathTV.EncMapUintUint64V(v, fastpathCheckNilTrue, e) - case *map[uint]uint64: - fastpathTV.EncMapUintUint64V(*v, fastpathCheckNilTrue, e) - - case map[uint]uintptr: - fastpathTV.EncMapUintUintptrV(v, fastpathCheckNilTrue, e) - case *map[uint]uintptr: - fastpathTV.EncMapUintUintptrV(*v, fastpathCheckNilTrue, e) - - case 
map[uint]int: - fastpathTV.EncMapUintIntV(v, fastpathCheckNilTrue, e) - case *map[uint]int: - fastpathTV.EncMapUintIntV(*v, fastpathCheckNilTrue, e) - - case map[uint]int8: - fastpathTV.EncMapUintInt8V(v, fastpathCheckNilTrue, e) - case *map[uint]int8: - fastpathTV.EncMapUintInt8V(*v, fastpathCheckNilTrue, e) - - case map[uint]int16: - fastpathTV.EncMapUintInt16V(v, fastpathCheckNilTrue, e) - case *map[uint]int16: - fastpathTV.EncMapUintInt16V(*v, fastpathCheckNilTrue, e) - - case map[uint]int32: - fastpathTV.EncMapUintInt32V(v, fastpathCheckNilTrue, e) - case *map[uint]int32: - fastpathTV.EncMapUintInt32V(*v, fastpathCheckNilTrue, e) - - case map[uint]int64: - fastpathTV.EncMapUintInt64V(v, fastpathCheckNilTrue, e) - case *map[uint]int64: - fastpathTV.EncMapUintInt64V(*v, fastpathCheckNilTrue, e) - - case map[uint]float32: - fastpathTV.EncMapUintFloat32V(v, fastpathCheckNilTrue, e) - case *map[uint]float32: - fastpathTV.EncMapUintFloat32V(*v, fastpathCheckNilTrue, e) - - case map[uint]float64: - fastpathTV.EncMapUintFloat64V(v, fastpathCheckNilTrue, e) - case *map[uint]float64: - fastpathTV.EncMapUintFloat64V(*v, fastpathCheckNilTrue, e) - - case map[uint]bool: - fastpathTV.EncMapUintBoolV(v, fastpathCheckNilTrue, e) - case *map[uint]bool: - fastpathTV.EncMapUintBoolV(*v, fastpathCheckNilTrue, e) - - case map[uint8]interface{}: - fastpathTV.EncMapUint8IntfV(v, fastpathCheckNilTrue, e) - case *map[uint8]interface{}: - fastpathTV.EncMapUint8IntfV(*v, fastpathCheckNilTrue, e) - - case map[uint8]string: - fastpathTV.EncMapUint8StringV(v, fastpathCheckNilTrue, e) - case *map[uint8]string: - fastpathTV.EncMapUint8StringV(*v, fastpathCheckNilTrue, e) - - case map[uint8]uint: - fastpathTV.EncMapUint8UintV(v, fastpathCheckNilTrue, e) - case *map[uint8]uint: - fastpathTV.EncMapUint8UintV(*v, fastpathCheckNilTrue, e) - - case map[uint8]uint8: - fastpathTV.EncMapUint8Uint8V(v, fastpathCheckNilTrue, e) - case *map[uint8]uint8: - fastpathTV.EncMapUint8Uint8V(*v, fastpathCheckNilTrue, e) - - case map[uint8]uint16: - fastpathTV.EncMapUint8Uint16V(v, fastpathCheckNilTrue, e) - case *map[uint8]uint16: - fastpathTV.EncMapUint8Uint16V(*v, fastpathCheckNilTrue, e) - - case map[uint8]uint32: - fastpathTV.EncMapUint8Uint32V(v, fastpathCheckNilTrue, e) - case *map[uint8]uint32: - fastpathTV.EncMapUint8Uint32V(*v, fastpathCheckNilTrue, e) - - case map[uint8]uint64: - fastpathTV.EncMapUint8Uint64V(v, fastpathCheckNilTrue, e) - case *map[uint8]uint64: - fastpathTV.EncMapUint8Uint64V(*v, fastpathCheckNilTrue, e) - - case map[uint8]uintptr: - fastpathTV.EncMapUint8UintptrV(v, fastpathCheckNilTrue, e) - case *map[uint8]uintptr: - fastpathTV.EncMapUint8UintptrV(*v, fastpathCheckNilTrue, e) - - case map[uint8]int: - fastpathTV.EncMapUint8IntV(v, fastpathCheckNilTrue, e) - case *map[uint8]int: - fastpathTV.EncMapUint8IntV(*v, fastpathCheckNilTrue, e) - - case map[uint8]int8: - fastpathTV.EncMapUint8Int8V(v, fastpathCheckNilTrue, e) - case *map[uint8]int8: - fastpathTV.EncMapUint8Int8V(*v, fastpathCheckNilTrue, e) - - case map[uint8]int16: - fastpathTV.EncMapUint8Int16V(v, fastpathCheckNilTrue, e) - case *map[uint8]int16: - fastpathTV.EncMapUint8Int16V(*v, fastpathCheckNilTrue, e) - - case map[uint8]int32: - fastpathTV.EncMapUint8Int32V(v, fastpathCheckNilTrue, e) - case *map[uint8]int32: - fastpathTV.EncMapUint8Int32V(*v, fastpathCheckNilTrue, e) - - case map[uint8]int64: - fastpathTV.EncMapUint8Int64V(v, fastpathCheckNilTrue, e) - case *map[uint8]int64: - fastpathTV.EncMapUint8Int64V(*v, fastpathCheckNilTrue, e) - - 
case map[uint8]float32: - fastpathTV.EncMapUint8Float32V(v, fastpathCheckNilTrue, e) - case *map[uint8]float32: - fastpathTV.EncMapUint8Float32V(*v, fastpathCheckNilTrue, e) - - case map[uint8]float64: - fastpathTV.EncMapUint8Float64V(v, fastpathCheckNilTrue, e) - case *map[uint8]float64: - fastpathTV.EncMapUint8Float64V(*v, fastpathCheckNilTrue, e) - - case map[uint8]bool: - fastpathTV.EncMapUint8BoolV(v, fastpathCheckNilTrue, e) - case *map[uint8]bool: - fastpathTV.EncMapUint8BoolV(*v, fastpathCheckNilTrue, e) - - case map[uint16]interface{}: - fastpathTV.EncMapUint16IntfV(v, fastpathCheckNilTrue, e) - case *map[uint16]interface{}: - fastpathTV.EncMapUint16IntfV(*v, fastpathCheckNilTrue, e) - - case map[uint16]string: - fastpathTV.EncMapUint16StringV(v, fastpathCheckNilTrue, e) - case *map[uint16]string: - fastpathTV.EncMapUint16StringV(*v, fastpathCheckNilTrue, e) - - case map[uint16]uint: - fastpathTV.EncMapUint16UintV(v, fastpathCheckNilTrue, e) - case *map[uint16]uint: - fastpathTV.EncMapUint16UintV(*v, fastpathCheckNilTrue, e) - - case map[uint16]uint8: - fastpathTV.EncMapUint16Uint8V(v, fastpathCheckNilTrue, e) - case *map[uint16]uint8: - fastpathTV.EncMapUint16Uint8V(*v, fastpathCheckNilTrue, e) - - case map[uint16]uint16: - fastpathTV.EncMapUint16Uint16V(v, fastpathCheckNilTrue, e) - case *map[uint16]uint16: - fastpathTV.EncMapUint16Uint16V(*v, fastpathCheckNilTrue, e) - - case map[uint16]uint32: - fastpathTV.EncMapUint16Uint32V(v, fastpathCheckNilTrue, e) - case *map[uint16]uint32: - fastpathTV.EncMapUint16Uint32V(*v, fastpathCheckNilTrue, e) - - case map[uint16]uint64: - fastpathTV.EncMapUint16Uint64V(v, fastpathCheckNilTrue, e) - case *map[uint16]uint64: - fastpathTV.EncMapUint16Uint64V(*v, fastpathCheckNilTrue, e) - - case map[uint16]uintptr: - fastpathTV.EncMapUint16UintptrV(v, fastpathCheckNilTrue, e) - case *map[uint16]uintptr: - fastpathTV.EncMapUint16UintptrV(*v, fastpathCheckNilTrue, e) - - case map[uint16]int: - fastpathTV.EncMapUint16IntV(v, fastpathCheckNilTrue, e) - case *map[uint16]int: - fastpathTV.EncMapUint16IntV(*v, fastpathCheckNilTrue, e) - - case map[uint16]int8: - fastpathTV.EncMapUint16Int8V(v, fastpathCheckNilTrue, e) - case *map[uint16]int8: - fastpathTV.EncMapUint16Int8V(*v, fastpathCheckNilTrue, e) - - case map[uint16]int16: - fastpathTV.EncMapUint16Int16V(v, fastpathCheckNilTrue, e) - case *map[uint16]int16: - fastpathTV.EncMapUint16Int16V(*v, fastpathCheckNilTrue, e) - - case map[uint16]int32: - fastpathTV.EncMapUint16Int32V(v, fastpathCheckNilTrue, e) - case *map[uint16]int32: - fastpathTV.EncMapUint16Int32V(*v, fastpathCheckNilTrue, e) - - case map[uint16]int64: - fastpathTV.EncMapUint16Int64V(v, fastpathCheckNilTrue, e) - case *map[uint16]int64: - fastpathTV.EncMapUint16Int64V(*v, fastpathCheckNilTrue, e) - - case map[uint16]float32: - fastpathTV.EncMapUint16Float32V(v, fastpathCheckNilTrue, e) - case *map[uint16]float32: - fastpathTV.EncMapUint16Float32V(*v, fastpathCheckNilTrue, e) - - case map[uint16]float64: - fastpathTV.EncMapUint16Float64V(v, fastpathCheckNilTrue, e) - case *map[uint16]float64: - fastpathTV.EncMapUint16Float64V(*v, fastpathCheckNilTrue, e) - - case map[uint16]bool: - fastpathTV.EncMapUint16BoolV(v, fastpathCheckNilTrue, e) - case *map[uint16]bool: - fastpathTV.EncMapUint16BoolV(*v, fastpathCheckNilTrue, e) - - case map[uint32]interface{}: - fastpathTV.EncMapUint32IntfV(v, fastpathCheckNilTrue, e) - case *map[uint32]interface{}: - fastpathTV.EncMapUint32IntfV(*v, fastpathCheckNilTrue, e) - - case map[uint32]string: - 
fastpathTV.EncMapUint32StringV(v, fastpathCheckNilTrue, e) - case *map[uint32]string: - fastpathTV.EncMapUint32StringV(*v, fastpathCheckNilTrue, e) - - case map[uint32]uint: - fastpathTV.EncMapUint32UintV(v, fastpathCheckNilTrue, e) - case *map[uint32]uint: - fastpathTV.EncMapUint32UintV(*v, fastpathCheckNilTrue, e) - - case map[uint32]uint8: - fastpathTV.EncMapUint32Uint8V(v, fastpathCheckNilTrue, e) - case *map[uint32]uint8: - fastpathTV.EncMapUint32Uint8V(*v, fastpathCheckNilTrue, e) - - case map[uint32]uint16: - fastpathTV.EncMapUint32Uint16V(v, fastpathCheckNilTrue, e) - case *map[uint32]uint16: - fastpathTV.EncMapUint32Uint16V(*v, fastpathCheckNilTrue, e) - - case map[uint32]uint32: - fastpathTV.EncMapUint32Uint32V(v, fastpathCheckNilTrue, e) - case *map[uint32]uint32: - fastpathTV.EncMapUint32Uint32V(*v, fastpathCheckNilTrue, e) - - case map[uint32]uint64: - fastpathTV.EncMapUint32Uint64V(v, fastpathCheckNilTrue, e) - case *map[uint32]uint64: - fastpathTV.EncMapUint32Uint64V(*v, fastpathCheckNilTrue, e) - - case map[uint32]uintptr: - fastpathTV.EncMapUint32UintptrV(v, fastpathCheckNilTrue, e) - case *map[uint32]uintptr: - fastpathTV.EncMapUint32UintptrV(*v, fastpathCheckNilTrue, e) - - case map[uint32]int: - fastpathTV.EncMapUint32IntV(v, fastpathCheckNilTrue, e) - case *map[uint32]int: - fastpathTV.EncMapUint32IntV(*v, fastpathCheckNilTrue, e) - - case map[uint32]int8: - fastpathTV.EncMapUint32Int8V(v, fastpathCheckNilTrue, e) - case *map[uint32]int8: - fastpathTV.EncMapUint32Int8V(*v, fastpathCheckNilTrue, e) - - case map[uint32]int16: - fastpathTV.EncMapUint32Int16V(v, fastpathCheckNilTrue, e) - case *map[uint32]int16: - fastpathTV.EncMapUint32Int16V(*v, fastpathCheckNilTrue, e) - - case map[uint32]int32: - fastpathTV.EncMapUint32Int32V(v, fastpathCheckNilTrue, e) - case *map[uint32]int32: - fastpathTV.EncMapUint32Int32V(*v, fastpathCheckNilTrue, e) - - case map[uint32]int64: - fastpathTV.EncMapUint32Int64V(v, fastpathCheckNilTrue, e) - case *map[uint32]int64: - fastpathTV.EncMapUint32Int64V(*v, fastpathCheckNilTrue, e) - - case map[uint32]float32: - fastpathTV.EncMapUint32Float32V(v, fastpathCheckNilTrue, e) - case *map[uint32]float32: - fastpathTV.EncMapUint32Float32V(*v, fastpathCheckNilTrue, e) - - case map[uint32]float64: - fastpathTV.EncMapUint32Float64V(v, fastpathCheckNilTrue, e) - case *map[uint32]float64: - fastpathTV.EncMapUint32Float64V(*v, fastpathCheckNilTrue, e) - - case map[uint32]bool: - fastpathTV.EncMapUint32BoolV(v, fastpathCheckNilTrue, e) - case *map[uint32]bool: - fastpathTV.EncMapUint32BoolV(*v, fastpathCheckNilTrue, e) - - case map[uint64]interface{}: - fastpathTV.EncMapUint64IntfV(v, fastpathCheckNilTrue, e) - case *map[uint64]interface{}: - fastpathTV.EncMapUint64IntfV(*v, fastpathCheckNilTrue, e) - - case map[uint64]string: - fastpathTV.EncMapUint64StringV(v, fastpathCheckNilTrue, e) - case *map[uint64]string: - fastpathTV.EncMapUint64StringV(*v, fastpathCheckNilTrue, e) - - case map[uint64]uint: - fastpathTV.EncMapUint64UintV(v, fastpathCheckNilTrue, e) - case *map[uint64]uint: - fastpathTV.EncMapUint64UintV(*v, fastpathCheckNilTrue, e) - - case map[uint64]uint8: - fastpathTV.EncMapUint64Uint8V(v, fastpathCheckNilTrue, e) - case *map[uint64]uint8: - fastpathTV.EncMapUint64Uint8V(*v, fastpathCheckNilTrue, e) - - case map[uint64]uint16: - fastpathTV.EncMapUint64Uint16V(v, fastpathCheckNilTrue, e) - case *map[uint64]uint16: - fastpathTV.EncMapUint64Uint16V(*v, fastpathCheckNilTrue, e) - - case map[uint64]uint32: - fastpathTV.EncMapUint64Uint32V(v, 
fastpathCheckNilTrue, e) - case *map[uint64]uint32: - fastpathTV.EncMapUint64Uint32V(*v, fastpathCheckNilTrue, e) - - case map[uint64]uint64: - fastpathTV.EncMapUint64Uint64V(v, fastpathCheckNilTrue, e) - case *map[uint64]uint64: - fastpathTV.EncMapUint64Uint64V(*v, fastpathCheckNilTrue, e) - - case map[uint64]uintptr: - fastpathTV.EncMapUint64UintptrV(v, fastpathCheckNilTrue, e) - case *map[uint64]uintptr: - fastpathTV.EncMapUint64UintptrV(*v, fastpathCheckNilTrue, e) - - case map[uint64]int: - fastpathTV.EncMapUint64IntV(v, fastpathCheckNilTrue, e) - case *map[uint64]int: - fastpathTV.EncMapUint64IntV(*v, fastpathCheckNilTrue, e) - - case map[uint64]int8: - fastpathTV.EncMapUint64Int8V(v, fastpathCheckNilTrue, e) - case *map[uint64]int8: - fastpathTV.EncMapUint64Int8V(*v, fastpathCheckNilTrue, e) - - case map[uint64]int16: - fastpathTV.EncMapUint64Int16V(v, fastpathCheckNilTrue, e) - case *map[uint64]int16: - fastpathTV.EncMapUint64Int16V(*v, fastpathCheckNilTrue, e) - - case map[uint64]int32: - fastpathTV.EncMapUint64Int32V(v, fastpathCheckNilTrue, e) - case *map[uint64]int32: - fastpathTV.EncMapUint64Int32V(*v, fastpathCheckNilTrue, e) - - case map[uint64]int64: - fastpathTV.EncMapUint64Int64V(v, fastpathCheckNilTrue, e) - case *map[uint64]int64: - fastpathTV.EncMapUint64Int64V(*v, fastpathCheckNilTrue, e) - - case map[uint64]float32: - fastpathTV.EncMapUint64Float32V(v, fastpathCheckNilTrue, e) - case *map[uint64]float32: - fastpathTV.EncMapUint64Float32V(*v, fastpathCheckNilTrue, e) - - case map[uint64]float64: - fastpathTV.EncMapUint64Float64V(v, fastpathCheckNilTrue, e) - case *map[uint64]float64: - fastpathTV.EncMapUint64Float64V(*v, fastpathCheckNilTrue, e) - - case map[uint64]bool: - fastpathTV.EncMapUint64BoolV(v, fastpathCheckNilTrue, e) - case *map[uint64]bool: - fastpathTV.EncMapUint64BoolV(*v, fastpathCheckNilTrue, e) - - case map[uintptr]interface{}: - fastpathTV.EncMapUintptrIntfV(v, fastpathCheckNilTrue, e) - case *map[uintptr]interface{}: - fastpathTV.EncMapUintptrIntfV(*v, fastpathCheckNilTrue, e) - - case map[uintptr]string: - fastpathTV.EncMapUintptrStringV(v, fastpathCheckNilTrue, e) - case *map[uintptr]string: - fastpathTV.EncMapUintptrStringV(*v, fastpathCheckNilTrue, e) - - case map[uintptr]uint: - fastpathTV.EncMapUintptrUintV(v, fastpathCheckNilTrue, e) - case *map[uintptr]uint: - fastpathTV.EncMapUintptrUintV(*v, fastpathCheckNilTrue, e) - - case map[uintptr]uint8: - fastpathTV.EncMapUintptrUint8V(v, fastpathCheckNilTrue, e) - case *map[uintptr]uint8: - fastpathTV.EncMapUintptrUint8V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]uint16: - fastpathTV.EncMapUintptrUint16V(v, fastpathCheckNilTrue, e) - case *map[uintptr]uint16: - fastpathTV.EncMapUintptrUint16V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]uint32: - fastpathTV.EncMapUintptrUint32V(v, fastpathCheckNilTrue, e) - case *map[uintptr]uint32: - fastpathTV.EncMapUintptrUint32V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]uint64: - fastpathTV.EncMapUintptrUint64V(v, fastpathCheckNilTrue, e) - case *map[uintptr]uint64: - fastpathTV.EncMapUintptrUint64V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]uintptr: - fastpathTV.EncMapUintptrUintptrV(v, fastpathCheckNilTrue, e) - case *map[uintptr]uintptr: - fastpathTV.EncMapUintptrUintptrV(*v, fastpathCheckNilTrue, e) - - case map[uintptr]int: - fastpathTV.EncMapUintptrIntV(v, fastpathCheckNilTrue, e) - case *map[uintptr]int: - fastpathTV.EncMapUintptrIntV(*v, fastpathCheckNilTrue, e) - - case map[uintptr]int8: - fastpathTV.EncMapUintptrInt8V(v, 
fastpathCheckNilTrue, e) - case *map[uintptr]int8: - fastpathTV.EncMapUintptrInt8V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]int16: - fastpathTV.EncMapUintptrInt16V(v, fastpathCheckNilTrue, e) - case *map[uintptr]int16: - fastpathTV.EncMapUintptrInt16V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]int32: - fastpathTV.EncMapUintptrInt32V(v, fastpathCheckNilTrue, e) - case *map[uintptr]int32: - fastpathTV.EncMapUintptrInt32V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]int64: - fastpathTV.EncMapUintptrInt64V(v, fastpathCheckNilTrue, e) - case *map[uintptr]int64: - fastpathTV.EncMapUintptrInt64V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]float32: - fastpathTV.EncMapUintptrFloat32V(v, fastpathCheckNilTrue, e) - case *map[uintptr]float32: - fastpathTV.EncMapUintptrFloat32V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]float64: - fastpathTV.EncMapUintptrFloat64V(v, fastpathCheckNilTrue, e) - case *map[uintptr]float64: - fastpathTV.EncMapUintptrFloat64V(*v, fastpathCheckNilTrue, e) - - case map[uintptr]bool: - fastpathTV.EncMapUintptrBoolV(v, fastpathCheckNilTrue, e) - case *map[uintptr]bool: - fastpathTV.EncMapUintptrBoolV(*v, fastpathCheckNilTrue, e) - - case map[int]interface{}: - fastpathTV.EncMapIntIntfV(v, fastpathCheckNilTrue, e) - case *map[int]interface{}: - fastpathTV.EncMapIntIntfV(*v, fastpathCheckNilTrue, e) - - case map[int]string: - fastpathTV.EncMapIntStringV(v, fastpathCheckNilTrue, e) - case *map[int]string: - fastpathTV.EncMapIntStringV(*v, fastpathCheckNilTrue, e) - - case map[int]uint: - fastpathTV.EncMapIntUintV(v, fastpathCheckNilTrue, e) - case *map[int]uint: - fastpathTV.EncMapIntUintV(*v, fastpathCheckNilTrue, e) - - case map[int]uint8: - fastpathTV.EncMapIntUint8V(v, fastpathCheckNilTrue, e) - case *map[int]uint8: - fastpathTV.EncMapIntUint8V(*v, fastpathCheckNilTrue, e) - - case map[int]uint16: - fastpathTV.EncMapIntUint16V(v, fastpathCheckNilTrue, e) - case *map[int]uint16: - fastpathTV.EncMapIntUint16V(*v, fastpathCheckNilTrue, e) - - case map[int]uint32: - fastpathTV.EncMapIntUint32V(v, fastpathCheckNilTrue, e) - case *map[int]uint32: - fastpathTV.EncMapIntUint32V(*v, fastpathCheckNilTrue, e) - - case map[int]uint64: - fastpathTV.EncMapIntUint64V(v, fastpathCheckNilTrue, e) - case *map[int]uint64: - fastpathTV.EncMapIntUint64V(*v, fastpathCheckNilTrue, e) - - case map[int]uintptr: - fastpathTV.EncMapIntUintptrV(v, fastpathCheckNilTrue, e) - case *map[int]uintptr: - fastpathTV.EncMapIntUintptrV(*v, fastpathCheckNilTrue, e) - - case map[int]int: - fastpathTV.EncMapIntIntV(v, fastpathCheckNilTrue, e) - case *map[int]int: - fastpathTV.EncMapIntIntV(*v, fastpathCheckNilTrue, e) - - case map[int]int8: - fastpathTV.EncMapIntInt8V(v, fastpathCheckNilTrue, e) - case *map[int]int8: - fastpathTV.EncMapIntInt8V(*v, fastpathCheckNilTrue, e) - - case map[int]int16: - fastpathTV.EncMapIntInt16V(v, fastpathCheckNilTrue, e) - case *map[int]int16: - fastpathTV.EncMapIntInt16V(*v, fastpathCheckNilTrue, e) - - case map[int]int32: - fastpathTV.EncMapIntInt32V(v, fastpathCheckNilTrue, e) - case *map[int]int32: - fastpathTV.EncMapIntInt32V(*v, fastpathCheckNilTrue, e) - - case map[int]int64: - fastpathTV.EncMapIntInt64V(v, fastpathCheckNilTrue, e) - case *map[int]int64: - fastpathTV.EncMapIntInt64V(*v, fastpathCheckNilTrue, e) - - case map[int]float32: - fastpathTV.EncMapIntFloat32V(v, fastpathCheckNilTrue, e) - case *map[int]float32: - fastpathTV.EncMapIntFloat32V(*v, fastpathCheckNilTrue, e) - - case map[int]float64: - fastpathTV.EncMapIntFloat64V(v, 
fastpathCheckNilTrue, e) - case *map[int]float64: - fastpathTV.EncMapIntFloat64V(*v, fastpathCheckNilTrue, e) - - case map[int]bool: - fastpathTV.EncMapIntBoolV(v, fastpathCheckNilTrue, e) - case *map[int]bool: - fastpathTV.EncMapIntBoolV(*v, fastpathCheckNilTrue, e) - - case map[int8]interface{}: - fastpathTV.EncMapInt8IntfV(v, fastpathCheckNilTrue, e) - case *map[int8]interface{}: - fastpathTV.EncMapInt8IntfV(*v, fastpathCheckNilTrue, e) - - case map[int8]string: - fastpathTV.EncMapInt8StringV(v, fastpathCheckNilTrue, e) - case *map[int8]string: - fastpathTV.EncMapInt8StringV(*v, fastpathCheckNilTrue, e) - - case map[int8]uint: - fastpathTV.EncMapInt8UintV(v, fastpathCheckNilTrue, e) - case *map[int8]uint: - fastpathTV.EncMapInt8UintV(*v, fastpathCheckNilTrue, e) - - case map[int8]uint8: - fastpathTV.EncMapInt8Uint8V(v, fastpathCheckNilTrue, e) - case *map[int8]uint8: - fastpathTV.EncMapInt8Uint8V(*v, fastpathCheckNilTrue, e) - - case map[int8]uint16: - fastpathTV.EncMapInt8Uint16V(v, fastpathCheckNilTrue, e) - case *map[int8]uint16: - fastpathTV.EncMapInt8Uint16V(*v, fastpathCheckNilTrue, e) - - case map[int8]uint32: - fastpathTV.EncMapInt8Uint32V(v, fastpathCheckNilTrue, e) - case *map[int8]uint32: - fastpathTV.EncMapInt8Uint32V(*v, fastpathCheckNilTrue, e) - - case map[int8]uint64: - fastpathTV.EncMapInt8Uint64V(v, fastpathCheckNilTrue, e) - case *map[int8]uint64: - fastpathTV.EncMapInt8Uint64V(*v, fastpathCheckNilTrue, e) - - case map[int8]uintptr: - fastpathTV.EncMapInt8UintptrV(v, fastpathCheckNilTrue, e) - case *map[int8]uintptr: - fastpathTV.EncMapInt8UintptrV(*v, fastpathCheckNilTrue, e) - - case map[int8]int: - fastpathTV.EncMapInt8IntV(v, fastpathCheckNilTrue, e) - case *map[int8]int: - fastpathTV.EncMapInt8IntV(*v, fastpathCheckNilTrue, e) - - case map[int8]int8: - fastpathTV.EncMapInt8Int8V(v, fastpathCheckNilTrue, e) - case *map[int8]int8: - fastpathTV.EncMapInt8Int8V(*v, fastpathCheckNilTrue, e) - - case map[int8]int16: - fastpathTV.EncMapInt8Int16V(v, fastpathCheckNilTrue, e) - case *map[int8]int16: - fastpathTV.EncMapInt8Int16V(*v, fastpathCheckNilTrue, e) - - case map[int8]int32: - fastpathTV.EncMapInt8Int32V(v, fastpathCheckNilTrue, e) - case *map[int8]int32: - fastpathTV.EncMapInt8Int32V(*v, fastpathCheckNilTrue, e) - - case map[int8]int64: - fastpathTV.EncMapInt8Int64V(v, fastpathCheckNilTrue, e) - case *map[int8]int64: - fastpathTV.EncMapInt8Int64V(*v, fastpathCheckNilTrue, e) - - case map[int8]float32: - fastpathTV.EncMapInt8Float32V(v, fastpathCheckNilTrue, e) - case *map[int8]float32: - fastpathTV.EncMapInt8Float32V(*v, fastpathCheckNilTrue, e) - - case map[int8]float64: - fastpathTV.EncMapInt8Float64V(v, fastpathCheckNilTrue, e) - case *map[int8]float64: - fastpathTV.EncMapInt8Float64V(*v, fastpathCheckNilTrue, e) - - case map[int8]bool: - fastpathTV.EncMapInt8BoolV(v, fastpathCheckNilTrue, e) - case *map[int8]bool: - fastpathTV.EncMapInt8BoolV(*v, fastpathCheckNilTrue, e) - - case map[int16]interface{}: - fastpathTV.EncMapInt16IntfV(v, fastpathCheckNilTrue, e) - case *map[int16]interface{}: - fastpathTV.EncMapInt16IntfV(*v, fastpathCheckNilTrue, e) - - case map[int16]string: - fastpathTV.EncMapInt16StringV(v, fastpathCheckNilTrue, e) - case *map[int16]string: - fastpathTV.EncMapInt16StringV(*v, fastpathCheckNilTrue, e) - - case map[int16]uint: - fastpathTV.EncMapInt16UintV(v, fastpathCheckNilTrue, e) - case *map[int16]uint: - fastpathTV.EncMapInt16UintV(*v, fastpathCheckNilTrue, e) - - case map[int16]uint8: - fastpathTV.EncMapInt16Uint8V(v, 
fastpathCheckNilTrue, e) - case *map[int16]uint8: - fastpathTV.EncMapInt16Uint8V(*v, fastpathCheckNilTrue, e) - - case map[int16]uint16: - fastpathTV.EncMapInt16Uint16V(v, fastpathCheckNilTrue, e) - case *map[int16]uint16: - fastpathTV.EncMapInt16Uint16V(*v, fastpathCheckNilTrue, e) - - case map[int16]uint32: - fastpathTV.EncMapInt16Uint32V(v, fastpathCheckNilTrue, e) - case *map[int16]uint32: - fastpathTV.EncMapInt16Uint32V(*v, fastpathCheckNilTrue, e) - - case map[int16]uint64: - fastpathTV.EncMapInt16Uint64V(v, fastpathCheckNilTrue, e) - case *map[int16]uint64: - fastpathTV.EncMapInt16Uint64V(*v, fastpathCheckNilTrue, e) - - case map[int16]uintptr: - fastpathTV.EncMapInt16UintptrV(v, fastpathCheckNilTrue, e) - case *map[int16]uintptr: - fastpathTV.EncMapInt16UintptrV(*v, fastpathCheckNilTrue, e) - - case map[int16]int: - fastpathTV.EncMapInt16IntV(v, fastpathCheckNilTrue, e) - case *map[int16]int: - fastpathTV.EncMapInt16IntV(*v, fastpathCheckNilTrue, e) - - case map[int16]int8: - fastpathTV.EncMapInt16Int8V(v, fastpathCheckNilTrue, e) - case *map[int16]int8: - fastpathTV.EncMapInt16Int8V(*v, fastpathCheckNilTrue, e) - - case map[int16]int16: - fastpathTV.EncMapInt16Int16V(v, fastpathCheckNilTrue, e) - case *map[int16]int16: - fastpathTV.EncMapInt16Int16V(*v, fastpathCheckNilTrue, e) - - case map[int16]int32: - fastpathTV.EncMapInt16Int32V(v, fastpathCheckNilTrue, e) - case *map[int16]int32: - fastpathTV.EncMapInt16Int32V(*v, fastpathCheckNilTrue, e) - - case map[int16]int64: - fastpathTV.EncMapInt16Int64V(v, fastpathCheckNilTrue, e) - case *map[int16]int64: - fastpathTV.EncMapInt16Int64V(*v, fastpathCheckNilTrue, e) - - case map[int16]float32: - fastpathTV.EncMapInt16Float32V(v, fastpathCheckNilTrue, e) - case *map[int16]float32: - fastpathTV.EncMapInt16Float32V(*v, fastpathCheckNilTrue, e) - - case map[int16]float64: - fastpathTV.EncMapInt16Float64V(v, fastpathCheckNilTrue, e) - case *map[int16]float64: - fastpathTV.EncMapInt16Float64V(*v, fastpathCheckNilTrue, e) - - case map[int16]bool: - fastpathTV.EncMapInt16BoolV(v, fastpathCheckNilTrue, e) - case *map[int16]bool: - fastpathTV.EncMapInt16BoolV(*v, fastpathCheckNilTrue, e) - - case map[int32]interface{}: - fastpathTV.EncMapInt32IntfV(v, fastpathCheckNilTrue, e) - case *map[int32]interface{}: - fastpathTV.EncMapInt32IntfV(*v, fastpathCheckNilTrue, e) - - case map[int32]string: - fastpathTV.EncMapInt32StringV(v, fastpathCheckNilTrue, e) - case *map[int32]string: - fastpathTV.EncMapInt32StringV(*v, fastpathCheckNilTrue, e) - - case map[int32]uint: - fastpathTV.EncMapInt32UintV(v, fastpathCheckNilTrue, e) - case *map[int32]uint: - fastpathTV.EncMapInt32UintV(*v, fastpathCheckNilTrue, e) - - case map[int32]uint8: - fastpathTV.EncMapInt32Uint8V(v, fastpathCheckNilTrue, e) - case *map[int32]uint8: - fastpathTV.EncMapInt32Uint8V(*v, fastpathCheckNilTrue, e) - - case map[int32]uint16: - fastpathTV.EncMapInt32Uint16V(v, fastpathCheckNilTrue, e) - case *map[int32]uint16: - fastpathTV.EncMapInt32Uint16V(*v, fastpathCheckNilTrue, e) - - case map[int32]uint32: - fastpathTV.EncMapInt32Uint32V(v, fastpathCheckNilTrue, e) - case *map[int32]uint32: - fastpathTV.EncMapInt32Uint32V(*v, fastpathCheckNilTrue, e) - - case map[int32]uint64: - fastpathTV.EncMapInt32Uint64V(v, fastpathCheckNilTrue, e) - case *map[int32]uint64: - fastpathTV.EncMapInt32Uint64V(*v, fastpathCheckNilTrue, e) - - case map[int32]uintptr: - fastpathTV.EncMapInt32UintptrV(v, fastpathCheckNilTrue, e) - case *map[int32]uintptr: - fastpathTV.EncMapInt32UintptrV(*v, 
fastpathCheckNilTrue, e) - - case map[int32]int: - fastpathTV.EncMapInt32IntV(v, fastpathCheckNilTrue, e) - case *map[int32]int: - fastpathTV.EncMapInt32IntV(*v, fastpathCheckNilTrue, e) - - case map[int32]int8: - fastpathTV.EncMapInt32Int8V(v, fastpathCheckNilTrue, e) - case *map[int32]int8: - fastpathTV.EncMapInt32Int8V(*v, fastpathCheckNilTrue, e) - - case map[int32]int16: - fastpathTV.EncMapInt32Int16V(v, fastpathCheckNilTrue, e) - case *map[int32]int16: - fastpathTV.EncMapInt32Int16V(*v, fastpathCheckNilTrue, e) - - case map[int32]int32: - fastpathTV.EncMapInt32Int32V(v, fastpathCheckNilTrue, e) - case *map[int32]int32: - fastpathTV.EncMapInt32Int32V(*v, fastpathCheckNilTrue, e) - - case map[int32]int64: - fastpathTV.EncMapInt32Int64V(v, fastpathCheckNilTrue, e) - case *map[int32]int64: - fastpathTV.EncMapInt32Int64V(*v, fastpathCheckNilTrue, e) - - case map[int32]float32: - fastpathTV.EncMapInt32Float32V(v, fastpathCheckNilTrue, e) - case *map[int32]float32: - fastpathTV.EncMapInt32Float32V(*v, fastpathCheckNilTrue, e) - - case map[int32]float64: - fastpathTV.EncMapInt32Float64V(v, fastpathCheckNilTrue, e) - case *map[int32]float64: - fastpathTV.EncMapInt32Float64V(*v, fastpathCheckNilTrue, e) - - case map[int32]bool: - fastpathTV.EncMapInt32BoolV(v, fastpathCheckNilTrue, e) - case *map[int32]bool: - fastpathTV.EncMapInt32BoolV(*v, fastpathCheckNilTrue, e) - - case map[int64]interface{}: - fastpathTV.EncMapInt64IntfV(v, fastpathCheckNilTrue, e) - case *map[int64]interface{}: - fastpathTV.EncMapInt64IntfV(*v, fastpathCheckNilTrue, e) - - case map[int64]string: - fastpathTV.EncMapInt64StringV(v, fastpathCheckNilTrue, e) - case *map[int64]string: - fastpathTV.EncMapInt64StringV(*v, fastpathCheckNilTrue, e) - - case map[int64]uint: - fastpathTV.EncMapInt64UintV(v, fastpathCheckNilTrue, e) - case *map[int64]uint: - fastpathTV.EncMapInt64UintV(*v, fastpathCheckNilTrue, e) - - case map[int64]uint8: - fastpathTV.EncMapInt64Uint8V(v, fastpathCheckNilTrue, e) - case *map[int64]uint8: - fastpathTV.EncMapInt64Uint8V(*v, fastpathCheckNilTrue, e) - - case map[int64]uint16: - fastpathTV.EncMapInt64Uint16V(v, fastpathCheckNilTrue, e) - case *map[int64]uint16: - fastpathTV.EncMapInt64Uint16V(*v, fastpathCheckNilTrue, e) - - case map[int64]uint32: - fastpathTV.EncMapInt64Uint32V(v, fastpathCheckNilTrue, e) - case *map[int64]uint32: - fastpathTV.EncMapInt64Uint32V(*v, fastpathCheckNilTrue, e) - - case map[int64]uint64: - fastpathTV.EncMapInt64Uint64V(v, fastpathCheckNilTrue, e) - case *map[int64]uint64: - fastpathTV.EncMapInt64Uint64V(*v, fastpathCheckNilTrue, e) - - case map[int64]uintptr: - fastpathTV.EncMapInt64UintptrV(v, fastpathCheckNilTrue, e) - case *map[int64]uintptr: - fastpathTV.EncMapInt64UintptrV(*v, fastpathCheckNilTrue, e) - - case map[int64]int: - fastpathTV.EncMapInt64IntV(v, fastpathCheckNilTrue, e) - case *map[int64]int: - fastpathTV.EncMapInt64IntV(*v, fastpathCheckNilTrue, e) - - case map[int64]int8: - fastpathTV.EncMapInt64Int8V(v, fastpathCheckNilTrue, e) - case *map[int64]int8: - fastpathTV.EncMapInt64Int8V(*v, fastpathCheckNilTrue, e) - - case map[int64]int16: - fastpathTV.EncMapInt64Int16V(v, fastpathCheckNilTrue, e) - case *map[int64]int16: - fastpathTV.EncMapInt64Int16V(*v, fastpathCheckNilTrue, e) - - case map[int64]int32: - fastpathTV.EncMapInt64Int32V(v, fastpathCheckNilTrue, e) - case *map[int64]int32: - fastpathTV.EncMapInt64Int32V(*v, fastpathCheckNilTrue, e) - - case map[int64]int64: - fastpathTV.EncMapInt64Int64V(v, fastpathCheckNilTrue, e) - case *map[int64]int64: - 
fastpathTV.EncMapInt64Int64V(*v, fastpathCheckNilTrue, e) - - case map[int64]float32: - fastpathTV.EncMapInt64Float32V(v, fastpathCheckNilTrue, e) - case *map[int64]float32: - fastpathTV.EncMapInt64Float32V(*v, fastpathCheckNilTrue, e) - - case map[int64]float64: - fastpathTV.EncMapInt64Float64V(v, fastpathCheckNilTrue, e) - case *map[int64]float64: - fastpathTV.EncMapInt64Float64V(*v, fastpathCheckNilTrue, e) - - case map[int64]bool: - fastpathTV.EncMapInt64BoolV(v, fastpathCheckNilTrue, e) - case *map[int64]bool: - fastpathTV.EncMapInt64BoolV(*v, fastpathCheckNilTrue, e) - - case map[bool]interface{}: - fastpathTV.EncMapBoolIntfV(v, fastpathCheckNilTrue, e) - case *map[bool]interface{}: - fastpathTV.EncMapBoolIntfV(*v, fastpathCheckNilTrue, e) - - case map[bool]string: - fastpathTV.EncMapBoolStringV(v, fastpathCheckNilTrue, e) - case *map[bool]string: - fastpathTV.EncMapBoolStringV(*v, fastpathCheckNilTrue, e) - - case map[bool]uint: - fastpathTV.EncMapBoolUintV(v, fastpathCheckNilTrue, e) - case *map[bool]uint: - fastpathTV.EncMapBoolUintV(*v, fastpathCheckNilTrue, e) - - case map[bool]uint8: - fastpathTV.EncMapBoolUint8V(v, fastpathCheckNilTrue, e) - case *map[bool]uint8: - fastpathTV.EncMapBoolUint8V(*v, fastpathCheckNilTrue, e) - - case map[bool]uint16: - fastpathTV.EncMapBoolUint16V(v, fastpathCheckNilTrue, e) - case *map[bool]uint16: - fastpathTV.EncMapBoolUint16V(*v, fastpathCheckNilTrue, e) - - case map[bool]uint32: - fastpathTV.EncMapBoolUint32V(v, fastpathCheckNilTrue, e) - case *map[bool]uint32: - fastpathTV.EncMapBoolUint32V(*v, fastpathCheckNilTrue, e) - - case map[bool]uint64: - fastpathTV.EncMapBoolUint64V(v, fastpathCheckNilTrue, e) - case *map[bool]uint64: - fastpathTV.EncMapBoolUint64V(*v, fastpathCheckNilTrue, e) - - case map[bool]uintptr: - fastpathTV.EncMapBoolUintptrV(v, fastpathCheckNilTrue, e) - case *map[bool]uintptr: - fastpathTV.EncMapBoolUintptrV(*v, fastpathCheckNilTrue, e) - - case map[bool]int: - fastpathTV.EncMapBoolIntV(v, fastpathCheckNilTrue, e) - case *map[bool]int: - fastpathTV.EncMapBoolIntV(*v, fastpathCheckNilTrue, e) - - case map[bool]int8: - fastpathTV.EncMapBoolInt8V(v, fastpathCheckNilTrue, e) - case *map[bool]int8: - fastpathTV.EncMapBoolInt8V(*v, fastpathCheckNilTrue, e) - - case map[bool]int16: - fastpathTV.EncMapBoolInt16V(v, fastpathCheckNilTrue, e) - case *map[bool]int16: - fastpathTV.EncMapBoolInt16V(*v, fastpathCheckNilTrue, e) - - case map[bool]int32: - fastpathTV.EncMapBoolInt32V(v, fastpathCheckNilTrue, e) - case *map[bool]int32: - fastpathTV.EncMapBoolInt32V(*v, fastpathCheckNilTrue, e) - - case map[bool]int64: - fastpathTV.EncMapBoolInt64V(v, fastpathCheckNilTrue, e) - case *map[bool]int64: - fastpathTV.EncMapBoolInt64V(*v, fastpathCheckNilTrue, e) - - case map[bool]float32: - fastpathTV.EncMapBoolFloat32V(v, fastpathCheckNilTrue, e) - case *map[bool]float32: - fastpathTV.EncMapBoolFloat32V(*v, fastpathCheckNilTrue, e) - - case map[bool]float64: - fastpathTV.EncMapBoolFloat64V(v, fastpathCheckNilTrue, e) - case *map[bool]float64: - fastpathTV.EncMapBoolFloat64V(*v, fastpathCheckNilTrue, e) - - case map[bool]bool: - fastpathTV.EncMapBoolBoolV(v, fastpathCheckNilTrue, e) - case *map[bool]bool: - fastpathTV.EncMapBoolBoolV(*v, fastpathCheckNilTrue, e) + fastpathTV.EncMapBoolBoolV(*v, e) default: _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) @@ -3113,812 +1744,661 @@ func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { // -- -- fast path functions -func (f *encFnInfo) 
fastpathEncSliceIntfR(rv reflect.Value) { +func (e *Encoder) fastpathEncSliceIntfR(f *codecFnInfo, rv reflect.Value) { if f.ti.mbs { - fastpathTV.EncAsMapSliceIntfV(rv.Interface().([]interface{}), fastpathCheckNilFalse, f.e) + fastpathTV.EncAsMapSliceIntfV(rv2i(rv).([]interface{}), e) } else { - fastpathTV.EncSliceIntfV(rv.Interface().([]interface{}), fastpathCheckNilFalse, f.e) + fastpathTV.EncSliceIntfV(rv2i(rv).([]interface{}), e) } } -func (_ fastpathT) EncSliceIntfV(v []interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncSliceIntfV(v []interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeArrayStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) + if esep { + ee.WriteArrayElem() } e.encode(v2) } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } + ee.WriteArrayEnd() } -func (_ fastpathT) EncAsMapSliceIntfV(v []interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } +func (_ fastpathT) EncAsMapSliceIntfV(v []interface{}, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() if len(v)%2 == 1 { e.errorf("mapBySlice requires even slice length, but got %v", len(v)) return } - ee.EncodeMapStart(len(v) / 2) + ee.WriteMapStart(len(v) / 2) for j, v2 := range v { - if cr != nil { + if esep { if j%2 == 0 { - cr.sendContainerState(containerMapKey) + ee.WriteMapElemKey() } else { - cr.sendContainerState(containerMapValue) + ee.WriteMapElemValue() } } e.encode(v2) } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncSliceStringR(rv reflect.Value) { +func (e *Encoder) fastpathEncSliceStringR(f *codecFnInfo, rv reflect.Value) { if f.ti.mbs { - fastpathTV.EncAsMapSliceStringV(rv.Interface().([]string), fastpathCheckNilFalse, f.e) + fastpathTV.EncAsMapSliceStringV(rv2i(rv).([]string), e) } else { - fastpathTV.EncSliceStringV(rv.Interface().([]string), fastpathCheckNilFalse, f.e) + fastpathTV.EncSliceStringV(rv2i(rv).([]string), e) } } -func (_ fastpathT) EncSliceStringV(v []string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncSliceStringV(v []string, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeArrayStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) + if esep { + ee.WriteArrayElem() } ee.EncodeString(c_UTF8, v2) } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } + ee.WriteArrayEnd() } -func (_ fastpathT) EncAsMapSliceStringV(v []string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } +func (_ fastpathT) EncAsMapSliceStringV(v []string, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() if len(v)%2 == 1 { e.errorf("mapBySlice requires even slice length, but got %v", len(v)) return } - ee.EncodeMapStart(len(v) / 2) + ee.WriteMapStart(len(v) / 2) for j, v2 := range v { - if cr != nil { + if esep { if j%2 == 0 { - cr.sendContainerState(containerMapKey) + ee.WriteMapElemKey() } else { - cr.sendContainerState(containerMapValue) + ee.WriteMapElemValue() } } ee.EncodeString(c_UTF8, v2) } - if cr != nil { - 
cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncSliceFloat32R(rv reflect.Value) { +func (e *Encoder) fastpathEncSliceFloat32R(f *codecFnInfo, rv reflect.Value) { if f.ti.mbs { - fastpathTV.EncAsMapSliceFloat32V(rv.Interface().([]float32), fastpathCheckNilFalse, f.e) + fastpathTV.EncAsMapSliceFloat32V(rv2i(rv).([]float32), e) } else { - fastpathTV.EncSliceFloat32V(rv.Interface().([]float32), fastpathCheckNilFalse, f.e) + fastpathTV.EncSliceFloat32V(rv2i(rv).([]float32), e) } } -func (_ fastpathT) EncSliceFloat32V(v []float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncSliceFloat32V(v []float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeArrayStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) + if esep { + ee.WriteArrayElem() } ee.EncodeFloat32(v2) } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } + ee.WriteArrayEnd() } -func (_ fastpathT) EncAsMapSliceFloat32V(v []float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } +func (_ fastpathT) EncAsMapSliceFloat32V(v []float32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() if len(v)%2 == 1 { e.errorf("mapBySlice requires even slice length, but got %v", len(v)) return } - ee.EncodeMapStart(len(v) / 2) + ee.WriteMapStart(len(v) / 2) for j, v2 := range v { - if cr != nil { + if esep { if j%2 == 0 { - cr.sendContainerState(containerMapKey) + ee.WriteMapElemKey() } else { - cr.sendContainerState(containerMapValue) + ee.WriteMapElemValue() } } ee.EncodeFloat32(v2) } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncSliceFloat64R(rv reflect.Value) { +func (e *Encoder) fastpathEncSliceFloat64R(f *codecFnInfo, rv reflect.Value) { if f.ti.mbs { - fastpathTV.EncAsMapSliceFloat64V(rv.Interface().([]float64), fastpathCheckNilFalse, f.e) + fastpathTV.EncAsMapSliceFloat64V(rv2i(rv).([]float64), e) } else { - fastpathTV.EncSliceFloat64V(rv.Interface().([]float64), fastpathCheckNilFalse, f.e) + fastpathTV.EncSliceFloat64V(rv2i(rv).([]float64), e) } } -func (_ fastpathT) EncSliceFloat64V(v []float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncSliceFloat64V(v []float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeArrayStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) + if esep { + ee.WriteArrayElem() } ee.EncodeFloat64(v2) } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } + ee.WriteArrayEnd() } -func (_ fastpathT) EncAsMapSliceFloat64V(v []float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } +func (_ fastpathT) EncAsMapSliceFloat64V(v []float64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() if len(v)%2 == 1 { e.errorf("mapBySlice requires even slice length, but got %v", len(v)) return } - ee.EncodeMapStart(len(v) / 2) + ee.WriteMapStart(len(v) / 2) for j, v2 := range v { - if cr != nil { + if esep { if j%2 == 0 { - cr.sendContainerState(containerMapKey) + ee.WriteMapElemKey() } else { - cr.sendContainerState(containerMapValue) + 
ee.WriteMapElemValue() } } ee.EncodeFloat64(v2) } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncSliceUintR(rv reflect.Value) { +func (e *Encoder) fastpathEncSliceUintR(f *codecFnInfo, rv reflect.Value) { if f.ti.mbs { - fastpathTV.EncAsMapSliceUintV(rv.Interface().([]uint), fastpathCheckNilFalse, f.e) + fastpathTV.EncAsMapSliceUintV(rv2i(rv).([]uint), e) } else { - fastpathTV.EncSliceUintV(rv.Interface().([]uint), fastpathCheckNilFalse, f.e) + fastpathTV.EncSliceUintV(rv2i(rv).([]uint), e) } } -func (_ fastpathT) EncSliceUintV(v []uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncSliceUintV(v []uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeArrayStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) + if esep { + ee.WriteArrayElem() } ee.EncodeUint(uint64(v2)) } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } + ee.WriteArrayEnd() } -func (_ fastpathT) EncAsMapSliceUintV(v []uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } +func (_ fastpathT) EncAsMapSliceUintV(v []uint, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() if len(v)%2 == 1 { e.errorf("mapBySlice requires even slice length, but got %v", len(v)) return } - ee.EncodeMapStart(len(v) / 2) + ee.WriteMapStart(len(v) / 2) for j, v2 := range v { - if cr != nil { + if esep { if j%2 == 0 { - cr.sendContainerState(containerMapKey) + ee.WriteMapElemKey() } else { - cr.sendContainerState(containerMapValue) + ee.WriteMapElemValue() } } ee.EncodeUint(uint64(v2)) } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncSliceUint16R(rv reflect.Value) { +func (e *Encoder) fastpathEncSliceUint16R(f *codecFnInfo, rv reflect.Value) { if f.ti.mbs { - fastpathTV.EncAsMapSliceUint16V(rv.Interface().([]uint16), fastpathCheckNilFalse, f.e) + fastpathTV.EncAsMapSliceUint16V(rv2i(rv).([]uint16), e) } else { - fastpathTV.EncSliceUint16V(rv.Interface().([]uint16), fastpathCheckNilFalse, f.e) + fastpathTV.EncSliceUint16V(rv2i(rv).([]uint16), e) } } -func (_ fastpathT) EncSliceUint16V(v []uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncSliceUint16V(v []uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeArrayStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) + if esep { + ee.WriteArrayElem() } ee.EncodeUint(uint64(v2)) } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } + ee.WriteArrayEnd() } -func (_ fastpathT) EncAsMapSliceUint16V(v []uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } +func (_ fastpathT) EncAsMapSliceUint16V(v []uint16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() if len(v)%2 == 1 { e.errorf("mapBySlice requires even slice length, but got %v", len(v)) return } - ee.EncodeMapStart(len(v) / 2) + ee.WriteMapStart(len(v) / 2) for j, v2 := range v { - if cr != nil { + if esep { if j%2 == 0 { - cr.sendContainerState(containerMapKey) + ee.WriteMapElemKey() } else { - 
cr.sendContainerState(containerMapValue) + ee.WriteMapElemValue() } } ee.EncodeUint(uint64(v2)) } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncSliceUint32R(rv reflect.Value) { +func (e *Encoder) fastpathEncSliceUint32R(f *codecFnInfo, rv reflect.Value) { if f.ti.mbs { - fastpathTV.EncAsMapSliceUint32V(rv.Interface().([]uint32), fastpathCheckNilFalse, f.e) + fastpathTV.EncAsMapSliceUint32V(rv2i(rv).([]uint32), e) } else { - fastpathTV.EncSliceUint32V(rv.Interface().([]uint32), fastpathCheckNilFalse, f.e) + fastpathTV.EncSliceUint32V(rv2i(rv).([]uint32), e) } } -func (_ fastpathT) EncSliceUint32V(v []uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncSliceUint32V(v []uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeArrayStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) + if esep { + ee.WriteArrayElem() } ee.EncodeUint(uint64(v2)) } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } + ee.WriteArrayEnd() } -func (_ fastpathT) EncAsMapSliceUint32V(v []uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } +func (_ fastpathT) EncAsMapSliceUint32V(v []uint32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() if len(v)%2 == 1 { e.errorf("mapBySlice requires even slice length, but got %v", len(v)) return } - ee.EncodeMapStart(len(v) / 2) + ee.WriteMapStart(len(v) / 2) for j, v2 := range v { - if cr != nil { + if esep { if j%2 == 0 { - cr.sendContainerState(containerMapKey) + ee.WriteMapElemKey() } else { - cr.sendContainerState(containerMapValue) + ee.WriteMapElemValue() } } ee.EncodeUint(uint64(v2)) } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncSliceUint64R(rv reflect.Value) { +func (e *Encoder) fastpathEncSliceUint64R(f *codecFnInfo, rv reflect.Value) { if f.ti.mbs { - fastpathTV.EncAsMapSliceUint64V(rv.Interface().([]uint64), fastpathCheckNilFalse, f.e) + fastpathTV.EncAsMapSliceUint64V(rv2i(rv).([]uint64), e) } else { - fastpathTV.EncSliceUint64V(rv.Interface().([]uint64), fastpathCheckNilFalse, f.e) + fastpathTV.EncSliceUint64V(rv2i(rv).([]uint64), e) } } -func (_ fastpathT) EncSliceUint64V(v []uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncSliceUint64V(v []uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeArrayStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) + if esep { + ee.WriteArrayElem() } ee.EncodeUint(uint64(v2)) } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } + ee.WriteArrayEnd() } -func (_ fastpathT) EncAsMapSliceUint64V(v []uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } +func (_ fastpathT) EncAsMapSliceUint64V(v []uint64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() if len(v)%2 == 1 { e.errorf("mapBySlice requires even slice length, but got %v", len(v)) return } - ee.EncodeMapStart(len(v) / 2) + ee.WriteMapStart(len(v) / 2) for j, v2 := range v { - if cr != nil { + if esep { if j%2 == 0 { - 
cr.sendContainerState(containerMapKey) + ee.WriteMapElemKey() } else { - cr.sendContainerState(containerMapValue) + ee.WriteMapElemValue() } } ee.EncodeUint(uint64(v2)) } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncSliceUintptrR(rv reflect.Value) { +func (e *Encoder) fastpathEncSliceUintptrR(f *codecFnInfo, rv reflect.Value) { if f.ti.mbs { - fastpathTV.EncAsMapSliceUintptrV(rv.Interface().([]uintptr), fastpathCheckNilFalse, f.e) + fastpathTV.EncAsMapSliceUintptrV(rv2i(rv).([]uintptr), e) } else { - fastpathTV.EncSliceUintptrV(rv.Interface().([]uintptr), fastpathCheckNilFalse, f.e) + fastpathTV.EncSliceUintptrV(rv2i(rv).([]uintptr), e) } } -func (_ fastpathT) EncSliceUintptrV(v []uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncSliceUintptrV(v []uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeArrayStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) + if esep { + ee.WriteArrayElem() } e.encode(v2) } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } + ee.WriteArrayEnd() } -func (_ fastpathT) EncAsMapSliceUintptrV(v []uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } +func (_ fastpathT) EncAsMapSliceUintptrV(v []uintptr, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() if len(v)%2 == 1 { e.errorf("mapBySlice requires even slice length, but got %v", len(v)) return } - ee.EncodeMapStart(len(v) / 2) + ee.WriteMapStart(len(v) / 2) for j, v2 := range v { - if cr != nil { + if esep { if j%2 == 0 { - cr.sendContainerState(containerMapKey) + ee.WriteMapElemKey() } else { - cr.sendContainerState(containerMapValue) + ee.WriteMapElemValue() } } e.encode(v2) } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncSliceIntR(rv reflect.Value) { +func (e *Encoder) fastpathEncSliceIntR(f *codecFnInfo, rv reflect.Value) { if f.ti.mbs { - fastpathTV.EncAsMapSliceIntV(rv.Interface().([]int), fastpathCheckNilFalse, f.e) + fastpathTV.EncAsMapSliceIntV(rv2i(rv).([]int), e) } else { - fastpathTV.EncSliceIntV(rv.Interface().([]int), fastpathCheckNilFalse, f.e) + fastpathTV.EncSliceIntV(rv2i(rv).([]int), e) } } -func (_ fastpathT) EncSliceIntV(v []int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncSliceIntV(v []int, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeArrayStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) + if esep { + ee.WriteArrayElem() } ee.EncodeInt(int64(v2)) } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } + ee.WriteArrayEnd() } -func (_ fastpathT) EncAsMapSliceIntV(v []int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } +func (_ fastpathT) EncAsMapSliceIntV(v []int, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() if len(v)%2 == 1 { e.errorf("mapBySlice requires even slice length, but got %v", len(v)) return } - ee.EncodeMapStart(len(v) / 2) + ee.WriteMapStart(len(v) / 2) for j, v2 := range v { - if cr != nil { + if esep { if j%2 == 0 { - 
cr.sendContainerState(containerMapKey) + ee.WriteMapElemKey() } else { - cr.sendContainerState(containerMapValue) + ee.WriteMapElemValue() } } ee.EncodeInt(int64(v2)) } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncSliceInt8R(rv reflect.Value) { +func (e *Encoder) fastpathEncSliceInt8R(f *codecFnInfo, rv reflect.Value) { if f.ti.mbs { - fastpathTV.EncAsMapSliceInt8V(rv.Interface().([]int8), fastpathCheckNilFalse, f.e) + fastpathTV.EncAsMapSliceInt8V(rv2i(rv).([]int8), e) } else { - fastpathTV.EncSliceInt8V(rv.Interface().([]int8), fastpathCheckNilFalse, f.e) + fastpathTV.EncSliceInt8V(rv2i(rv).([]int8), e) } } -func (_ fastpathT) EncSliceInt8V(v []int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncSliceInt8V(v []int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeArrayStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) + if esep { + ee.WriteArrayElem() } ee.EncodeInt(int64(v2)) } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } + ee.WriteArrayEnd() } -func (_ fastpathT) EncAsMapSliceInt8V(v []int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } +func (_ fastpathT) EncAsMapSliceInt8V(v []int8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() if len(v)%2 == 1 { e.errorf("mapBySlice requires even slice length, but got %v", len(v)) return } - ee.EncodeMapStart(len(v) / 2) + ee.WriteMapStart(len(v) / 2) for j, v2 := range v { - if cr != nil { + if esep { if j%2 == 0 { - cr.sendContainerState(containerMapKey) + ee.WriteMapElemKey() } else { - cr.sendContainerState(containerMapValue) + ee.WriteMapElemValue() } } ee.EncodeInt(int64(v2)) } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncSliceInt16R(rv reflect.Value) { +func (e *Encoder) fastpathEncSliceInt16R(f *codecFnInfo, rv reflect.Value) { if f.ti.mbs { - fastpathTV.EncAsMapSliceInt16V(rv.Interface().([]int16), fastpathCheckNilFalse, f.e) + fastpathTV.EncAsMapSliceInt16V(rv2i(rv).([]int16), e) } else { - fastpathTV.EncSliceInt16V(rv.Interface().([]int16), fastpathCheckNilFalse, f.e) + fastpathTV.EncSliceInt16V(rv2i(rv).([]int16), e) } } -func (_ fastpathT) EncSliceInt16V(v []int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncSliceInt16V(v []int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeArrayStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) + if esep { + ee.WriteArrayElem() } ee.EncodeInt(int64(v2)) } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } + ee.WriteArrayEnd() } -func (_ fastpathT) EncAsMapSliceInt16V(v []int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } +func (_ fastpathT) EncAsMapSliceInt16V(v []int16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() if len(v)%2 == 1 { e.errorf("mapBySlice requires even slice length, but got %v", len(v)) return } - ee.EncodeMapStart(len(v) / 2) + ee.WriteMapStart(len(v) / 2) for j, v2 := range v { - if cr != nil { + if esep { if j%2 == 0 { - 
cr.sendContainerState(containerMapKey) + ee.WriteMapElemKey() } else { - cr.sendContainerState(containerMapValue) + ee.WriteMapElemValue() } } ee.EncodeInt(int64(v2)) } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncSliceInt32R(rv reflect.Value) { +func (e *Encoder) fastpathEncSliceInt32R(f *codecFnInfo, rv reflect.Value) { if f.ti.mbs { - fastpathTV.EncAsMapSliceInt32V(rv.Interface().([]int32), fastpathCheckNilFalse, f.e) + fastpathTV.EncAsMapSliceInt32V(rv2i(rv).([]int32), e) } else { - fastpathTV.EncSliceInt32V(rv.Interface().([]int32), fastpathCheckNilFalse, f.e) + fastpathTV.EncSliceInt32V(rv2i(rv).([]int32), e) } } -func (_ fastpathT) EncSliceInt32V(v []int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncSliceInt32V(v []int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeArrayStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) + if esep { + ee.WriteArrayElem() } ee.EncodeInt(int64(v2)) } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } + ee.WriteArrayEnd() } -func (_ fastpathT) EncAsMapSliceInt32V(v []int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } +func (_ fastpathT) EncAsMapSliceInt32V(v []int32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() if len(v)%2 == 1 { e.errorf("mapBySlice requires even slice length, but got %v", len(v)) return } - ee.EncodeMapStart(len(v) / 2) + ee.WriteMapStart(len(v) / 2) for j, v2 := range v { - if cr != nil { + if esep { if j%2 == 0 { - cr.sendContainerState(containerMapKey) + ee.WriteMapElemKey() } else { - cr.sendContainerState(containerMapValue) + ee.WriteMapElemValue() } } ee.EncodeInt(int64(v2)) } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncSliceInt64R(rv reflect.Value) { +func (e *Encoder) fastpathEncSliceInt64R(f *codecFnInfo, rv reflect.Value) { if f.ti.mbs { - fastpathTV.EncAsMapSliceInt64V(rv.Interface().([]int64), fastpathCheckNilFalse, f.e) + fastpathTV.EncAsMapSliceInt64V(rv2i(rv).([]int64), e) } else { - fastpathTV.EncSliceInt64V(rv.Interface().([]int64), fastpathCheckNilFalse, f.e) + fastpathTV.EncSliceInt64V(rv2i(rv).([]int64), e) } } -func (_ fastpathT) EncSliceInt64V(v []int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncSliceInt64V(v []int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeArrayStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) + if esep { + ee.WriteArrayElem() } ee.EncodeInt(int64(v2)) } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } + ee.WriteArrayEnd() } -func (_ fastpathT) EncAsMapSliceInt64V(v []int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } +func (_ fastpathT) EncAsMapSliceInt64V(v []int64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() if len(v)%2 == 1 { e.errorf("mapBySlice requires even slice length, but got %v", len(v)) return } - ee.EncodeMapStart(len(v) / 2) + ee.WriteMapStart(len(v) / 2) for j, v2 := range v { - if cr != nil { + if esep { if j%2 
== 0 { - cr.sendContainerState(containerMapKey) + ee.WriteMapElemKey() } else { - cr.sendContainerState(containerMapValue) + ee.WriteMapElemValue() } } ee.EncodeInt(int64(v2)) } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncSliceBoolR(rv reflect.Value) { +func (e *Encoder) fastpathEncSliceBoolR(f *codecFnInfo, rv reflect.Value) { if f.ti.mbs { - fastpathTV.EncAsMapSliceBoolV(rv.Interface().([]bool), fastpathCheckNilFalse, f.e) + fastpathTV.EncAsMapSliceBoolV(rv2i(rv).([]bool), e) } else { - fastpathTV.EncSliceBoolV(rv.Interface().([]bool), fastpathCheckNilFalse, f.e) + fastpathTV.EncSliceBoolV(rv2i(rv).([]bool), e) } } -func (_ fastpathT) EncSliceBoolV(v []bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncSliceBoolV(v []bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeArrayStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) for _, v2 := range v { - if cr != nil { - cr.sendContainerState(containerArrayElem) + if esep { + ee.WriteArrayElem() } ee.EncodeBool(v2) } - if cr != nil { - cr.sendContainerState(containerArrayEnd) - } + ee.WriteArrayEnd() } -func (_ fastpathT) EncAsMapSliceBoolV(v []bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } +func (_ fastpathT) EncAsMapSliceBoolV(v []bool, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() if len(v)%2 == 1 { e.errorf("mapBySlice requires even slice length, but got %v", len(v)) return } - ee.EncodeMapStart(len(v) / 2) + ee.WriteMapStart(len(v) / 2) for j, v2 := range v { - if cr != nil { + if esep { if j%2 == 0 { - cr.sendContainerState(containerMapKey) + ee.WriteMapElemKey() } else { - cr.sendContainerState(containerMapValue) + ee.WriteMapElemValue() } } ee.EncodeBool(v2) } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapIntfIntfR(rv reflect.Value) { - fastpathTV.EncMapIntfIntfV(rv.Interface().(map[interface{}]interface{}), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapIntfIntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfIntfV(rv2i(rv).(map[interface{}]interface{}), e) } -func (_ fastpathT) EncMapIntfIntfV(v map[interface{}]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapIntfIntfV(v map[interface{}]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding e2 := NewEncoderBytes(&mksv, e.hh) @@ -3935,43 +2415,40 @@ func (_ fastpathT) EncMapIntfIntfV(v map[interface{}]interface{}, checkNil bool, } sort.Sort(bytesISlice(v2)) for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[v2[j].i]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v2) } } - if cr != nil { - 
cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapIntfStringR(rv reflect.Value) { - fastpathTV.EncMapIntfStringV(rv.Interface().(map[interface{}]string), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapIntfStringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfStringV(rv2i(rv).(map[interface{}]string), e) } -func (_ fastpathT) EncMapIntfStringV(v map[interface{}]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapIntfStringV(v map[interface{}]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding e2 := NewEncoderBytes(&mksv, e.hh) @@ -3988,43 +2465,40 @@ func (_ fastpathT) EncMapIntfStringV(v map[interface{}]string, checkNil bool, e } sort.Sort(bytesISlice(v2)) for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[v2[j].i]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeString(c_UTF8, v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapIntfUintR(rv reflect.Value) { - fastpathTV.EncMapIntfUintV(rv.Interface().(map[interface{}]uint), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapIntfUintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfUintV(rv2i(rv).(map[interface{}]uint), e) } -func (_ fastpathT) EncMapIntfUintV(v map[interface{}]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapIntfUintV(v map[interface{}]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding e2 := NewEncoderBytes(&mksv, e.hh) @@ -4041,43 +2515,40 @@ func (_ fastpathT) EncMapIntfUintV(v map[interface{}]uint, checkNil bool, e *Enc } sort.Sort(bytesISlice(v2)) for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[v2[j].i]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapIntfUint8R(rv reflect.Value) { - fastpathTV.EncMapIntfUint8V(rv.Interface().(map[interface{}]uint8), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapIntfUint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfUint8V(rv2i(rv).(map[interface{}]uint8), e) } -func (_ fastpathT) 
EncMapIntfUint8V(v map[interface{}]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapIntfUint8V(v map[interface{}]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding e2 := NewEncoderBytes(&mksv, e.hh) @@ -4094,43 +2565,40 @@ func (_ fastpathT) EncMapIntfUint8V(v map[interface{}]uint8, checkNil bool, e *E } sort.Sort(bytesISlice(v2)) for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[v2[j].i]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapIntfUint16R(rv reflect.Value) { - fastpathTV.EncMapIntfUint16V(rv.Interface().(map[interface{}]uint16), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapIntfUint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfUint16V(rv2i(rv).(map[interface{}]uint16), e) } -func (_ fastpathT) EncMapIntfUint16V(v map[interface{}]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapIntfUint16V(v map[interface{}]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding e2 := NewEncoderBytes(&mksv, e.hh) @@ -4147,43 +2615,40 @@ func (_ fastpathT) EncMapIntfUint16V(v map[interface{}]uint16, checkNil bool, e } sort.Sort(bytesISlice(v2)) for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[v2[j].i]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapIntfUint32R(rv reflect.Value) { - fastpathTV.EncMapIntfUint32V(rv.Interface().(map[interface{}]uint32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapIntfUint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfUint32V(rv2i(rv).(map[interface{}]uint32), e) } -func (_ fastpathT) EncMapIntfUint32V(v map[interface{}]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapIntfUint32V(v map[interface{}]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { var mksv 
[]byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding e2 := NewEncoderBytes(&mksv, e.hh) @@ -4200,43 +2665,40 @@ func (_ fastpathT) EncMapIntfUint32V(v map[interface{}]uint32, checkNil bool, e } sort.Sort(bytesISlice(v2)) for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[v2[j].i]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapIntfUint64R(rv reflect.Value) { - fastpathTV.EncMapIntfUint64V(rv.Interface().(map[interface{}]uint64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapIntfUint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfUint64V(rv2i(rv).(map[interface{}]uint64), e) } -func (_ fastpathT) EncMapIntfUint64V(v map[interface{}]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapIntfUint64V(v map[interface{}]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding e2 := NewEncoderBytes(&mksv, e.hh) @@ -4253,43 +2715,40 @@ func (_ fastpathT) EncMapIntfUint64V(v map[interface{}]uint64, checkNil bool, e } sort.Sort(bytesISlice(v2)) for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[v2[j].i]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapIntfUintptrR(rv reflect.Value) { - fastpathTV.EncMapIntfUintptrV(rv.Interface().(map[interface{}]uintptr), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapIntfUintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfUintptrV(rv2i(rv).(map[interface{}]uintptr), e) } -func (_ fastpathT) EncMapIntfUintptrV(v map[interface{}]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapIntfUintptrV(v map[interface{}]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding e2 := NewEncoderBytes(&mksv, e.hh) @@ -4306,43 +2765,40 @@ func (_ fastpathT) EncMapIntfUintptrV(v map[interface{}]uintptr, checkNil bool, } sort.Sort(bytesISlice(v2)) for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.asis(v2[j].v) - if 
cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[v2[j].i]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapIntfIntR(rv reflect.Value) { - fastpathTV.EncMapIntfIntV(rv.Interface().(map[interface{}]int), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapIntfIntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfIntV(rv2i(rv).(map[interface{}]int), e) } -func (_ fastpathT) EncMapIntfIntV(v map[interface{}]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapIntfIntV(v map[interface{}]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding e2 := NewEncoderBytes(&mksv, e.hh) @@ -4359,43 +2815,40 @@ func (_ fastpathT) EncMapIntfIntV(v map[interface{}]int, checkNil bool, e *Encod } sort.Sort(bytesISlice(v2)) for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[v2[j].i]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapIntfInt8R(rv reflect.Value) { - fastpathTV.EncMapIntfInt8V(rv.Interface().(map[interface{}]int8), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapIntfInt8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfInt8V(rv2i(rv).(map[interface{}]int8), e) } -func (_ fastpathT) EncMapIntfInt8V(v map[interface{}]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapIntfInt8V(v map[interface{}]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding e2 := NewEncoderBytes(&mksv, e.hh) @@ -4412,43 +2865,40 @@ func (_ fastpathT) EncMapIntfInt8V(v map[interface{}]int8, checkNil bool, e *Enc } sort.Sort(bytesISlice(v2)) for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[v2[j].i]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + 
ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapIntfInt16R(rv reflect.Value) { - fastpathTV.EncMapIntfInt16V(rv.Interface().(map[interface{}]int16), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapIntfInt16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfInt16V(rv2i(rv).(map[interface{}]int16), e) } -func (_ fastpathT) EncMapIntfInt16V(v map[interface{}]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapIntfInt16V(v map[interface{}]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding e2 := NewEncoderBytes(&mksv, e.hh) @@ -4465,43 +2915,40 @@ func (_ fastpathT) EncMapIntfInt16V(v map[interface{}]int16, checkNil bool, e *E } sort.Sort(bytesISlice(v2)) for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[v2[j].i]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapIntfInt32R(rv reflect.Value) { - fastpathTV.EncMapIntfInt32V(rv.Interface().(map[interface{}]int32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapIntfInt32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfInt32V(rv2i(rv).(map[interface{}]int32), e) } -func (_ fastpathT) EncMapIntfInt32V(v map[interface{}]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapIntfInt32V(v map[interface{}]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding e2 := NewEncoderBytes(&mksv, e.hh) @@ -4518,43 +2965,40 @@ func (_ fastpathT) EncMapIntfInt32V(v map[interface{}]int32, checkNil bool, e *E } sort.Sort(bytesISlice(v2)) for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[v2[j].i]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapIntfInt64R(rv reflect.Value) { - fastpathTV.EncMapIntfInt64V(rv.Interface().(map[interface{}]int64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapIntfInt64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfInt64V(rv2i(rv).(map[interface{}]int64), e) } -func (_ fastpathT) EncMapIntfInt64V(v map[interface{}]int64, checkNil bool, e 
*Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapIntfInt64V(v map[interface{}]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding e2 := NewEncoderBytes(&mksv, e.hh) @@ -4571,43 +3015,40 @@ func (_ fastpathT) EncMapIntfInt64V(v map[interface{}]int64, checkNil bool, e *E } sort.Sort(bytesISlice(v2)) for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[v2[j].i]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapIntfFloat32R(rv reflect.Value) { - fastpathTV.EncMapIntfFloat32V(rv.Interface().(map[interface{}]float32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapIntfFloat32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfFloat32V(rv2i(rv).(map[interface{}]float32), e) } -func (_ fastpathT) EncMapIntfFloat32V(v map[interface{}]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapIntfFloat32V(v map[interface{}]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding e2 := NewEncoderBytes(&mksv, e.hh) @@ -4624,43 +3065,40 @@ func (_ fastpathT) EncMapIntfFloat32V(v map[interface{}]float32, checkNil bool, } sort.Sort(bytesISlice(v2)) for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[v2[j].i]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat32(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapIntfFloat64R(rv reflect.Value) { - fastpathTV.EncMapIntfFloat64V(rv.Interface().(map[interface{}]float64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapIntfFloat64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfFloat64V(rv2i(rv).(map[interface{}]float64), e) } -func (_ fastpathT) EncMapIntfFloat64V(v map[interface{}]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapIntfFloat64V(v map[interface{}]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { var mksv []byte = make([]byte, 0, len(v)*16) // temporary 
byte slice for the encoding e2 := NewEncoderBytes(&mksv, e.hh) @@ -4677,43 +3115,40 @@ func (_ fastpathT) EncMapIntfFloat64V(v map[interface{}]float64, checkNil bool, } sort.Sort(bytesISlice(v2)) for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[v2[j].i]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat64(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapIntfBoolR(rv reflect.Value) { - fastpathTV.EncMapIntfBoolV(rv.Interface().(map[interface{}]bool), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapIntfBoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfBoolV(rv2i(rv).(map[interface{}]bool), e) } -func (_ fastpathT) EncMapIntfBoolV(v map[interface{}]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapIntfBoolV(v map[interface{}]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding e2 := NewEncoderBytes(&mksv, e.hh) @@ -4730,43 +3165,40 @@ func (_ fastpathT) EncMapIntfBoolV(v map[interface{}]bool, checkNil bool, e *Enc } sort.Sort(bytesISlice(v2)) for j := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.asis(v2[j].v) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[v2[j].i]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeBool(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapStringIntfR(rv reflect.Value) { - fastpathTV.EncMapStringIntfV(rv.Interface().(map[string]interface{}), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapStringIntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringIntfV(rv2i(rv).(map[string]interface{}), e) } -func (_ fastpathT) EncMapStringIntfV(v map[string]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapStringIntfV(v map[string]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 if e.h.Canonical { v2 := make([]string, len(v)) @@ -4777,51 +3209,48 @@ func (_ fastpathT) EncMapStringIntfV(v map[string]interface{}, checkNil bool, e } sort.Sort(stringSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } if asSymbols { ee.EncodeSymbol(k2) } else { ee.EncodeString(c_UTF8, k2) } - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep 
{ + ee.WriteMapElemValue() } e.encode(v[string(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } if asSymbols { ee.EncodeSymbol(k2) } else { ee.EncodeString(c_UTF8, k2) } - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapStringStringR(rv reflect.Value) { - fastpathTV.EncMapStringStringV(rv.Interface().(map[string]string), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapStringStringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringStringV(rv2i(rv).(map[string]string), e) } -func (_ fastpathT) EncMapStringStringV(v map[string]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapStringStringV(v map[string]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 if e.h.Canonical { v2 := make([]string, len(v)) @@ -4832,51 +3261,48 @@ func (_ fastpathT) EncMapStringStringV(v map[string]string, checkNil bool, e *En } sort.Sort(stringSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } if asSymbols { ee.EncodeSymbol(k2) } else { ee.EncodeString(c_UTF8, k2) } - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeString(c_UTF8, v[string(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } if asSymbols { ee.EncodeSymbol(k2) } else { ee.EncodeString(c_UTF8, k2) } - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeString(c_UTF8, v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapStringUintR(rv reflect.Value) { - fastpathTV.EncMapStringUintV(rv.Interface().(map[string]uint), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapStringUintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringUintV(rv2i(rv).(map[string]uint), e) } -func (_ fastpathT) EncMapStringUintV(v map[string]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapStringUintV(v map[string]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 if e.h.Canonical { v2 := make([]string, len(v)) @@ -4887,51 +3313,48 @@ func (_ fastpathT) EncMapStringUintV(v map[string]uint, checkNil bool, e *Encode } sort.Sort(stringSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } if asSymbols { ee.EncodeSymbol(k2) } else { ee.EncodeString(c_UTF8, k2) } - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[string(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } if asSymbols { ee.EncodeSymbol(k2) } else 
{ ee.EncodeString(c_UTF8, k2) } - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapStringUint8R(rv reflect.Value) { - fastpathTV.EncMapStringUint8V(rv.Interface().(map[string]uint8), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapStringUint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringUint8V(rv2i(rv).(map[string]uint8), e) } -func (_ fastpathT) EncMapStringUint8V(v map[string]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapStringUint8V(v map[string]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 if e.h.Canonical { v2 := make([]string, len(v)) @@ -4942,51 +3365,48 @@ func (_ fastpathT) EncMapStringUint8V(v map[string]uint8, checkNil bool, e *Enco } sort.Sort(stringSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } if asSymbols { ee.EncodeSymbol(k2) } else { ee.EncodeString(c_UTF8, k2) } - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[string(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } if asSymbols { ee.EncodeSymbol(k2) } else { ee.EncodeString(c_UTF8, k2) } - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapStringUint16R(rv reflect.Value) { - fastpathTV.EncMapStringUint16V(rv.Interface().(map[string]uint16), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapStringUint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringUint16V(rv2i(rv).(map[string]uint16), e) } -func (_ fastpathT) EncMapStringUint16V(v map[string]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapStringUint16V(v map[string]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 if e.h.Canonical { v2 := make([]string, len(v)) @@ -4997,51 +3417,48 @@ func (_ fastpathT) EncMapStringUint16V(v map[string]uint16, checkNil bool, e *En } sort.Sort(stringSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } if asSymbols { ee.EncodeSymbol(k2) } else { ee.EncodeString(c_UTF8, k2) } - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[string(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } if asSymbols { ee.EncodeSymbol(k2) } else { ee.EncodeString(c_UTF8, k2) } - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - 
cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapStringUint32R(rv reflect.Value) { - fastpathTV.EncMapStringUint32V(rv.Interface().(map[string]uint32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapStringUint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringUint32V(rv2i(rv).(map[string]uint32), e) } -func (_ fastpathT) EncMapStringUint32V(v map[string]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapStringUint32V(v map[string]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 if e.h.Canonical { v2 := make([]string, len(v)) @@ -5052,51 +3469,48 @@ func (_ fastpathT) EncMapStringUint32V(v map[string]uint32, checkNil bool, e *En } sort.Sort(stringSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } if asSymbols { ee.EncodeSymbol(k2) } else { ee.EncodeString(c_UTF8, k2) } - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[string(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } if asSymbols { ee.EncodeSymbol(k2) } else { ee.EncodeString(c_UTF8, k2) } - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapStringUint64R(rv reflect.Value) { - fastpathTV.EncMapStringUint64V(rv.Interface().(map[string]uint64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapStringUint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringUint64V(rv2i(rv).(map[string]uint64), e) } -func (_ fastpathT) EncMapStringUint64V(v map[string]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapStringUint64V(v map[string]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 if e.h.Canonical { v2 := make([]string, len(v)) @@ -5107,51 +3521,48 @@ func (_ fastpathT) EncMapStringUint64V(v map[string]uint64, checkNil bool, e *En } sort.Sort(stringSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } if asSymbols { ee.EncodeSymbol(k2) } else { ee.EncodeString(c_UTF8, k2) } - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[string(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } if asSymbols { ee.EncodeSymbol(k2) } else { ee.EncodeString(c_UTF8, k2) } - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapStringUintptrR(rv reflect.Value) { - 
fastpathTV.EncMapStringUintptrV(rv.Interface().(map[string]uintptr), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapStringUintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringUintptrV(rv2i(rv).(map[string]uintptr), e) } -func (_ fastpathT) EncMapStringUintptrV(v map[string]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapStringUintptrV(v map[string]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 if e.h.Canonical { v2 := make([]string, len(v)) @@ -5162,51 +3573,48 @@ func (_ fastpathT) EncMapStringUintptrV(v map[string]uintptr, checkNil bool, e * } sort.Sort(stringSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } if asSymbols { ee.EncodeSymbol(k2) } else { ee.EncodeString(c_UTF8, k2) } - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[string(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } if asSymbols { ee.EncodeSymbol(k2) } else { ee.EncodeString(c_UTF8, k2) } - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapStringIntR(rv reflect.Value) { - fastpathTV.EncMapStringIntV(rv.Interface().(map[string]int), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapStringIntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringIntV(rv2i(rv).(map[string]int), e) } -func (_ fastpathT) EncMapStringIntV(v map[string]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapStringIntV(v map[string]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 if e.h.Canonical { v2 := make([]string, len(v)) @@ -5217,51 +3625,48 @@ func (_ fastpathT) EncMapStringIntV(v map[string]int, checkNil bool, e *Encoder) } sort.Sort(stringSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } if asSymbols { ee.EncodeSymbol(k2) } else { ee.EncodeString(c_UTF8, k2) } - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[string(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } if asSymbols { ee.EncodeSymbol(k2) } else { ee.EncodeString(c_UTF8, k2) } - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapStringInt8R(rv reflect.Value) { - fastpathTV.EncMapStringInt8V(rv.Interface().(map[string]int8), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapStringInt8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringInt8V(rv2i(rv).(map[string]int8), e) } -func (_ 
fastpathT) EncMapStringInt8V(v map[string]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapStringInt8V(v map[string]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 if e.h.Canonical { v2 := make([]string, len(v)) @@ -5272,51 +3677,48 @@ func (_ fastpathT) EncMapStringInt8V(v map[string]int8, checkNil bool, e *Encode } sort.Sort(stringSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } if asSymbols { ee.EncodeSymbol(k2) } else { ee.EncodeString(c_UTF8, k2) } - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[string(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } if asSymbols { ee.EncodeSymbol(k2) } else { ee.EncodeString(c_UTF8, k2) } - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapStringInt16R(rv reflect.Value) { - fastpathTV.EncMapStringInt16V(rv.Interface().(map[string]int16), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapStringInt16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringInt16V(rv2i(rv).(map[string]int16), e) } -func (_ fastpathT) EncMapStringInt16V(v map[string]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapStringInt16V(v map[string]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 if e.h.Canonical { v2 := make([]string, len(v)) @@ -5327,51 +3729,48 @@ func (_ fastpathT) EncMapStringInt16V(v map[string]int16, checkNil bool, e *Enco } sort.Sort(stringSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } if asSymbols { ee.EncodeSymbol(k2) } else { ee.EncodeString(c_UTF8, k2) } - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[string(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } if asSymbols { ee.EncodeSymbol(k2) } else { ee.EncodeString(c_UTF8, k2) } - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapStringInt32R(rv reflect.Value) { - fastpathTV.EncMapStringInt32V(rv.Interface().(map[string]int32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapStringInt32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringInt32V(rv2i(rv).(map[string]int32), e) } -func (_ fastpathT) EncMapStringInt32V(v map[string]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapStringInt32V(v map[string]int32, e *Encoder) { + if v 
== nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 if e.h.Canonical { v2 := make([]string, len(v)) @@ -5382,51 +3781,48 @@ func (_ fastpathT) EncMapStringInt32V(v map[string]int32, checkNil bool, e *Enco } sort.Sort(stringSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } if asSymbols { ee.EncodeSymbol(k2) } else { ee.EncodeString(c_UTF8, k2) } - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[string(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } if asSymbols { ee.EncodeSymbol(k2) } else { ee.EncodeString(c_UTF8, k2) } - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapStringInt64R(rv reflect.Value) { - fastpathTV.EncMapStringInt64V(rv.Interface().(map[string]int64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapStringInt64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringInt64V(rv2i(rv).(map[string]int64), e) } -func (_ fastpathT) EncMapStringInt64V(v map[string]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapStringInt64V(v map[string]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 if e.h.Canonical { v2 := make([]string, len(v)) @@ -5437,51 +3833,48 @@ func (_ fastpathT) EncMapStringInt64V(v map[string]int64, checkNil bool, e *Enco } sort.Sort(stringSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } if asSymbols { ee.EncodeSymbol(k2) } else { ee.EncodeString(c_UTF8, k2) } - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[string(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } if asSymbols { ee.EncodeSymbol(k2) } else { ee.EncodeString(c_UTF8, k2) } - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapStringFloat32R(rv reflect.Value) { - fastpathTV.EncMapStringFloat32V(rv.Interface().(map[string]float32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapStringFloat32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringFloat32V(rv2i(rv).(map[string]float32), e) } -func (_ fastpathT) EncMapStringFloat32V(v map[string]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapStringFloat32V(v map[string]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 if 
e.h.Canonical { v2 := make([]string, len(v)) @@ -5492,51 +3885,48 @@ func (_ fastpathT) EncMapStringFloat32V(v map[string]float32, checkNil bool, e * } sort.Sort(stringSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } if asSymbols { ee.EncodeSymbol(k2) } else { ee.EncodeString(c_UTF8, k2) } - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat32(v[string(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } if asSymbols { ee.EncodeSymbol(k2) } else { ee.EncodeString(c_UTF8, k2) } - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat32(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapStringFloat64R(rv reflect.Value) { - fastpathTV.EncMapStringFloat64V(rv.Interface().(map[string]float64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapStringFloat64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringFloat64V(rv2i(rv).(map[string]float64), e) } -func (_ fastpathT) EncMapStringFloat64V(v map[string]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapStringFloat64V(v map[string]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 if e.h.Canonical { v2 := make([]string, len(v)) @@ -5547,51 +3937,48 @@ func (_ fastpathT) EncMapStringFloat64V(v map[string]float64, checkNil bool, e * } sort.Sort(stringSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } if asSymbols { ee.EncodeSymbol(k2) } else { ee.EncodeString(c_UTF8, k2) } - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat64(v[string(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } if asSymbols { ee.EncodeSymbol(k2) } else { ee.EncodeString(c_UTF8, k2) } - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat64(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapStringBoolR(rv reflect.Value) { - fastpathTV.EncMapStringBoolV(rv.Interface().(map[string]bool), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapStringBoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringBoolV(rv2i(rv).(map[string]bool), e) } -func (_ fastpathT) EncMapStringBoolV(v map[string]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapStringBoolV(v map[string]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 if e.h.Canonical { v2 := make([]string, len(v)) @@ -5602,51 +3989,48 @@ func (_ fastpathT) EncMapStringBoolV(v map[string]bool, checkNil bool, e *Encode } sort.Sort(stringSlice(v2)) for _, k2 := range v2 { - if cr != nil { - 
cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } if asSymbols { ee.EncodeSymbol(k2) } else { ee.EncodeString(c_UTF8, k2) } - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeBool(v[string(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } if asSymbols { ee.EncodeSymbol(k2) } else { ee.EncodeString(c_UTF8, k2) } - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeBool(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapFloat32IntfR(rv reflect.Value) { - fastpathTV.EncMapFloat32IntfV(rv.Interface().(map[float32]interface{}), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapFloat32IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32IntfV(rv2i(rv).(map[float32]interface{}), e) } -func (_ fastpathT) EncMapFloat32IntfV(v map[float32]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapFloat32IntfV(v map[float32]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]float64, len(v)) var i int @@ -5656,43 +4040,40 @@ func (_ fastpathT) EncMapFloat32IntfV(v map[float32]interface{}, checkNil bool, } sort.Sort(floatSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat32(float32(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[float32(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat32(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapFloat32StringR(rv reflect.Value) { - fastpathTV.EncMapFloat32StringV(rv.Interface().(map[float32]string), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapFloat32StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32StringV(rv2i(rv).(map[float32]string), e) } -func (_ fastpathT) EncMapFloat32StringV(v map[float32]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapFloat32StringV(v map[float32]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]float64, len(v)) var i int @@ -5702,43 +4083,40 @@ func (_ fastpathT) EncMapFloat32StringV(v map[float32]string, checkNil bool, e * } sort.Sort(floatSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat32(float32(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeString(c_UTF8, v[float32(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat32(k2) - if cr 
!= nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeString(c_UTF8, v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapFloat32UintR(rv reflect.Value) { - fastpathTV.EncMapFloat32UintV(rv.Interface().(map[float32]uint), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapFloat32UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32UintV(rv2i(rv).(map[float32]uint), e) } -func (_ fastpathT) EncMapFloat32UintV(v map[float32]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapFloat32UintV(v map[float32]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]float64, len(v)) var i int @@ -5748,43 +4126,40 @@ func (_ fastpathT) EncMapFloat32UintV(v map[float32]uint, checkNil bool, e *Enco } sort.Sort(floatSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat32(float32(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[float32(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat32(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapFloat32Uint8R(rv reflect.Value) { - fastpathTV.EncMapFloat32Uint8V(rv.Interface().(map[float32]uint8), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapFloat32Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Uint8V(rv2i(rv).(map[float32]uint8), e) } -func (_ fastpathT) EncMapFloat32Uint8V(v map[float32]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapFloat32Uint8V(v map[float32]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]float64, len(v)) var i int @@ -5794,43 +4169,40 @@ func (_ fastpathT) EncMapFloat32Uint8V(v map[float32]uint8, checkNil bool, e *En } sort.Sort(floatSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat32(float32(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[float32(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat32(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapFloat32Uint16R(rv reflect.Value) { - fastpathTV.EncMapFloat32Uint16V(rv.Interface().(map[float32]uint16), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapFloat32Uint16R(f *codecFnInfo, rv reflect.Value) { + 
fastpathTV.EncMapFloat32Uint16V(rv2i(rv).(map[float32]uint16), e) } -func (_ fastpathT) EncMapFloat32Uint16V(v map[float32]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapFloat32Uint16V(v map[float32]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]float64, len(v)) var i int @@ -5840,43 +4212,40 @@ func (_ fastpathT) EncMapFloat32Uint16V(v map[float32]uint16, checkNil bool, e * } sort.Sort(floatSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat32(float32(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[float32(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat32(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapFloat32Uint32R(rv reflect.Value) { - fastpathTV.EncMapFloat32Uint32V(rv.Interface().(map[float32]uint32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapFloat32Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Uint32V(rv2i(rv).(map[float32]uint32), e) } -func (_ fastpathT) EncMapFloat32Uint32V(v map[float32]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapFloat32Uint32V(v map[float32]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]float64, len(v)) var i int @@ -5886,43 +4255,40 @@ func (_ fastpathT) EncMapFloat32Uint32V(v map[float32]uint32, checkNil bool, e * } sort.Sort(floatSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat32(float32(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[float32(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat32(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapFloat32Uint64R(rv reflect.Value) { - fastpathTV.EncMapFloat32Uint64V(rv.Interface().(map[float32]uint64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapFloat32Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Uint64V(rv2i(rv).(map[float32]uint64), e) } -func (_ fastpathT) EncMapFloat32Uint64V(v map[float32]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapFloat32Uint64V(v map[float32]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if 
e.h.Canonical { v2 := make([]float64, len(v)) var i int @@ -5932,43 +4298,40 @@ func (_ fastpathT) EncMapFloat32Uint64V(v map[float32]uint64, checkNil bool, e * } sort.Sort(floatSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat32(float32(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[float32(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat32(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapFloat32UintptrR(rv reflect.Value) { - fastpathTV.EncMapFloat32UintptrV(rv.Interface().(map[float32]uintptr), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapFloat32UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32UintptrV(rv2i(rv).(map[float32]uintptr), e) } -func (_ fastpathT) EncMapFloat32UintptrV(v map[float32]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapFloat32UintptrV(v map[float32]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]float64, len(v)) var i int @@ -5978,43 +4341,40 @@ func (_ fastpathT) EncMapFloat32UintptrV(v map[float32]uintptr, checkNil bool, e } sort.Sort(floatSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat32(float32(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[float32(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat32(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapFloat32IntR(rv reflect.Value) { - fastpathTV.EncMapFloat32IntV(rv.Interface().(map[float32]int), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapFloat32IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32IntV(rv2i(rv).(map[float32]int), e) } -func (_ fastpathT) EncMapFloat32IntV(v map[float32]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapFloat32IntV(v map[float32]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]float64, len(v)) var i int @@ -6024,43 +4384,40 @@ func (_ fastpathT) EncMapFloat32IntV(v map[float32]int, checkNil bool, e *Encode } sort.Sort(floatSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat32(float32(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[float32(k2)])) } } else { for k2, v2 := range v { - 
if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat32(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapFloat32Int8R(rv reflect.Value) { - fastpathTV.EncMapFloat32Int8V(rv.Interface().(map[float32]int8), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapFloat32Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Int8V(rv2i(rv).(map[float32]int8), e) } -func (_ fastpathT) EncMapFloat32Int8V(v map[float32]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapFloat32Int8V(v map[float32]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]float64, len(v)) var i int @@ -6070,43 +4427,40 @@ func (_ fastpathT) EncMapFloat32Int8V(v map[float32]int8, checkNil bool, e *Enco } sort.Sort(floatSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat32(float32(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[float32(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat32(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapFloat32Int16R(rv reflect.Value) { - fastpathTV.EncMapFloat32Int16V(rv.Interface().(map[float32]int16), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapFloat32Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Int16V(rv2i(rv).(map[float32]int16), e) } -func (_ fastpathT) EncMapFloat32Int16V(v map[float32]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapFloat32Int16V(v map[float32]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]float64, len(v)) var i int @@ -6116,43 +4470,40 @@ func (_ fastpathT) EncMapFloat32Int16V(v map[float32]int16, checkNil bool, e *En } sort.Sort(floatSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat32(float32(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[float32(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat32(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapFloat32Int32R(rv reflect.Value) { - fastpathTV.EncMapFloat32Int32V(rv.Interface().(map[float32]int32), fastpathCheckNilFalse, f.e) +func (e 
*Encoder) fastpathEncMapFloat32Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Int32V(rv2i(rv).(map[float32]int32), e) } -func (_ fastpathT) EncMapFloat32Int32V(v map[float32]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapFloat32Int32V(v map[float32]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]float64, len(v)) var i int @@ -6162,43 +4513,40 @@ func (_ fastpathT) EncMapFloat32Int32V(v map[float32]int32, checkNil bool, e *En } sort.Sort(floatSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat32(float32(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[float32(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat32(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapFloat32Int64R(rv reflect.Value) { - fastpathTV.EncMapFloat32Int64V(rv.Interface().(map[float32]int64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapFloat32Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Int64V(rv2i(rv).(map[float32]int64), e) } -func (_ fastpathT) EncMapFloat32Int64V(v map[float32]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapFloat32Int64V(v map[float32]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]float64, len(v)) var i int @@ -6208,43 +4556,40 @@ func (_ fastpathT) EncMapFloat32Int64V(v map[float32]int64, checkNil bool, e *En } sort.Sort(floatSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat32(float32(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[float32(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat32(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapFloat32Float32R(rv reflect.Value) { - fastpathTV.EncMapFloat32Float32V(rv.Interface().(map[float32]float32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapFloat32Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Float32V(rv2i(rv).(map[float32]float32), e) } -func (_ fastpathT) EncMapFloat32Float32V(v map[float32]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapFloat32Float32V(v map[float32]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, 
e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]float64, len(v)) var i int @@ -6254,43 +4599,40 @@ func (_ fastpathT) EncMapFloat32Float32V(v map[float32]float32, checkNil bool, e } sort.Sort(floatSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat32(float32(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat32(v[float32(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat32(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat32(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapFloat32Float64R(rv reflect.Value) { - fastpathTV.EncMapFloat32Float64V(rv.Interface().(map[float32]float64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapFloat32Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Float64V(rv2i(rv).(map[float32]float64), e) } -func (_ fastpathT) EncMapFloat32Float64V(v map[float32]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapFloat32Float64V(v map[float32]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]float64, len(v)) var i int @@ -6300,43 +4642,40 @@ func (_ fastpathT) EncMapFloat32Float64V(v map[float32]float64, checkNil bool, e } sort.Sort(floatSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat32(float32(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat64(v[float32(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat32(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat64(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapFloat32BoolR(rv reflect.Value) { - fastpathTV.EncMapFloat32BoolV(rv.Interface().(map[float32]bool), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapFloat32BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32BoolV(rv2i(rv).(map[float32]bool), e) } -func (_ fastpathT) EncMapFloat32BoolV(v map[float32]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapFloat32BoolV(v map[float32]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]float64, len(v)) var i int @@ -6346,43 +4685,40 @@ func (_ fastpathT) EncMapFloat32BoolV(v map[float32]bool, checkNil bool, e *Enco } sort.Sort(floatSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat32(float32(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } 
ee.EncodeBool(v[float32(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat32(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeBool(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapFloat64IntfR(rv reflect.Value) { - fastpathTV.EncMapFloat64IntfV(rv.Interface().(map[float64]interface{}), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapFloat64IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64IntfV(rv2i(rv).(map[float64]interface{}), e) } -func (_ fastpathT) EncMapFloat64IntfV(v map[float64]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapFloat64IntfV(v map[float64]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]float64, len(v)) var i int @@ -6392,43 +4728,40 @@ func (_ fastpathT) EncMapFloat64IntfV(v map[float64]interface{}, checkNil bool, } sort.Sort(floatSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat64(float64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[float64(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat64(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapFloat64StringR(rv reflect.Value) { - fastpathTV.EncMapFloat64StringV(rv.Interface().(map[float64]string), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapFloat64StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64StringV(rv2i(rv).(map[float64]string), e) } -func (_ fastpathT) EncMapFloat64StringV(v map[float64]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapFloat64StringV(v map[float64]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]float64, len(v)) var i int @@ -6438,43 +4771,40 @@ func (_ fastpathT) EncMapFloat64StringV(v map[float64]string, checkNil bool, e * } sort.Sort(floatSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat64(float64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeString(c_UTF8, v[float64(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat64(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeString(c_UTF8, v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapFloat64UintR(rv reflect.Value) { - 
fastpathTV.EncMapFloat64UintV(rv.Interface().(map[float64]uint), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapFloat64UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64UintV(rv2i(rv).(map[float64]uint), e) } -func (_ fastpathT) EncMapFloat64UintV(v map[float64]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapFloat64UintV(v map[float64]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]float64, len(v)) var i int @@ -6484,43 +4814,40 @@ func (_ fastpathT) EncMapFloat64UintV(v map[float64]uint, checkNil bool, e *Enco } sort.Sort(floatSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat64(float64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[float64(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat64(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapFloat64Uint8R(rv reflect.Value) { - fastpathTV.EncMapFloat64Uint8V(rv.Interface().(map[float64]uint8), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapFloat64Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Uint8V(rv2i(rv).(map[float64]uint8), e) } -func (_ fastpathT) EncMapFloat64Uint8V(v map[float64]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapFloat64Uint8V(v map[float64]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]float64, len(v)) var i int @@ -6530,43 +4857,40 @@ func (_ fastpathT) EncMapFloat64Uint8V(v map[float64]uint8, checkNil bool, e *En } sort.Sort(floatSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat64(float64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[float64(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat64(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapFloat64Uint16R(rv reflect.Value) { - fastpathTV.EncMapFloat64Uint16V(rv.Interface().(map[float64]uint16), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapFloat64Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Uint16V(rv2i(rv).(map[float64]uint16), e) } -func (_ fastpathT) EncMapFloat64Uint16V(v map[float64]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapFloat64Uint16V(v map[float64]uint16, e *Encoder) { + 
if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]float64, len(v)) var i int @@ -6576,43 +4900,40 @@ func (_ fastpathT) EncMapFloat64Uint16V(v map[float64]uint16, checkNil bool, e * } sort.Sort(floatSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat64(float64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[float64(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat64(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapFloat64Uint32R(rv reflect.Value) { - fastpathTV.EncMapFloat64Uint32V(rv.Interface().(map[float64]uint32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapFloat64Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Uint32V(rv2i(rv).(map[float64]uint32), e) } -func (_ fastpathT) EncMapFloat64Uint32V(v map[float64]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapFloat64Uint32V(v map[float64]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]float64, len(v)) var i int @@ -6622,43 +4943,40 @@ func (_ fastpathT) EncMapFloat64Uint32V(v map[float64]uint32, checkNil bool, e * } sort.Sort(floatSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat64(float64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[float64(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat64(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapFloat64Uint64R(rv reflect.Value) { - fastpathTV.EncMapFloat64Uint64V(rv.Interface().(map[float64]uint64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapFloat64Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Uint64V(rv2i(rv).(map[float64]uint64), e) } -func (_ fastpathT) EncMapFloat64Uint64V(v map[float64]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapFloat64Uint64V(v map[float64]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]float64, len(v)) var i int @@ -6668,43 +4986,40 @@ func (_ fastpathT) EncMapFloat64Uint64V(v map[float64]uint64, checkNil bool, e * } sort.Sort(floatSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } 
ee.EncodeFloat64(float64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[float64(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat64(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapFloat64UintptrR(rv reflect.Value) { - fastpathTV.EncMapFloat64UintptrV(rv.Interface().(map[float64]uintptr), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapFloat64UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64UintptrV(rv2i(rv).(map[float64]uintptr), e) } -func (_ fastpathT) EncMapFloat64UintptrV(v map[float64]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapFloat64UintptrV(v map[float64]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]float64, len(v)) var i int @@ -6714,43 +5029,40 @@ func (_ fastpathT) EncMapFloat64UintptrV(v map[float64]uintptr, checkNil bool, e } sort.Sort(floatSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat64(float64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[float64(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat64(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapFloat64IntR(rv reflect.Value) { - fastpathTV.EncMapFloat64IntV(rv.Interface().(map[float64]int), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapFloat64IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64IntV(rv2i(rv).(map[float64]int), e) } -func (_ fastpathT) EncMapFloat64IntV(v map[float64]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapFloat64IntV(v map[float64]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]float64, len(v)) var i int @@ -6760,43 +5072,40 @@ func (_ fastpathT) EncMapFloat64IntV(v map[float64]int, checkNil bool, e *Encode } sort.Sort(floatSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat64(float64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[float64(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat64(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + 
ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapFloat64Int8R(rv reflect.Value) { - fastpathTV.EncMapFloat64Int8V(rv.Interface().(map[float64]int8), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapFloat64Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Int8V(rv2i(rv).(map[float64]int8), e) } -func (_ fastpathT) EncMapFloat64Int8V(v map[float64]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapFloat64Int8V(v map[float64]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]float64, len(v)) var i int @@ -6806,43 +5115,40 @@ func (_ fastpathT) EncMapFloat64Int8V(v map[float64]int8, checkNil bool, e *Enco } sort.Sort(floatSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat64(float64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[float64(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat64(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapFloat64Int16R(rv reflect.Value) { - fastpathTV.EncMapFloat64Int16V(rv.Interface().(map[float64]int16), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapFloat64Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Int16V(rv2i(rv).(map[float64]int16), e) } -func (_ fastpathT) EncMapFloat64Int16V(v map[float64]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapFloat64Int16V(v map[float64]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]float64, len(v)) var i int @@ -6852,43 +5158,40 @@ func (_ fastpathT) EncMapFloat64Int16V(v map[float64]int16, checkNil bool, e *En } sort.Sort(floatSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat64(float64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[float64(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat64(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapFloat64Int32R(rv reflect.Value) { - fastpathTV.EncMapFloat64Int32V(rv.Interface().(map[float64]int32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapFloat64Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Int32V(rv2i(rv).(map[float64]int32), e) } -func (_ fastpathT) EncMapFloat64Int32V(v map[float64]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func 
(_ fastpathT) EncMapFloat64Int32V(v map[float64]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]float64, len(v)) var i int @@ -6898,43 +5201,40 @@ func (_ fastpathT) EncMapFloat64Int32V(v map[float64]int32, checkNil bool, e *En } sort.Sort(floatSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat64(float64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[float64(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat64(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapFloat64Int64R(rv reflect.Value) { - fastpathTV.EncMapFloat64Int64V(rv.Interface().(map[float64]int64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapFloat64Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Int64V(rv2i(rv).(map[float64]int64), e) } -func (_ fastpathT) EncMapFloat64Int64V(v map[float64]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapFloat64Int64V(v map[float64]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]float64, len(v)) var i int @@ -6944,43 +5244,40 @@ func (_ fastpathT) EncMapFloat64Int64V(v map[float64]int64, checkNil bool, e *En } sort.Sort(floatSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat64(float64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[float64(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat64(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapFloat64Float32R(rv reflect.Value) { - fastpathTV.EncMapFloat64Float32V(rv.Interface().(map[float64]float32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapFloat64Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Float32V(rv2i(rv).(map[float64]float32), e) } -func (_ fastpathT) EncMapFloat64Float32V(v map[float64]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapFloat64Float32V(v map[float64]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]float64, len(v)) var i int @@ -6990,43 +5287,40 @@ func (_ fastpathT) EncMapFloat64Float32V(v map[float64]float32, checkNil bool, e } sort.Sort(floatSlice(v2)) for _, k2 := range v2 { - if cr != nil { - 
cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat64(float64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat32(v[float64(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat64(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat32(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapFloat64Float64R(rv reflect.Value) { - fastpathTV.EncMapFloat64Float64V(rv.Interface().(map[float64]float64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapFloat64Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Float64V(rv2i(rv).(map[float64]float64), e) } -func (_ fastpathT) EncMapFloat64Float64V(v map[float64]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapFloat64Float64V(v map[float64]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]float64, len(v)) var i int @@ -7036,43 +5330,40 @@ func (_ fastpathT) EncMapFloat64Float64V(v map[float64]float64, checkNil bool, e } sort.Sort(floatSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat64(float64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat64(v[float64(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat64(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat64(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapFloat64BoolR(rv reflect.Value) { - fastpathTV.EncMapFloat64BoolV(rv.Interface().(map[float64]bool), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapFloat64BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64BoolV(rv2i(rv).(map[float64]bool), e) } -func (_ fastpathT) EncMapFloat64BoolV(v map[float64]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapFloat64BoolV(v map[float64]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]float64, len(v)) var i int @@ -7082,43 +5373,40 @@ func (_ fastpathT) EncMapFloat64BoolV(v map[float64]bool, checkNil bool, e *Enco } sort.Sort(floatSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat64(float64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeBool(v[float64(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeFloat64(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } 
ee.EncodeBool(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUintIntfR(rv reflect.Value) { - fastpathTV.EncMapUintIntfV(rv.Interface().(map[uint]interface{}), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUintIntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintIntfV(rv2i(rv).(map[uint]interface{}), e) } -func (_ fastpathT) EncMapUintIntfV(v map[uint]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUintIntfV(v map[uint]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -7128,43 +5416,40 @@ func (_ fastpathT) EncMapUintIntfV(v map[uint]interface{}, checkNil bool, e *Enc } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[uint(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUintStringR(rv reflect.Value) { - fastpathTV.EncMapUintStringV(rv.Interface().(map[uint]string), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUintStringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintStringV(rv2i(rv).(map[uint]string), e) } -func (_ fastpathT) EncMapUintStringV(v map[uint]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUintStringV(v map[uint]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -7174,43 +5459,40 @@ func (_ fastpathT) EncMapUintStringV(v map[uint]string, checkNil bool, e *Encode } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeString(c_UTF8, v[uint(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeString(c_UTF8, v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUintUintR(rv reflect.Value) { - fastpathTV.EncMapUintUintV(rv.Interface().(map[uint]uint), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUintUintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintUintV(rv2i(rv).(map[uint]uint), e) } -func (_ fastpathT) EncMapUintUintV(v map[uint]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - 
ee.EncodeNil() +func (_ fastpathT) EncMapUintUintV(v map[uint]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -7220,43 +5502,40 @@ func (_ fastpathT) EncMapUintUintV(v map[uint]uint, checkNil bool, e *Encoder) { } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[uint(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUintUint8R(rv reflect.Value) { - fastpathTV.EncMapUintUint8V(rv.Interface().(map[uint]uint8), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUintUint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintUint8V(rv2i(rv).(map[uint]uint8), e) } -func (_ fastpathT) EncMapUintUint8V(v map[uint]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUintUint8V(v map[uint]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -7266,43 +5545,40 @@ func (_ fastpathT) EncMapUintUint8V(v map[uint]uint8, checkNil bool, e *Encoder) } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[uint(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUintUint16R(rv reflect.Value) { - fastpathTV.EncMapUintUint16V(rv.Interface().(map[uint]uint16), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUintUint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintUint16V(rv2i(rv).(map[uint]uint16), e) } -func (_ fastpathT) EncMapUintUint16V(v map[uint]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUintUint16V(v map[uint]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -7312,43 +5588,40 @@ func (_ fastpathT) EncMapUintUint16V(v map[uint]uint16, checkNil bool, e *Encode } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() 
} ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[uint(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUintUint32R(rv reflect.Value) { - fastpathTV.EncMapUintUint32V(rv.Interface().(map[uint]uint32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUintUint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintUint32V(rv2i(rv).(map[uint]uint32), e) } -func (_ fastpathT) EncMapUintUint32V(v map[uint]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUintUint32V(v map[uint]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -7358,43 +5631,40 @@ func (_ fastpathT) EncMapUintUint32V(v map[uint]uint32, checkNil bool, e *Encode } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[uint(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUintUint64R(rv reflect.Value) { - fastpathTV.EncMapUintUint64V(rv.Interface().(map[uint]uint64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUintUint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintUint64V(rv2i(rv).(map[uint]uint64), e) } -func (_ fastpathT) EncMapUintUint64V(v map[uint]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUintUint64V(v map[uint]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -7404,43 +5674,40 @@ func (_ fastpathT) EncMapUintUint64V(v map[uint]uint64, checkNil bool, e *Encode } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[uint(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + 
ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUintUintptrR(rv reflect.Value) { - fastpathTV.EncMapUintUintptrV(rv.Interface().(map[uint]uintptr), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUintUintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintUintptrV(rv2i(rv).(map[uint]uintptr), e) } -func (_ fastpathT) EncMapUintUintptrV(v map[uint]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUintUintptrV(v map[uint]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -7450,43 +5717,40 @@ func (_ fastpathT) EncMapUintUintptrV(v map[uint]uintptr, checkNil bool, e *Enco } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[uint(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUintIntR(rv reflect.Value) { - fastpathTV.EncMapUintIntV(rv.Interface().(map[uint]int), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUintIntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintIntV(rv2i(rv).(map[uint]int), e) } -func (_ fastpathT) EncMapUintIntV(v map[uint]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUintIntV(v map[uint]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -7496,43 +5760,40 @@ func (_ fastpathT) EncMapUintIntV(v map[uint]int, checkNil bool, e *Encoder) { } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[uint(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUintInt8R(rv reflect.Value) { - fastpathTV.EncMapUintInt8V(rv.Interface().(map[uint]int8), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUintInt8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintInt8V(rv2i(rv).(map[uint]int8), e) } -func (_ fastpathT) EncMapUintInt8V(v map[uint]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUintInt8V(v map[uint]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() return 
} - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -7542,43 +5803,40 @@ func (_ fastpathT) EncMapUintInt8V(v map[uint]int8, checkNil bool, e *Encoder) { } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[uint(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUintInt16R(rv reflect.Value) { - fastpathTV.EncMapUintInt16V(rv.Interface().(map[uint]int16), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUintInt16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintInt16V(rv2i(rv).(map[uint]int16), e) } -func (_ fastpathT) EncMapUintInt16V(v map[uint]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUintInt16V(v map[uint]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -7588,43 +5846,40 @@ func (_ fastpathT) EncMapUintInt16V(v map[uint]int16, checkNil bool, e *Encoder) } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[uint(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUintInt32R(rv reflect.Value) { - fastpathTV.EncMapUintInt32V(rv.Interface().(map[uint]int32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUintInt32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintInt32V(rv2i(rv).(map[uint]int32), e) } -func (_ fastpathT) EncMapUintInt32V(v map[uint]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUintInt32V(v map[uint]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -7634,43 +5889,40 @@ func (_ fastpathT) EncMapUintInt32V(v map[uint]int32, checkNil bool, e *Encoder) } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } 
ee.EncodeInt(int64(v[uint(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUintInt64R(rv reflect.Value) { - fastpathTV.EncMapUintInt64V(rv.Interface().(map[uint]int64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUintInt64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintInt64V(rv2i(rv).(map[uint]int64), e) } -func (_ fastpathT) EncMapUintInt64V(v map[uint]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUintInt64V(v map[uint]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -7680,43 +5932,40 @@ func (_ fastpathT) EncMapUintInt64V(v map[uint]int64, checkNil bool, e *Encoder) } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[uint(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUintFloat32R(rv reflect.Value) { - fastpathTV.EncMapUintFloat32V(rv.Interface().(map[uint]float32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUintFloat32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintFloat32V(rv2i(rv).(map[uint]float32), e) } -func (_ fastpathT) EncMapUintFloat32V(v map[uint]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUintFloat32V(v map[uint]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -7726,43 +5975,40 @@ func (_ fastpathT) EncMapUintFloat32V(v map[uint]float32, checkNil bool, e *Enco } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat32(v[uint(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat32(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUintFloat64R(rv reflect.Value) { - fastpathTV.EncMapUintFloat64V(rv.Interface().(map[uint]float64), 
fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUintFloat64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintFloat64V(rv2i(rv).(map[uint]float64), e) } -func (_ fastpathT) EncMapUintFloat64V(v map[uint]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUintFloat64V(v map[uint]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -7772,43 +6018,40 @@ func (_ fastpathT) EncMapUintFloat64V(v map[uint]float64, checkNil bool, e *Enco } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat64(v[uint(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat64(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUintBoolR(rv reflect.Value) { - fastpathTV.EncMapUintBoolV(rv.Interface().(map[uint]bool), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUintBoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintBoolV(rv2i(rv).(map[uint]bool), e) } -func (_ fastpathT) EncMapUintBoolV(v map[uint]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUintBoolV(v map[uint]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -7818,43 +6061,40 @@ func (_ fastpathT) EncMapUintBoolV(v map[uint]bool, checkNil bool, e *Encoder) { } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeBool(v[uint(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeBool(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint8IntfR(rv reflect.Value) { - fastpathTV.EncMapUint8IntfV(rv.Interface().(map[uint8]interface{}), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint8IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8IntfV(rv2i(rv).(map[uint8]interface{}), e) } -func (_ fastpathT) EncMapUint8IntfV(v map[uint8]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint8IntfV(v map[uint8]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) 
if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -7864,43 +6104,40 @@ func (_ fastpathT) EncMapUint8IntfV(v map[uint8]interface{}, checkNil bool, e *E } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[uint8(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint8StringR(rv reflect.Value) { - fastpathTV.EncMapUint8StringV(rv.Interface().(map[uint8]string), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint8StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8StringV(rv2i(rv).(map[uint8]string), e) } -func (_ fastpathT) EncMapUint8StringV(v map[uint8]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint8StringV(v map[uint8]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -7910,43 +6147,40 @@ func (_ fastpathT) EncMapUint8StringV(v map[uint8]string, checkNil bool, e *Enco } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeString(c_UTF8, v[uint8(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeString(c_UTF8, v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint8UintR(rv reflect.Value) { - fastpathTV.EncMapUint8UintV(rv.Interface().(map[uint8]uint), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint8UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8UintV(rv2i(rv).(map[uint8]uint), e) } -func (_ fastpathT) EncMapUint8UintV(v map[uint8]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint8UintV(v map[uint8]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -7956,43 +6190,40 @@ func (_ fastpathT) EncMapUint8UintV(v map[uint8]uint, checkNil bool, e *Encoder) } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[uint8(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - 
cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint8Uint8R(rv reflect.Value) { - fastpathTV.EncMapUint8Uint8V(rv.Interface().(map[uint8]uint8), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint8Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Uint8V(rv2i(rv).(map[uint8]uint8), e) } -func (_ fastpathT) EncMapUint8Uint8V(v map[uint8]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint8Uint8V(v map[uint8]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -8002,43 +6233,40 @@ func (_ fastpathT) EncMapUint8Uint8V(v map[uint8]uint8, checkNil bool, e *Encode } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[uint8(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint8Uint16R(rv reflect.Value) { - fastpathTV.EncMapUint8Uint16V(rv.Interface().(map[uint8]uint16), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint8Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Uint16V(rv2i(rv).(map[uint8]uint16), e) } -func (_ fastpathT) EncMapUint8Uint16V(v map[uint8]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint8Uint16V(v map[uint8]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -8048,43 +6276,40 @@ func (_ fastpathT) EncMapUint8Uint16V(v map[uint8]uint16, checkNil bool, e *Enco } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[uint8(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint8Uint32R(rv reflect.Value) { - fastpathTV.EncMapUint8Uint32V(rv.Interface().(map[uint8]uint32), fastpathCheckNilFalse, f.e) +func (e *Encoder) 
fastpathEncMapUint8Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Uint32V(rv2i(rv).(map[uint8]uint32), e) } -func (_ fastpathT) EncMapUint8Uint32V(v map[uint8]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint8Uint32V(v map[uint8]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -8094,43 +6319,40 @@ func (_ fastpathT) EncMapUint8Uint32V(v map[uint8]uint32, checkNil bool, e *Enco } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[uint8(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint8Uint64R(rv reflect.Value) { - fastpathTV.EncMapUint8Uint64V(rv.Interface().(map[uint8]uint64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint8Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Uint64V(rv2i(rv).(map[uint8]uint64), e) } -func (_ fastpathT) EncMapUint8Uint64V(v map[uint8]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint8Uint64V(v map[uint8]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -8140,43 +6362,40 @@ func (_ fastpathT) EncMapUint8Uint64V(v map[uint8]uint64, checkNil bool, e *Enco } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[uint8(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint8UintptrR(rv reflect.Value) { - fastpathTV.EncMapUint8UintptrV(rv.Interface().(map[uint8]uintptr), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint8UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8UintptrV(rv2i(rv).(map[uint8]uintptr), e) } -func (_ fastpathT) EncMapUint8UintptrV(v map[uint8]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint8UintptrV(v map[uint8]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + 
ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -8186,43 +6405,40 @@ func (_ fastpathT) EncMapUint8UintptrV(v map[uint8]uintptr, checkNil bool, e *En } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[uint8(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint8IntR(rv reflect.Value) { - fastpathTV.EncMapUint8IntV(rv.Interface().(map[uint8]int), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint8IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8IntV(rv2i(rv).(map[uint8]int), e) } -func (_ fastpathT) EncMapUint8IntV(v map[uint8]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint8IntV(v map[uint8]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -8232,43 +6448,40 @@ func (_ fastpathT) EncMapUint8IntV(v map[uint8]int, checkNil bool, e *Encoder) { } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[uint8(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint8Int8R(rv reflect.Value) { - fastpathTV.EncMapUint8Int8V(rv.Interface().(map[uint8]int8), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint8Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Int8V(rv2i(rv).(map[uint8]int8), e) } -func (_ fastpathT) EncMapUint8Int8V(v map[uint8]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint8Int8V(v map[uint8]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -8278,43 +6491,40 @@ func (_ fastpathT) EncMapUint8Int8V(v map[uint8]int8, checkNil bool, e *Encoder) } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[uint8(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - 
cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint8Int16R(rv reflect.Value) { - fastpathTV.EncMapUint8Int16V(rv.Interface().(map[uint8]int16), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint8Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Int16V(rv2i(rv).(map[uint8]int16), e) } -func (_ fastpathT) EncMapUint8Int16V(v map[uint8]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint8Int16V(v map[uint8]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -8324,43 +6534,40 @@ func (_ fastpathT) EncMapUint8Int16V(v map[uint8]int16, checkNil bool, e *Encode } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[uint8(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint8Int32R(rv reflect.Value) { - fastpathTV.EncMapUint8Int32V(rv.Interface().(map[uint8]int32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint8Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Int32V(rv2i(rv).(map[uint8]int32), e) } -func (_ fastpathT) EncMapUint8Int32V(v map[uint8]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint8Int32V(v map[uint8]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -8370,43 +6577,40 @@ func (_ fastpathT) EncMapUint8Int32V(v map[uint8]int32, checkNil bool, e *Encode } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[uint8(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint8Int64R(rv reflect.Value) { - fastpathTV.EncMapUint8Int64V(rv.Interface().(map[uint8]int64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint8Int64R(f 
*codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Int64V(rv2i(rv).(map[uint8]int64), e) } -func (_ fastpathT) EncMapUint8Int64V(v map[uint8]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint8Int64V(v map[uint8]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -8416,43 +6620,40 @@ func (_ fastpathT) EncMapUint8Int64V(v map[uint8]int64, checkNil bool, e *Encode } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[uint8(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint8Float32R(rv reflect.Value) { - fastpathTV.EncMapUint8Float32V(rv.Interface().(map[uint8]float32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint8Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Float32V(rv2i(rv).(map[uint8]float32), e) } -func (_ fastpathT) EncMapUint8Float32V(v map[uint8]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint8Float32V(v map[uint8]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -8462,43 +6663,40 @@ func (_ fastpathT) EncMapUint8Float32V(v map[uint8]float32, checkNil bool, e *En } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat32(v[uint8(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat32(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint8Float64R(rv reflect.Value) { - fastpathTV.EncMapUint8Float64V(rv.Interface().(map[uint8]float64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint8Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Float64V(rv2i(rv).(map[uint8]float64), e) } -func (_ fastpathT) EncMapUint8Float64V(v map[uint8]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint8Float64V(v map[uint8]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 
:= make([]uint64, len(v)) var i int @@ -8508,43 +6706,40 @@ func (_ fastpathT) EncMapUint8Float64V(v map[uint8]float64, checkNil bool, e *En } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat64(v[uint8(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat64(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint8BoolR(rv reflect.Value) { - fastpathTV.EncMapUint8BoolV(rv.Interface().(map[uint8]bool), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint8BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8BoolV(rv2i(rv).(map[uint8]bool), e) } -func (_ fastpathT) EncMapUint8BoolV(v map[uint8]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint8BoolV(v map[uint8]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -8554,43 +6749,40 @@ func (_ fastpathT) EncMapUint8BoolV(v map[uint8]bool, checkNil bool, e *Encoder) } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeBool(v[uint8(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeBool(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint16IntfR(rv reflect.Value) { - fastpathTV.EncMapUint16IntfV(rv.Interface().(map[uint16]interface{}), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint16IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16IntfV(rv2i(rv).(map[uint16]interface{}), e) } -func (_ fastpathT) EncMapUint16IntfV(v map[uint16]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint16IntfV(v map[uint16]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -8600,43 +6792,40 @@ func (_ fastpathT) EncMapUint16IntfV(v map[uint16]interface{}, checkNil bool, e } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[uint16(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - 
cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint16StringR(rv reflect.Value) { - fastpathTV.EncMapUint16StringV(rv.Interface().(map[uint16]string), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint16StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16StringV(rv2i(rv).(map[uint16]string), e) } -func (_ fastpathT) EncMapUint16StringV(v map[uint16]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint16StringV(v map[uint16]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -8646,43 +6835,40 @@ func (_ fastpathT) EncMapUint16StringV(v map[uint16]string, checkNil bool, e *En } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeString(c_UTF8, v[uint16(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeString(c_UTF8, v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint16UintR(rv reflect.Value) { - fastpathTV.EncMapUint16UintV(rv.Interface().(map[uint16]uint), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint16UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16UintV(rv2i(rv).(map[uint16]uint), e) } -func (_ fastpathT) EncMapUint16UintV(v map[uint16]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint16UintV(v map[uint16]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -8692,43 +6878,40 @@ func (_ fastpathT) EncMapUint16UintV(v map[uint16]uint, checkNil bool, e *Encode } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[uint16(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint16Uint8R(rv reflect.Value) { - fastpathTV.EncMapUint16Uint8V(rv.Interface().(map[uint16]uint8), fastpathCheckNilFalse, f.e) +func (e *Encoder) 
fastpathEncMapUint16Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Uint8V(rv2i(rv).(map[uint16]uint8), e) } -func (_ fastpathT) EncMapUint16Uint8V(v map[uint16]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint16Uint8V(v map[uint16]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -8738,43 +6921,40 @@ func (_ fastpathT) EncMapUint16Uint8V(v map[uint16]uint8, checkNil bool, e *Enco } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[uint16(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint16Uint16R(rv reflect.Value) { - fastpathTV.EncMapUint16Uint16V(rv.Interface().(map[uint16]uint16), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint16Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Uint16V(rv2i(rv).(map[uint16]uint16), e) } -func (_ fastpathT) EncMapUint16Uint16V(v map[uint16]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint16Uint16V(v map[uint16]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -8784,43 +6964,40 @@ func (_ fastpathT) EncMapUint16Uint16V(v map[uint16]uint16, checkNil bool, e *En } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[uint16(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint16Uint32R(rv reflect.Value) { - fastpathTV.EncMapUint16Uint32V(rv.Interface().(map[uint16]uint32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint16Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Uint32V(rv2i(rv).(map[uint16]uint32), e) } -func (_ fastpathT) EncMapUint16Uint32V(v map[uint16]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint16Uint32V(v map[uint16]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, 
e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -8830,43 +7007,40 @@ func (_ fastpathT) EncMapUint16Uint32V(v map[uint16]uint32, checkNil bool, e *En } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[uint16(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint16Uint64R(rv reflect.Value) { - fastpathTV.EncMapUint16Uint64V(rv.Interface().(map[uint16]uint64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint16Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Uint64V(rv2i(rv).(map[uint16]uint64), e) } -func (_ fastpathT) EncMapUint16Uint64V(v map[uint16]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint16Uint64V(v map[uint16]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -8876,43 +7050,40 @@ func (_ fastpathT) EncMapUint16Uint64V(v map[uint16]uint64, checkNil bool, e *En } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[uint16(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint16UintptrR(rv reflect.Value) { - fastpathTV.EncMapUint16UintptrV(rv.Interface().(map[uint16]uintptr), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint16UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16UintptrV(rv2i(rv).(map[uint16]uintptr), e) } -func (_ fastpathT) EncMapUint16UintptrV(v map[uint16]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint16UintptrV(v map[uint16]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -8922,43 +7093,40 @@ func (_ fastpathT) EncMapUint16UintptrV(v map[uint16]uintptr, checkNil bool, e * } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if 
esep { + ee.WriteMapElemValue() } e.encode(v[uint16(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint16IntR(rv reflect.Value) { - fastpathTV.EncMapUint16IntV(rv.Interface().(map[uint16]int), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint16IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16IntV(rv2i(rv).(map[uint16]int), e) } -func (_ fastpathT) EncMapUint16IntV(v map[uint16]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint16IntV(v map[uint16]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -8968,43 +7136,40 @@ func (_ fastpathT) EncMapUint16IntV(v map[uint16]int, checkNil bool, e *Encoder) } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[uint16(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint16Int8R(rv reflect.Value) { - fastpathTV.EncMapUint16Int8V(rv.Interface().(map[uint16]int8), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint16Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Int8V(rv2i(rv).(map[uint16]int8), e) } -func (_ fastpathT) EncMapUint16Int8V(v map[uint16]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint16Int8V(v map[uint16]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -9014,43 +7179,40 @@ func (_ fastpathT) EncMapUint16Int8V(v map[uint16]int8, checkNil bool, e *Encode } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[uint16(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint16Int16R(rv reflect.Value) { - 
fastpathTV.EncMapUint16Int16V(rv.Interface().(map[uint16]int16), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint16Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Int16V(rv2i(rv).(map[uint16]int16), e) } -func (_ fastpathT) EncMapUint16Int16V(v map[uint16]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint16Int16V(v map[uint16]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -9060,43 +7222,40 @@ func (_ fastpathT) EncMapUint16Int16V(v map[uint16]int16, checkNil bool, e *Enco } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[uint16(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint16Int32R(rv reflect.Value) { - fastpathTV.EncMapUint16Int32V(rv.Interface().(map[uint16]int32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint16Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Int32V(rv2i(rv).(map[uint16]int32), e) } -func (_ fastpathT) EncMapUint16Int32V(v map[uint16]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint16Int32V(v map[uint16]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -9106,43 +7265,40 @@ func (_ fastpathT) EncMapUint16Int32V(v map[uint16]int32, checkNil bool, e *Enco } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[uint16(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint16Int64R(rv reflect.Value) { - fastpathTV.EncMapUint16Int64V(rv.Interface().(map[uint16]int64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint16Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Int64V(rv2i(rv).(map[uint16]int64), e) } -func (_ fastpathT) EncMapUint16Int64V(v map[uint16]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint16Int64V(v map[uint16]int64, e *Encoder) { + if v == nil { + 
e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -9152,43 +7308,40 @@ func (_ fastpathT) EncMapUint16Int64V(v map[uint16]int64, checkNil bool, e *Enco } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[uint16(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint16Float32R(rv reflect.Value) { - fastpathTV.EncMapUint16Float32V(rv.Interface().(map[uint16]float32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint16Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Float32V(rv2i(rv).(map[uint16]float32), e) } -func (_ fastpathT) EncMapUint16Float32V(v map[uint16]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint16Float32V(v map[uint16]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -9198,43 +7351,40 @@ func (_ fastpathT) EncMapUint16Float32V(v map[uint16]float32, checkNil bool, e * } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat32(v[uint16(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat32(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint16Float64R(rv reflect.Value) { - fastpathTV.EncMapUint16Float64V(rv.Interface().(map[uint16]float64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint16Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Float64V(rv2i(rv).(map[uint16]float64), e) } -func (_ fastpathT) EncMapUint16Float64V(v map[uint16]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint16Float64V(v map[uint16]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -9244,43 +7394,40 @@ func (_ fastpathT) EncMapUint16Float64V(v map[uint16]float64, checkNil bool, e * } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint16(k2))) 
- if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat64(v[uint16(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat64(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint16BoolR(rv reflect.Value) { - fastpathTV.EncMapUint16BoolV(rv.Interface().(map[uint16]bool), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint16BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16BoolV(rv2i(rv).(map[uint16]bool), e) } -func (_ fastpathT) EncMapUint16BoolV(v map[uint16]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint16BoolV(v map[uint16]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -9290,43 +7437,40 @@ func (_ fastpathT) EncMapUint16BoolV(v map[uint16]bool, checkNil bool, e *Encode } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeBool(v[uint16(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeBool(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint32IntfR(rv reflect.Value) { - fastpathTV.EncMapUint32IntfV(rv.Interface().(map[uint32]interface{}), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint32IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32IntfV(rv2i(rv).(map[uint32]interface{}), e) } -func (_ fastpathT) EncMapUint32IntfV(v map[uint32]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint32IntfV(v map[uint32]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -9336,43 +7480,40 @@ func (_ fastpathT) EncMapUint32IntfV(v map[uint32]interface{}, checkNil bool, e } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[uint32(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) 
fastpathEncMapUint32StringR(rv reflect.Value) { - fastpathTV.EncMapUint32StringV(rv.Interface().(map[uint32]string), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint32StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32StringV(rv2i(rv).(map[uint32]string), e) } -func (_ fastpathT) EncMapUint32StringV(v map[uint32]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint32StringV(v map[uint32]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -9382,43 +7523,40 @@ func (_ fastpathT) EncMapUint32StringV(v map[uint32]string, checkNil bool, e *En } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeString(c_UTF8, v[uint32(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeString(c_UTF8, v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint32UintR(rv reflect.Value) { - fastpathTV.EncMapUint32UintV(rv.Interface().(map[uint32]uint), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint32UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32UintV(rv2i(rv).(map[uint32]uint), e) } -func (_ fastpathT) EncMapUint32UintV(v map[uint32]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint32UintV(v map[uint32]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -9428,43 +7566,40 @@ func (_ fastpathT) EncMapUint32UintV(v map[uint32]uint, checkNil bool, e *Encode } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[uint32(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint32Uint8R(rv reflect.Value) { - fastpathTV.EncMapUint32Uint8V(rv.Interface().(map[uint32]uint8), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint32Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Uint8V(rv2i(rv).(map[uint32]uint8), e) } -func (_ fastpathT) EncMapUint32Uint8V(v map[uint32]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint32Uint8V(v 
map[uint32]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -9474,43 +7609,40 @@ func (_ fastpathT) EncMapUint32Uint8V(v map[uint32]uint8, checkNil bool, e *Enco } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[uint32(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint32Uint16R(rv reflect.Value) { - fastpathTV.EncMapUint32Uint16V(rv.Interface().(map[uint32]uint16), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint32Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Uint16V(rv2i(rv).(map[uint32]uint16), e) } -func (_ fastpathT) EncMapUint32Uint16V(v map[uint32]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint32Uint16V(v map[uint32]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -9520,43 +7652,40 @@ func (_ fastpathT) EncMapUint32Uint16V(v map[uint32]uint16, checkNil bool, e *En } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[uint32(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint32Uint32R(rv reflect.Value) { - fastpathTV.EncMapUint32Uint32V(rv.Interface().(map[uint32]uint32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint32Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Uint32V(rv2i(rv).(map[uint32]uint32), e) } -func (_ fastpathT) EncMapUint32Uint32V(v map[uint32]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint32Uint32V(v map[uint32]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -9566,43 +7695,40 @@ func (_ fastpathT) EncMapUint32Uint32V(v map[uint32]uint32, checkNil bool, e *En } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + 
ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[uint32(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint32Uint64R(rv reflect.Value) { - fastpathTV.EncMapUint32Uint64V(rv.Interface().(map[uint32]uint64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint32Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Uint64V(rv2i(rv).(map[uint32]uint64), e) } -func (_ fastpathT) EncMapUint32Uint64V(v map[uint32]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint32Uint64V(v map[uint32]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -9612,43 +7738,40 @@ func (_ fastpathT) EncMapUint32Uint64V(v map[uint32]uint64, checkNil bool, e *En } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[uint32(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint32UintptrR(rv reflect.Value) { - fastpathTV.EncMapUint32UintptrV(rv.Interface().(map[uint32]uintptr), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint32UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32UintptrV(rv2i(rv).(map[uint32]uintptr), e) } -func (_ fastpathT) EncMapUint32UintptrV(v map[uint32]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint32UintptrV(v map[uint32]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -9658,43 +7781,40 @@ func (_ fastpathT) EncMapUint32UintptrV(v map[uint32]uintptr, checkNil bool, e * } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[uint32(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v2) } } - if cr != 
nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint32IntR(rv reflect.Value) { - fastpathTV.EncMapUint32IntV(rv.Interface().(map[uint32]int), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint32IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32IntV(rv2i(rv).(map[uint32]int), e) } -func (_ fastpathT) EncMapUint32IntV(v map[uint32]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint32IntV(v map[uint32]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -9704,43 +7824,40 @@ func (_ fastpathT) EncMapUint32IntV(v map[uint32]int, checkNil bool, e *Encoder) } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[uint32(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint32Int8R(rv reflect.Value) { - fastpathTV.EncMapUint32Int8V(rv.Interface().(map[uint32]int8), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint32Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Int8V(rv2i(rv).(map[uint32]int8), e) } -func (_ fastpathT) EncMapUint32Int8V(v map[uint32]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint32Int8V(v map[uint32]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -9750,43 +7867,40 @@ func (_ fastpathT) EncMapUint32Int8V(v map[uint32]int8, checkNil bool, e *Encode } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[uint32(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint32Int16R(rv reflect.Value) { - fastpathTV.EncMapUint32Int16V(rv.Interface().(map[uint32]int16), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint32Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Int16V(rv2i(rv).(map[uint32]int16), e) } -func (_ fastpathT) EncMapUint32Int16V(v map[uint32]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - 
ee.EncodeNil() +func (_ fastpathT) EncMapUint32Int16V(v map[uint32]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -9796,43 +7910,40 @@ func (_ fastpathT) EncMapUint32Int16V(v map[uint32]int16, checkNil bool, e *Enco } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[uint32(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint32Int32R(rv reflect.Value) { - fastpathTV.EncMapUint32Int32V(rv.Interface().(map[uint32]int32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint32Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Int32V(rv2i(rv).(map[uint32]int32), e) } -func (_ fastpathT) EncMapUint32Int32V(v map[uint32]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint32Int32V(v map[uint32]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -9842,43 +7953,40 @@ func (_ fastpathT) EncMapUint32Int32V(v map[uint32]int32, checkNil bool, e *Enco } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[uint32(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint32Int64R(rv reflect.Value) { - fastpathTV.EncMapUint32Int64V(rv.Interface().(map[uint32]int64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint32Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Int64V(rv2i(rv).(map[uint32]int64), e) } -func (_ fastpathT) EncMapUint32Int64V(v map[uint32]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint32Int64V(v map[uint32]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -9888,43 +7996,40 @@ func (_ fastpathT) EncMapUint32Int64V(v map[uint32]int64, checkNil bool, e *Enco } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - 
cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[uint32(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint32Float32R(rv reflect.Value) { - fastpathTV.EncMapUint32Float32V(rv.Interface().(map[uint32]float32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint32Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Float32V(rv2i(rv).(map[uint32]float32), e) } -func (_ fastpathT) EncMapUint32Float32V(v map[uint32]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint32Float32V(v map[uint32]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -9934,43 +8039,40 @@ func (_ fastpathT) EncMapUint32Float32V(v map[uint32]float32, checkNil bool, e * } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat32(v[uint32(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat32(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint32Float64R(rv reflect.Value) { - fastpathTV.EncMapUint32Float64V(rv.Interface().(map[uint32]float64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint32Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Float64V(rv2i(rv).(map[uint32]float64), e) } -func (_ fastpathT) EncMapUint32Float64V(v map[uint32]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint32Float64V(v map[uint32]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -9980,43 +8082,40 @@ func (_ fastpathT) EncMapUint32Float64V(v map[uint32]float64, checkNil bool, e * } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat64(v[uint32(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep 
{ + ee.WriteMapElemValue() } ee.EncodeFloat64(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint32BoolR(rv reflect.Value) { - fastpathTV.EncMapUint32BoolV(rv.Interface().(map[uint32]bool), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint32BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32BoolV(rv2i(rv).(map[uint32]bool), e) } -func (_ fastpathT) EncMapUint32BoolV(v map[uint32]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint32BoolV(v map[uint32]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -10026,43 +8125,40 @@ func (_ fastpathT) EncMapUint32BoolV(v map[uint32]bool, checkNil bool, e *Encode } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeBool(v[uint32(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeBool(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint64IntfR(rv reflect.Value) { - fastpathTV.EncMapUint64IntfV(rv.Interface().(map[uint64]interface{}), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint64IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64IntfV(rv2i(rv).(map[uint64]interface{}), e) } -func (_ fastpathT) EncMapUint64IntfV(v map[uint64]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint64IntfV(v map[uint64]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -10072,43 +8168,40 @@ func (_ fastpathT) EncMapUint64IntfV(v map[uint64]interface{}, checkNil bool, e } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[uint64(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint64StringR(rv reflect.Value) { - fastpathTV.EncMapUint64StringV(rv.Interface().(map[uint64]string), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint64StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64StringV(rv2i(rv).(map[uint64]string), e) } -func (_ fastpathT) EncMapUint64StringV(v map[uint64]string, checkNil 
bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint64StringV(v map[uint64]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -10118,43 +8211,40 @@ func (_ fastpathT) EncMapUint64StringV(v map[uint64]string, checkNil bool, e *En } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeString(c_UTF8, v[uint64(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeString(c_UTF8, v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint64UintR(rv reflect.Value) { - fastpathTV.EncMapUint64UintV(rv.Interface().(map[uint64]uint), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint64UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64UintV(rv2i(rv).(map[uint64]uint), e) } -func (_ fastpathT) EncMapUint64UintV(v map[uint64]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint64UintV(v map[uint64]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -10164,43 +8254,40 @@ func (_ fastpathT) EncMapUint64UintV(v map[uint64]uint, checkNil bool, e *Encode } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[uint64(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint64Uint8R(rv reflect.Value) { - fastpathTV.EncMapUint64Uint8V(rv.Interface().(map[uint64]uint8), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint64Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Uint8V(rv2i(rv).(map[uint64]uint8), e) } -func (_ fastpathT) EncMapUint64Uint8V(v map[uint64]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint64Uint8V(v map[uint64]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -10210,43 +8297,40 @@ func (_ fastpathT) EncMapUint64Uint8V(v map[uint64]uint8, checkNil bool, e *Enco } 
sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[uint64(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint64Uint16R(rv reflect.Value) { - fastpathTV.EncMapUint64Uint16V(rv.Interface().(map[uint64]uint16), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint64Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Uint16V(rv2i(rv).(map[uint64]uint16), e) } -func (_ fastpathT) EncMapUint64Uint16V(v map[uint64]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint64Uint16V(v map[uint64]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -10256,43 +8340,40 @@ func (_ fastpathT) EncMapUint64Uint16V(v map[uint64]uint16, checkNil bool, e *En } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[uint64(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint64Uint32R(rv reflect.Value) { - fastpathTV.EncMapUint64Uint32V(rv.Interface().(map[uint64]uint32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint64Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Uint32V(rv2i(rv).(map[uint64]uint32), e) } -func (_ fastpathT) EncMapUint64Uint32V(v map[uint64]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint64Uint32V(v map[uint64]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -10302,43 +8383,40 @@ func (_ fastpathT) EncMapUint64Uint32V(v map[uint64]uint32, checkNil bool, e *En } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[uint64(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) 
- if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint64Uint64R(rv reflect.Value) { - fastpathTV.EncMapUint64Uint64V(rv.Interface().(map[uint64]uint64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint64Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Uint64V(rv2i(rv).(map[uint64]uint64), e) } -func (_ fastpathT) EncMapUint64Uint64V(v map[uint64]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint64Uint64V(v map[uint64]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -10348,43 +8426,40 @@ func (_ fastpathT) EncMapUint64Uint64V(v map[uint64]uint64, checkNil bool, e *En } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[uint64(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint64UintptrR(rv reflect.Value) { - fastpathTV.EncMapUint64UintptrV(rv.Interface().(map[uint64]uintptr), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint64UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64UintptrV(rv2i(rv).(map[uint64]uintptr), e) } -func (_ fastpathT) EncMapUint64UintptrV(v map[uint64]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint64UintptrV(v map[uint64]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -10394,43 +8469,40 @@ func (_ fastpathT) EncMapUint64UintptrV(v map[uint64]uintptr, checkNil bool, e * } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[uint64(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint64IntR(rv reflect.Value) { - fastpathTV.EncMapUint64IntV(rv.Interface().(map[uint64]int), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint64IntR(f *codecFnInfo, rv reflect.Value) { + 
fastpathTV.EncMapUint64IntV(rv2i(rv).(map[uint64]int), e) } -func (_ fastpathT) EncMapUint64IntV(v map[uint64]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint64IntV(v map[uint64]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -10440,43 +8512,40 @@ func (_ fastpathT) EncMapUint64IntV(v map[uint64]int, checkNil bool, e *Encoder) } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[uint64(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint64Int8R(rv reflect.Value) { - fastpathTV.EncMapUint64Int8V(rv.Interface().(map[uint64]int8), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint64Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Int8V(rv2i(rv).(map[uint64]int8), e) } -func (_ fastpathT) EncMapUint64Int8V(v map[uint64]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint64Int8V(v map[uint64]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -10486,43 +8555,40 @@ func (_ fastpathT) EncMapUint64Int8V(v map[uint64]int8, checkNil bool, e *Encode } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[uint64(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint64Int16R(rv reflect.Value) { - fastpathTV.EncMapUint64Int16V(rv.Interface().(map[uint64]int16), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint64Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Int16V(rv2i(rv).(map[uint64]int16), e) } -func (_ fastpathT) EncMapUint64Int16V(v map[uint64]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint64Int16V(v map[uint64]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -10532,43 +8598,40 @@ 
func (_ fastpathT) EncMapUint64Int16V(v map[uint64]int16, checkNil bool, e *Enco } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[uint64(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint64Int32R(rv reflect.Value) { - fastpathTV.EncMapUint64Int32V(rv.Interface().(map[uint64]int32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint64Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Int32V(rv2i(rv).(map[uint64]int32), e) } -func (_ fastpathT) EncMapUint64Int32V(v map[uint64]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint64Int32V(v map[uint64]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -10578,43 +8641,40 @@ func (_ fastpathT) EncMapUint64Int32V(v map[uint64]int32, checkNil bool, e *Enco } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[uint64(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint64Int64R(rv reflect.Value) { - fastpathTV.EncMapUint64Int64V(rv.Interface().(map[uint64]int64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint64Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Int64V(rv2i(rv).(map[uint64]int64), e) } -func (_ fastpathT) EncMapUint64Int64V(v map[uint64]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint64Int64V(v map[uint64]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -10624,43 +8684,40 @@ func (_ fastpathT) EncMapUint64Int64V(v map[uint64]int64, checkNil bool, e *Enco } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[uint64(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep 
{ + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint64Float32R(rv reflect.Value) { - fastpathTV.EncMapUint64Float32V(rv.Interface().(map[uint64]float32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint64Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Float32V(rv2i(rv).(map[uint64]float32), e) } -func (_ fastpathT) EncMapUint64Float32V(v map[uint64]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint64Float32V(v map[uint64]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -10670,43 +8727,40 @@ func (_ fastpathT) EncMapUint64Float32V(v map[uint64]float32, checkNil bool, e * } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat32(v[uint64(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat32(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint64Float64R(rv reflect.Value) { - fastpathTV.EncMapUint64Float64V(rv.Interface().(map[uint64]float64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint64Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Float64V(rv2i(rv).(map[uint64]float64), e) } -func (_ fastpathT) EncMapUint64Float64V(v map[uint64]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint64Float64V(v map[uint64]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -10716,43 +8770,40 @@ func (_ fastpathT) EncMapUint64Float64V(v map[uint64]float64, checkNil bool, e * } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat64(v[uint64(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat64(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUint64BoolR(rv reflect.Value) { - fastpathTV.EncMapUint64BoolV(rv.Interface().(map[uint64]bool), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUint64BoolR(f 
*codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64BoolV(rv2i(rv).(map[uint64]bool), e) } -func (_ fastpathT) EncMapUint64BoolV(v map[uint64]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUint64BoolV(v map[uint64]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -10762,43 +8813,40 @@ func (_ fastpathT) EncMapUint64BoolV(v map[uint64]bool, checkNil bool, e *Encode } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(uint64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeBool(v[uint64(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeUint(uint64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeBool(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUintptrIntfR(rv reflect.Value) { - fastpathTV.EncMapUintptrIntfV(rv.Interface().(map[uintptr]interface{}), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUintptrIntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrIntfV(rv2i(rv).(map[uintptr]interface{}), e) } -func (_ fastpathT) EncMapUintptrIntfV(v map[uintptr]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUintptrIntfV(v map[uintptr]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -10808,43 +8856,40 @@ func (_ fastpathT) EncMapUintptrIntfV(v map[uintptr]interface{}, checkNil bool, } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[uintptr(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUintptrStringR(rv reflect.Value) { - fastpathTV.EncMapUintptrStringV(rv.Interface().(map[uintptr]string), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUintptrStringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrStringV(rv2i(rv).(map[uintptr]string), e) } -func (_ fastpathT) EncMapUintptrStringV(v map[uintptr]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUintptrStringV(v map[uintptr]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, 
len(v)) var i int @@ -10854,43 +8899,40 @@ func (_ fastpathT) EncMapUintptrStringV(v map[uintptr]string, checkNil bool, e * } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeString(c_UTF8, v[uintptr(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeString(c_UTF8, v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUintptrUintR(rv reflect.Value) { - fastpathTV.EncMapUintptrUintV(rv.Interface().(map[uintptr]uint), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUintptrUintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrUintV(rv2i(rv).(map[uintptr]uint), e) } -func (_ fastpathT) EncMapUintptrUintV(v map[uintptr]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUintptrUintV(v map[uintptr]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -10900,43 +8942,40 @@ func (_ fastpathT) EncMapUintptrUintV(v map[uintptr]uint, checkNil bool, e *Enco } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[uintptr(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUintptrUint8R(rv reflect.Value) { - fastpathTV.EncMapUintptrUint8V(rv.Interface().(map[uintptr]uint8), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUintptrUint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrUint8V(rv2i(rv).(map[uintptr]uint8), e) } -func (_ fastpathT) EncMapUintptrUint8V(v map[uintptr]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUintptrUint8V(v map[uintptr]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -10946,43 +8985,40 @@ func (_ fastpathT) EncMapUintptrUint8V(v map[uintptr]uint8, checkNil bool, e *En } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[uintptr(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + 
if esep { + ee.WriteMapElemKey() } e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUintptrUint16R(rv reflect.Value) { - fastpathTV.EncMapUintptrUint16V(rv.Interface().(map[uintptr]uint16), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUintptrUint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrUint16V(rv2i(rv).(map[uintptr]uint16), e) } -func (_ fastpathT) EncMapUintptrUint16V(v map[uintptr]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUintptrUint16V(v map[uintptr]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -10992,43 +9028,40 @@ func (_ fastpathT) EncMapUintptrUint16V(v map[uintptr]uint16, checkNil bool, e * } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[uintptr(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUintptrUint32R(rv reflect.Value) { - fastpathTV.EncMapUintptrUint32V(rv.Interface().(map[uintptr]uint32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUintptrUint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrUint32V(rv2i(rv).(map[uintptr]uint32), e) } -func (_ fastpathT) EncMapUintptrUint32V(v map[uintptr]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUintptrUint32V(v map[uintptr]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -11038,43 +9071,40 @@ func (_ fastpathT) EncMapUintptrUint32V(v map[uintptr]uint32, checkNil bool, e * } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[uintptr(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUintptrUint64R(rv reflect.Value) { - fastpathTV.EncMapUintptrUint64V(rv.Interface().(map[uintptr]uint64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUintptrUint64R(f *codecFnInfo, rv 
reflect.Value) { + fastpathTV.EncMapUintptrUint64V(rv2i(rv).(map[uintptr]uint64), e) } -func (_ fastpathT) EncMapUintptrUint64V(v map[uintptr]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUintptrUint64V(v map[uintptr]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -11084,43 +9114,40 @@ func (_ fastpathT) EncMapUintptrUint64V(v map[uintptr]uint64, checkNil bool, e * } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[uintptr(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUintptrUintptrR(rv reflect.Value) { - fastpathTV.EncMapUintptrUintptrV(rv.Interface().(map[uintptr]uintptr), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUintptrUintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrUintptrV(rv2i(rv).(map[uintptr]uintptr), e) } -func (_ fastpathT) EncMapUintptrUintptrV(v map[uintptr]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUintptrUintptrV(v map[uintptr]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -11130,43 +9157,40 @@ func (_ fastpathT) EncMapUintptrUintptrV(v map[uintptr]uintptr, checkNil bool, e } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[uintptr(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUintptrIntR(rv reflect.Value) { - fastpathTV.EncMapUintptrIntV(rv.Interface().(map[uintptr]int), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUintptrIntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrIntV(rv2i(rv).(map[uintptr]int), e) } -func (_ fastpathT) EncMapUintptrIntV(v map[uintptr]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUintptrIntV(v map[uintptr]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -11176,43 
+9200,40 @@ func (_ fastpathT) EncMapUintptrIntV(v map[uintptr]int, checkNil bool, e *Encode } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[uintptr(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUintptrInt8R(rv reflect.Value) { - fastpathTV.EncMapUintptrInt8V(rv.Interface().(map[uintptr]int8), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUintptrInt8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrInt8V(rv2i(rv).(map[uintptr]int8), e) } -func (_ fastpathT) EncMapUintptrInt8V(v map[uintptr]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUintptrInt8V(v map[uintptr]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -11222,43 +9243,40 @@ func (_ fastpathT) EncMapUintptrInt8V(v map[uintptr]int8, checkNil bool, e *Enco } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[uintptr(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUintptrInt16R(rv reflect.Value) { - fastpathTV.EncMapUintptrInt16V(rv.Interface().(map[uintptr]int16), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUintptrInt16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrInt16V(rv2i(rv).(map[uintptr]int16), e) } -func (_ fastpathT) EncMapUintptrInt16V(v map[uintptr]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUintptrInt16V(v map[uintptr]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -11268,43 +9286,40 @@ func (_ fastpathT) EncMapUintptrInt16V(v map[uintptr]int16, checkNil bool, e *En } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[uintptr(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } 
e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUintptrInt32R(rv reflect.Value) { - fastpathTV.EncMapUintptrInt32V(rv.Interface().(map[uintptr]int32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUintptrInt32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrInt32V(rv2i(rv).(map[uintptr]int32), e) } -func (_ fastpathT) EncMapUintptrInt32V(v map[uintptr]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUintptrInt32V(v map[uintptr]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -11314,43 +9329,40 @@ func (_ fastpathT) EncMapUintptrInt32V(v map[uintptr]int32, checkNil bool, e *En } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[uintptr(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUintptrInt64R(rv reflect.Value) { - fastpathTV.EncMapUintptrInt64V(rv.Interface().(map[uintptr]int64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUintptrInt64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrInt64V(rv2i(rv).(map[uintptr]int64), e) } -func (_ fastpathT) EncMapUintptrInt64V(v map[uintptr]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUintptrInt64V(v map[uintptr]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -11360,43 +9372,40 @@ func (_ fastpathT) EncMapUintptrInt64V(v map[uintptr]int64, checkNil bool, e *En } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[uintptr(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUintptrFloat32R(rv reflect.Value) { - fastpathTV.EncMapUintptrFloat32V(rv.Interface().(map[uintptr]float32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUintptrFloat32R(f *codecFnInfo, rv reflect.Value) { + 
fastpathTV.EncMapUintptrFloat32V(rv2i(rv).(map[uintptr]float32), e) } -func (_ fastpathT) EncMapUintptrFloat32V(v map[uintptr]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUintptrFloat32V(v map[uintptr]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -11406,43 +9415,40 @@ func (_ fastpathT) EncMapUintptrFloat32V(v map[uintptr]float32, checkNil bool, e } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat32(v[uintptr(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat32(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUintptrFloat64R(rv reflect.Value) { - fastpathTV.EncMapUintptrFloat64V(rv.Interface().(map[uintptr]float64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUintptrFloat64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrFloat64V(rv2i(rv).(map[uintptr]float64), e) } -func (_ fastpathT) EncMapUintptrFloat64V(v map[uintptr]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUintptrFloat64V(v map[uintptr]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -11452,43 +9458,40 @@ func (_ fastpathT) EncMapUintptrFloat64V(v map[uintptr]float64, checkNil bool, e } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat64(v[uintptr(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat64(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapUintptrBoolR(rv reflect.Value) { - fastpathTV.EncMapUintptrBoolV(rv.Interface().(map[uintptr]bool), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapUintptrBoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrBoolV(rv2i(rv).(map[uintptr]bool), e) } -func (_ fastpathT) EncMapUintptrBoolV(v map[uintptr]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapUintptrBoolV(v map[uintptr]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]uint64, len(v)) var i int @@ -11498,43 
+9501,40 @@ func (_ fastpathT) EncMapUintptrBoolV(v map[uintptr]bool, checkNil bool, e *Enco } sort.Sort(uintSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(uintptr(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeBool(v[uintptr(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } e.encode(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeBool(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapIntIntfR(rv reflect.Value) { - fastpathTV.EncMapIntIntfV(rv.Interface().(map[int]interface{}), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapIntIntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntIntfV(rv2i(rv).(map[int]interface{}), e) } -func (_ fastpathT) EncMapIntIntfV(v map[int]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapIntIntfV(v map[int]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -11544,43 +9544,40 @@ func (_ fastpathT) EncMapIntIntfV(v map[int]interface{}, checkNil bool, e *Encod } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[int(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapIntStringR(rv reflect.Value) { - fastpathTV.EncMapIntStringV(rv.Interface().(map[int]string), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapIntStringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntStringV(rv2i(rv).(map[int]string), e) } -func (_ fastpathT) EncMapIntStringV(v map[int]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapIntStringV(v map[int]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -11590,43 +9587,40 @@ func (_ fastpathT) EncMapIntStringV(v map[int]string, checkNil bool, e *Encoder) } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeString(c_UTF8, v[int(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - 
cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeString(c_UTF8, v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapIntUintR(rv reflect.Value) { - fastpathTV.EncMapIntUintV(rv.Interface().(map[int]uint), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapIntUintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntUintV(rv2i(rv).(map[int]uint), e) } -func (_ fastpathT) EncMapIntUintV(v map[int]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapIntUintV(v map[int]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -11636,43 +9630,40 @@ func (_ fastpathT) EncMapIntUintV(v map[int]uint, checkNil bool, e *Encoder) { } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[int(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapIntUint8R(rv reflect.Value) { - fastpathTV.EncMapIntUint8V(rv.Interface().(map[int]uint8), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapIntUint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntUint8V(rv2i(rv).(map[int]uint8), e) } -func (_ fastpathT) EncMapIntUint8V(v map[int]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapIntUint8V(v map[int]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -11682,43 +9673,40 @@ func (_ fastpathT) EncMapIntUint8V(v map[int]uint8, checkNil bool, e *Encoder) { } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[int(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapIntUint16R(rv reflect.Value) { - fastpathTV.EncMapIntUint16V(rv.Interface().(map[int]uint16), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapIntUint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntUint16V(rv2i(rv).(map[int]uint16), e) } -func (_ fastpathT) EncMapIntUint16V(v map[int]uint16, checkNil bool, e *Encoder) { - ee := 
e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapIntUint16V(v map[int]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -11728,43 +9716,40 @@ func (_ fastpathT) EncMapIntUint16V(v map[int]uint16, checkNil bool, e *Encoder) } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[int(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapIntUint32R(rv reflect.Value) { - fastpathTV.EncMapIntUint32V(rv.Interface().(map[int]uint32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapIntUint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntUint32V(rv2i(rv).(map[int]uint32), e) } -func (_ fastpathT) EncMapIntUint32V(v map[int]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapIntUint32V(v map[int]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -11774,43 +9759,40 @@ func (_ fastpathT) EncMapIntUint32V(v map[int]uint32, checkNil bool, e *Encoder) } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[int(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapIntUint64R(rv reflect.Value) { - fastpathTV.EncMapIntUint64V(rv.Interface().(map[int]uint64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapIntUint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntUint64V(rv2i(rv).(map[int]uint64), e) } -func (_ fastpathT) EncMapIntUint64V(v map[int]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapIntUint64V(v map[int]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -11820,43 +9802,40 @@ func (_ fastpathT) EncMapIntUint64V(v map[int]uint64, checkNil bool, e *Encoder) } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep 
{ + ee.WriteMapElemKey() } ee.EncodeInt(int64(int(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[int(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapIntUintptrR(rv reflect.Value) { - fastpathTV.EncMapIntUintptrV(rv.Interface().(map[int]uintptr), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapIntUintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntUintptrV(rv2i(rv).(map[int]uintptr), e) } -func (_ fastpathT) EncMapIntUintptrV(v map[int]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapIntUintptrV(v map[int]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -11866,43 +9845,40 @@ func (_ fastpathT) EncMapIntUintptrV(v map[int]uintptr, checkNil bool, e *Encode } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[int(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapIntIntR(rv reflect.Value) { - fastpathTV.EncMapIntIntV(rv.Interface().(map[int]int), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapIntIntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntIntV(rv2i(rv).(map[int]int), e) } -func (_ fastpathT) EncMapIntIntV(v map[int]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapIntIntV(v map[int]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -11912,43 +9888,40 @@ func (_ fastpathT) EncMapIntIntV(v map[int]int, checkNil bool, e *Encoder) { } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[int(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapIntInt8R(rv 
reflect.Value) { - fastpathTV.EncMapIntInt8V(rv.Interface().(map[int]int8), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapIntInt8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntInt8V(rv2i(rv).(map[int]int8), e) } -func (_ fastpathT) EncMapIntInt8V(v map[int]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapIntInt8V(v map[int]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -11958,43 +9931,40 @@ func (_ fastpathT) EncMapIntInt8V(v map[int]int8, checkNil bool, e *Encoder) { } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[int(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapIntInt16R(rv reflect.Value) { - fastpathTV.EncMapIntInt16V(rv.Interface().(map[int]int16), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapIntInt16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntInt16V(rv2i(rv).(map[int]int16), e) } -func (_ fastpathT) EncMapIntInt16V(v map[int]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapIntInt16V(v map[int]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -12004,43 +9974,40 @@ func (_ fastpathT) EncMapIntInt16V(v map[int]int16, checkNil bool, e *Encoder) { } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[int(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapIntInt32R(rv reflect.Value) { - fastpathTV.EncMapIntInt32V(rv.Interface().(map[int]int32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapIntInt32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntInt32V(rv2i(rv).(map[int]int32), e) } -func (_ fastpathT) EncMapIntInt32V(v map[int]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapIntInt32V(v map[int]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + 
ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -12050,43 +10017,40 @@ func (_ fastpathT) EncMapIntInt32V(v map[int]int32, checkNil bool, e *Encoder) { } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[int(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapIntInt64R(rv reflect.Value) { - fastpathTV.EncMapIntInt64V(rv.Interface().(map[int]int64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapIntInt64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntInt64V(rv2i(rv).(map[int]int64), e) } -func (_ fastpathT) EncMapIntInt64V(v map[int]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapIntInt64V(v map[int]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -12096,43 +10060,40 @@ func (_ fastpathT) EncMapIntInt64V(v map[int]int64, checkNil bool, e *Encoder) { } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[int(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapIntFloat32R(rv reflect.Value) { - fastpathTV.EncMapIntFloat32V(rv.Interface().(map[int]float32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapIntFloat32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntFloat32V(rv2i(rv).(map[int]float32), e) } -func (_ fastpathT) EncMapIntFloat32V(v map[int]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapIntFloat32V(v map[int]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -12142,43 +10103,40 @@ func (_ fastpathT) EncMapIntFloat32V(v map[int]float32, checkNil bool, e *Encode } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat32(v[int(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - 
cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat32(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapIntFloat64R(rv reflect.Value) { - fastpathTV.EncMapIntFloat64V(rv.Interface().(map[int]float64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapIntFloat64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntFloat64V(rv2i(rv).(map[int]float64), e) } -func (_ fastpathT) EncMapIntFloat64V(v map[int]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapIntFloat64V(v map[int]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -12188,43 +10146,40 @@ func (_ fastpathT) EncMapIntFloat64V(v map[int]float64, checkNil bool, e *Encode } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat64(v[int(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat64(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapIntBoolR(rv reflect.Value) { - fastpathTV.EncMapIntBoolV(rv.Interface().(map[int]bool), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapIntBoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntBoolV(rv2i(rv).(map[int]bool), e) } -func (_ fastpathT) EncMapIntBoolV(v map[int]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapIntBoolV(v map[int]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -12234,43 +10189,40 @@ func (_ fastpathT) EncMapIntBoolV(v map[int]bool, checkNil bool, e *Encoder) { } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeBool(v[int(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeBool(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt8IntfR(rv reflect.Value) { - fastpathTV.EncMapInt8IntfV(rv.Interface().(map[int8]interface{}), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt8IntfR(f *codecFnInfo, rv reflect.Value) { + 
fastpathTV.EncMapInt8IntfV(rv2i(rv).(map[int8]interface{}), e) } -func (_ fastpathT) EncMapInt8IntfV(v map[int8]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt8IntfV(v map[int8]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -12280,43 +10232,40 @@ func (_ fastpathT) EncMapInt8IntfV(v map[int8]interface{}, checkNil bool, e *Enc } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[int8(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt8StringR(rv reflect.Value) { - fastpathTV.EncMapInt8StringV(rv.Interface().(map[int8]string), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt8StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8StringV(rv2i(rv).(map[int8]string), e) } -func (_ fastpathT) EncMapInt8StringV(v map[int8]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt8StringV(v map[int8]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -12326,43 +10275,40 @@ func (_ fastpathT) EncMapInt8StringV(v map[int8]string, checkNil bool, e *Encode } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeString(c_UTF8, v[int8(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeString(c_UTF8, v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt8UintR(rv reflect.Value) { - fastpathTV.EncMapInt8UintV(rv.Interface().(map[int8]uint), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt8UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8UintV(rv2i(rv).(map[int8]uint), e) } -func (_ fastpathT) EncMapInt8UintV(v map[int8]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt8UintV(v map[int8]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -12372,43 +10318,40 @@ func (_ fastpathT) EncMapInt8UintV(v 
map[int8]uint, checkNil bool, e *Encoder) { } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[int8(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt8Uint8R(rv reflect.Value) { - fastpathTV.EncMapInt8Uint8V(rv.Interface().(map[int8]uint8), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt8Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Uint8V(rv2i(rv).(map[int8]uint8), e) } -func (_ fastpathT) EncMapInt8Uint8V(v map[int8]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt8Uint8V(v map[int8]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -12418,43 +10361,40 @@ func (_ fastpathT) EncMapInt8Uint8V(v map[int8]uint8, checkNil bool, e *Encoder) } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[int8(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt8Uint16R(rv reflect.Value) { - fastpathTV.EncMapInt8Uint16V(rv.Interface().(map[int8]uint16), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt8Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Uint16V(rv2i(rv).(map[int8]uint16), e) } -func (_ fastpathT) EncMapInt8Uint16V(v map[int8]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt8Uint16V(v map[int8]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -12464,43 +10404,40 @@ func (_ fastpathT) EncMapInt8Uint16V(v map[int8]uint16, checkNil bool, e *Encode } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[int8(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - 
cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt8Uint32R(rv reflect.Value) { - fastpathTV.EncMapInt8Uint32V(rv.Interface().(map[int8]uint32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt8Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Uint32V(rv2i(rv).(map[int8]uint32), e) } -func (_ fastpathT) EncMapInt8Uint32V(v map[int8]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt8Uint32V(v map[int8]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -12510,43 +10447,40 @@ func (_ fastpathT) EncMapInt8Uint32V(v map[int8]uint32, checkNil bool, e *Encode } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[int8(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt8Uint64R(rv reflect.Value) { - fastpathTV.EncMapInt8Uint64V(rv.Interface().(map[int8]uint64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt8Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Uint64V(rv2i(rv).(map[int8]uint64), e) } -func (_ fastpathT) EncMapInt8Uint64V(v map[int8]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt8Uint64V(v map[int8]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -12556,43 +10490,40 @@ func (_ fastpathT) EncMapInt8Uint64V(v map[int8]uint64, checkNil bool, e *Encode } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[int8(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt8UintptrR(rv reflect.Value) { - fastpathTV.EncMapInt8UintptrV(rv.Interface().(map[int8]uintptr), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt8UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8UintptrV(rv2i(rv).(map[int8]uintptr), e) } -func (_ fastpathT) 
EncMapInt8UintptrV(v map[int8]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt8UintptrV(v map[int8]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -12602,43 +10533,40 @@ func (_ fastpathT) EncMapInt8UintptrV(v map[int8]uintptr, checkNil bool, e *Enco } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[int8(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt8IntR(rv reflect.Value) { - fastpathTV.EncMapInt8IntV(rv.Interface().(map[int8]int), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt8IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8IntV(rv2i(rv).(map[int8]int), e) } -func (_ fastpathT) EncMapInt8IntV(v map[int8]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt8IntV(v map[int8]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -12648,43 +10576,40 @@ func (_ fastpathT) EncMapInt8IntV(v map[int8]int, checkNil bool, e *Encoder) { } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[int8(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt8Int8R(rv reflect.Value) { - fastpathTV.EncMapInt8Int8V(rv.Interface().(map[int8]int8), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt8Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Int8V(rv2i(rv).(map[int8]int8), e) } -func (_ fastpathT) EncMapInt8Int8V(v map[int8]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt8Int8V(v map[int8]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -12694,43 +10619,40 @@ func (_ fastpathT) EncMapInt8Int8V(v map[int8]int8, checkNil bool, e *Encoder) { } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - 
cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[int8(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt8Int16R(rv reflect.Value) { - fastpathTV.EncMapInt8Int16V(rv.Interface().(map[int8]int16), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt8Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Int16V(rv2i(rv).(map[int8]int16), e) } -func (_ fastpathT) EncMapInt8Int16V(v map[int8]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt8Int16V(v map[int8]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -12740,43 +10662,40 @@ func (_ fastpathT) EncMapInt8Int16V(v map[int8]int16, checkNil bool, e *Encoder) } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[int8(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt8Int32R(rv reflect.Value) { - fastpathTV.EncMapInt8Int32V(rv.Interface().(map[int8]int32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt8Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Int32V(rv2i(rv).(map[int8]int32), e) } -func (_ fastpathT) EncMapInt8Int32V(v map[int8]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt8Int32V(v map[int8]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -12786,43 +10705,40 @@ func (_ fastpathT) EncMapInt8Int32V(v map[int8]int32, checkNil bool, e *Encoder) } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[int8(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - 
cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt8Int64R(rv reflect.Value) { - fastpathTV.EncMapInt8Int64V(rv.Interface().(map[int8]int64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt8Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Int64V(rv2i(rv).(map[int8]int64), e) } -func (_ fastpathT) EncMapInt8Int64V(v map[int8]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt8Int64V(v map[int8]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -12832,43 +10748,40 @@ func (_ fastpathT) EncMapInt8Int64V(v map[int8]int64, checkNil bool, e *Encoder) } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[int8(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt8Float32R(rv reflect.Value) { - fastpathTV.EncMapInt8Float32V(rv.Interface().(map[int8]float32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt8Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Float32V(rv2i(rv).(map[int8]float32), e) } -func (_ fastpathT) EncMapInt8Float32V(v map[int8]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt8Float32V(v map[int8]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -12878,43 +10791,40 @@ func (_ fastpathT) EncMapInt8Float32V(v map[int8]float32, checkNil bool, e *Enco } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat32(v[int8(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat32(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt8Float64R(rv reflect.Value) { - fastpathTV.EncMapInt8Float64V(rv.Interface().(map[int8]float64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt8Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Float64V(rv2i(rv).(map[int8]float64), e) } -func (_ fastpathT) EncMapInt8Float64V(v map[int8]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ 
fastpathT) EncMapInt8Float64V(v map[int8]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -12924,43 +10834,40 @@ func (_ fastpathT) EncMapInt8Float64V(v map[int8]float64, checkNil bool, e *Enco } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat64(v[int8(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat64(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt8BoolR(rv reflect.Value) { - fastpathTV.EncMapInt8BoolV(rv.Interface().(map[int8]bool), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt8BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8BoolV(rv2i(rv).(map[int8]bool), e) } -func (_ fastpathT) EncMapInt8BoolV(v map[int8]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt8BoolV(v map[int8]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -12970,43 +10877,40 @@ func (_ fastpathT) EncMapInt8BoolV(v map[int8]bool, checkNil bool, e *Encoder) { } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int8(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeBool(v[int8(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeBool(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt16IntfR(rv reflect.Value) { - fastpathTV.EncMapInt16IntfV(rv.Interface().(map[int16]interface{}), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt16IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16IntfV(rv2i(rv).(map[int16]interface{}), e) } -func (_ fastpathT) EncMapInt16IntfV(v map[int16]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt16IntfV(v map[int16]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -13016,43 +10920,40 @@ func (_ fastpathT) EncMapInt16IntfV(v map[int16]interface{}, checkNil bool, e *E } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int16(k2))) - if cr != 
nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[int16(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt16StringR(rv reflect.Value) { - fastpathTV.EncMapInt16StringV(rv.Interface().(map[int16]string), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt16StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16StringV(rv2i(rv).(map[int16]string), e) } -func (_ fastpathT) EncMapInt16StringV(v map[int16]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt16StringV(v map[int16]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -13062,43 +10963,40 @@ func (_ fastpathT) EncMapInt16StringV(v map[int16]string, checkNil bool, e *Enco } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeString(c_UTF8, v[int16(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeString(c_UTF8, v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt16UintR(rv reflect.Value) { - fastpathTV.EncMapInt16UintV(rv.Interface().(map[int16]uint), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt16UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16UintV(rv2i(rv).(map[int16]uint), e) } -func (_ fastpathT) EncMapInt16UintV(v map[int16]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt16UintV(v map[int16]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -13108,43 +11006,40 @@ func (_ fastpathT) EncMapInt16UintV(v map[int16]uint, checkNil bool, e *Encoder) } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[int16(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt16Uint8R(rv 
reflect.Value) { - fastpathTV.EncMapInt16Uint8V(rv.Interface().(map[int16]uint8), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt16Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Uint8V(rv2i(rv).(map[int16]uint8), e) } -func (_ fastpathT) EncMapInt16Uint8V(v map[int16]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt16Uint8V(v map[int16]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -13154,43 +11049,40 @@ func (_ fastpathT) EncMapInt16Uint8V(v map[int16]uint8, checkNil bool, e *Encode } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[int16(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt16Uint16R(rv reflect.Value) { - fastpathTV.EncMapInt16Uint16V(rv.Interface().(map[int16]uint16), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt16Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Uint16V(rv2i(rv).(map[int16]uint16), e) } -func (_ fastpathT) EncMapInt16Uint16V(v map[int16]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt16Uint16V(v map[int16]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -13200,43 +11092,40 @@ func (_ fastpathT) EncMapInt16Uint16V(v map[int16]uint16, checkNil bool, e *Enco } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[int16(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt16Uint32R(rv reflect.Value) { - fastpathTV.EncMapInt16Uint32V(rv.Interface().(map[int16]uint32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt16Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Uint32V(rv2i(rv).(map[int16]uint32), e) } -func (_ fastpathT) EncMapInt16Uint32V(v map[int16]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt16Uint32V(v map[int16]uint32, e *Encoder) { + if v == nil { + 
e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -13246,43 +11135,40 @@ func (_ fastpathT) EncMapInt16Uint32V(v map[int16]uint32, checkNil bool, e *Enco } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[int16(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt16Uint64R(rv reflect.Value) { - fastpathTV.EncMapInt16Uint64V(rv.Interface().(map[int16]uint64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt16Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Uint64V(rv2i(rv).(map[int16]uint64), e) } -func (_ fastpathT) EncMapInt16Uint64V(v map[int16]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt16Uint64V(v map[int16]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -13292,43 +11178,40 @@ func (_ fastpathT) EncMapInt16Uint64V(v map[int16]uint64, checkNil bool, e *Enco } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[int16(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt16UintptrR(rv reflect.Value) { - fastpathTV.EncMapInt16UintptrV(rv.Interface().(map[int16]uintptr), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt16UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16UintptrV(rv2i(rv).(map[int16]uintptr), e) } -func (_ fastpathT) EncMapInt16UintptrV(v map[int16]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt16UintptrV(v map[int16]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -13338,43 +11221,40 @@ func (_ fastpathT) EncMapInt16UintptrV(v map[int16]uintptr, checkNil bool, e *En } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int16(k2))) - if cr != nil { - 
cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[int16(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt16IntR(rv reflect.Value) { - fastpathTV.EncMapInt16IntV(rv.Interface().(map[int16]int), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt16IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16IntV(rv2i(rv).(map[int16]int), e) } -func (_ fastpathT) EncMapInt16IntV(v map[int16]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt16IntV(v map[int16]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -13384,43 +11264,40 @@ func (_ fastpathT) EncMapInt16IntV(v map[int16]int, checkNil bool, e *Encoder) { } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[int16(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt16Int8R(rv reflect.Value) { - fastpathTV.EncMapInt16Int8V(rv.Interface().(map[int16]int8), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt16Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Int8V(rv2i(rv).(map[int16]int8), e) } -func (_ fastpathT) EncMapInt16Int8V(v map[int16]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt16Int8V(v map[int16]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -13430,43 +11307,40 @@ func (_ fastpathT) EncMapInt16Int8V(v map[int16]int8, checkNil bool, e *Encoder) } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[int16(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt16Int16R(rv reflect.Value) { - 
fastpathTV.EncMapInt16Int16V(rv.Interface().(map[int16]int16), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt16Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Int16V(rv2i(rv).(map[int16]int16), e) } -func (_ fastpathT) EncMapInt16Int16V(v map[int16]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt16Int16V(v map[int16]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -13476,43 +11350,40 @@ func (_ fastpathT) EncMapInt16Int16V(v map[int16]int16, checkNil bool, e *Encode } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[int16(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt16Int32R(rv reflect.Value) { - fastpathTV.EncMapInt16Int32V(rv.Interface().(map[int16]int32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt16Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Int32V(rv2i(rv).(map[int16]int32), e) } -func (_ fastpathT) EncMapInt16Int32V(v map[int16]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt16Int32V(v map[int16]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -13522,43 +11393,40 @@ func (_ fastpathT) EncMapInt16Int32V(v map[int16]int32, checkNil bool, e *Encode } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[int16(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt16Int64R(rv reflect.Value) { - fastpathTV.EncMapInt16Int64V(rv.Interface().(map[int16]int64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt16Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Int64V(rv2i(rv).(map[int16]int64), e) } -func (_ fastpathT) EncMapInt16Int64V(v map[int16]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt16Int64V(v map[int16]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - 
ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -13568,43 +11436,40 @@ func (_ fastpathT) EncMapInt16Int64V(v map[int16]int64, checkNil bool, e *Encode } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[int16(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt16Float32R(rv reflect.Value) { - fastpathTV.EncMapInt16Float32V(rv.Interface().(map[int16]float32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt16Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Float32V(rv2i(rv).(map[int16]float32), e) } -func (_ fastpathT) EncMapInt16Float32V(v map[int16]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt16Float32V(v map[int16]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -13614,43 +11479,40 @@ func (_ fastpathT) EncMapInt16Float32V(v map[int16]float32, checkNil bool, e *En } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat32(v[int16(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat32(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt16Float64R(rv reflect.Value) { - fastpathTV.EncMapInt16Float64V(rv.Interface().(map[int16]float64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt16Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Float64V(rv2i(rv).(map[int16]float64), e) } -func (_ fastpathT) EncMapInt16Float64V(v map[int16]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt16Float64V(v map[int16]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -13660,43 +11522,40 @@ func (_ fastpathT) EncMapInt16Float64V(v map[int16]float64, checkNil bool, e *En } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + 
if esep { + ee.WriteMapElemValue() } ee.EncodeFloat64(v[int16(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat64(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt16BoolR(rv reflect.Value) { - fastpathTV.EncMapInt16BoolV(rv.Interface().(map[int16]bool), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt16BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16BoolV(rv2i(rv).(map[int16]bool), e) } -func (_ fastpathT) EncMapInt16BoolV(v map[int16]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt16BoolV(v map[int16]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -13706,43 +11565,40 @@ func (_ fastpathT) EncMapInt16BoolV(v map[int16]bool, checkNil bool, e *Encoder) } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int16(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeBool(v[int16(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeBool(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt32IntfR(rv reflect.Value) { - fastpathTV.EncMapInt32IntfV(rv.Interface().(map[int32]interface{}), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt32IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32IntfV(rv2i(rv).(map[int32]interface{}), e) } -func (_ fastpathT) EncMapInt32IntfV(v map[int32]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt32IntfV(v map[int32]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -13752,43 +11608,40 @@ func (_ fastpathT) EncMapInt32IntfV(v map[int32]interface{}, checkNil bool, e *E } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[int32(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt32StringR(rv reflect.Value) { - 
fastpathTV.EncMapInt32StringV(rv.Interface().(map[int32]string), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt32StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32StringV(rv2i(rv).(map[int32]string), e) } -func (_ fastpathT) EncMapInt32StringV(v map[int32]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt32StringV(v map[int32]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -13798,43 +11651,40 @@ func (_ fastpathT) EncMapInt32StringV(v map[int32]string, checkNil bool, e *Enco } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeString(c_UTF8, v[int32(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeString(c_UTF8, v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt32UintR(rv reflect.Value) { - fastpathTV.EncMapInt32UintV(rv.Interface().(map[int32]uint), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt32UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32UintV(rv2i(rv).(map[int32]uint), e) } -func (_ fastpathT) EncMapInt32UintV(v map[int32]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt32UintV(v map[int32]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -13844,43 +11694,40 @@ func (_ fastpathT) EncMapInt32UintV(v map[int32]uint, checkNil bool, e *Encoder) } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[int32(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt32Uint8R(rv reflect.Value) { - fastpathTV.EncMapInt32Uint8V(rv.Interface().(map[int32]uint8), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt32Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Uint8V(rv2i(rv).(map[int32]uint8), e) } -func (_ fastpathT) EncMapInt32Uint8V(v map[int32]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt32Uint8V(v map[int32]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - 
ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -13890,43 +11737,40 @@ func (_ fastpathT) EncMapInt32Uint8V(v map[int32]uint8, checkNil bool, e *Encode } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[int32(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt32Uint16R(rv reflect.Value) { - fastpathTV.EncMapInt32Uint16V(rv.Interface().(map[int32]uint16), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt32Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Uint16V(rv2i(rv).(map[int32]uint16), e) } -func (_ fastpathT) EncMapInt32Uint16V(v map[int32]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt32Uint16V(v map[int32]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -13936,43 +11780,40 @@ func (_ fastpathT) EncMapInt32Uint16V(v map[int32]uint16, checkNil bool, e *Enco } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[int32(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt32Uint32R(rv reflect.Value) { - fastpathTV.EncMapInt32Uint32V(rv.Interface().(map[int32]uint32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt32Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Uint32V(rv2i(rv).(map[int32]uint32), e) } -func (_ fastpathT) EncMapInt32Uint32V(v map[int32]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt32Uint32V(v map[int32]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -13982,43 +11823,40 @@ func (_ fastpathT) EncMapInt32Uint32V(v map[int32]uint32, checkNil bool, e *Enco } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if 
esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[int32(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt32Uint64R(rv reflect.Value) { - fastpathTV.EncMapInt32Uint64V(rv.Interface().(map[int32]uint64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt32Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Uint64V(rv2i(rv).(map[int32]uint64), e) } -func (_ fastpathT) EncMapInt32Uint64V(v map[int32]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt32Uint64V(v map[int32]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -14028,43 +11866,40 @@ func (_ fastpathT) EncMapInt32Uint64V(v map[int32]uint64, checkNil bool, e *Enco } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[int32(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt32UintptrR(rv reflect.Value) { - fastpathTV.EncMapInt32UintptrV(rv.Interface().(map[int32]uintptr), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt32UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32UintptrV(rv2i(rv).(map[int32]uintptr), e) } -func (_ fastpathT) EncMapInt32UintptrV(v map[int32]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt32UintptrV(v map[int32]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -14074,43 +11909,40 @@ func (_ fastpathT) EncMapInt32UintptrV(v map[int32]uintptr, checkNil bool, e *En } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[int32(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt32IntR(rv reflect.Value) { - 
fastpathTV.EncMapInt32IntV(rv.Interface().(map[int32]int), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt32IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32IntV(rv2i(rv).(map[int32]int), e) } -func (_ fastpathT) EncMapInt32IntV(v map[int32]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt32IntV(v map[int32]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -14120,43 +11952,40 @@ func (_ fastpathT) EncMapInt32IntV(v map[int32]int, checkNil bool, e *Encoder) { } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[int32(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt32Int8R(rv reflect.Value) { - fastpathTV.EncMapInt32Int8V(rv.Interface().(map[int32]int8), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt32Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Int8V(rv2i(rv).(map[int32]int8), e) } -func (_ fastpathT) EncMapInt32Int8V(v map[int32]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt32Int8V(v map[int32]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -14166,43 +11995,40 @@ func (_ fastpathT) EncMapInt32Int8V(v map[int32]int8, checkNil bool, e *Encoder) } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[int32(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt32Int16R(rv reflect.Value) { - fastpathTV.EncMapInt32Int16V(rv.Interface().(map[int32]int16), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt32Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Int16V(rv2i(rv).(map[int32]int16), e) } -func (_ fastpathT) EncMapInt32Int16V(v map[int32]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt32Int16V(v map[int32]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, 
e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -14212,43 +12038,40 @@ func (_ fastpathT) EncMapInt32Int16V(v map[int32]int16, checkNil bool, e *Encode } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[int32(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt32Int32R(rv reflect.Value) { - fastpathTV.EncMapInt32Int32V(rv.Interface().(map[int32]int32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt32Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Int32V(rv2i(rv).(map[int32]int32), e) } -func (_ fastpathT) EncMapInt32Int32V(v map[int32]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt32Int32V(v map[int32]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -14258,43 +12081,40 @@ func (_ fastpathT) EncMapInt32Int32V(v map[int32]int32, checkNil bool, e *Encode } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[int32(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt32Int64R(rv reflect.Value) { - fastpathTV.EncMapInt32Int64V(rv.Interface().(map[int32]int64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt32Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Int64V(rv2i(rv).(map[int32]int64), e) } -func (_ fastpathT) EncMapInt32Int64V(v map[int32]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt32Int64V(v map[int32]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -14304,43 +12124,40 @@ func (_ fastpathT) EncMapInt32Int64V(v map[int32]int64, checkNil bool, e *Encode } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[int32(k2)])) } } 
else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt32Float32R(rv reflect.Value) { - fastpathTV.EncMapInt32Float32V(rv.Interface().(map[int32]float32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt32Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Float32V(rv2i(rv).(map[int32]float32), e) } -func (_ fastpathT) EncMapInt32Float32V(v map[int32]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt32Float32V(v map[int32]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -14350,43 +12167,40 @@ func (_ fastpathT) EncMapInt32Float32V(v map[int32]float32, checkNil bool, e *En } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat32(v[int32(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat32(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt32Float64R(rv reflect.Value) { - fastpathTV.EncMapInt32Float64V(rv.Interface().(map[int32]float64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt32Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Float64V(rv2i(rv).(map[int32]float64), e) } -func (_ fastpathT) EncMapInt32Float64V(v map[int32]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt32Float64V(v map[int32]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -14396,43 +12210,40 @@ func (_ fastpathT) EncMapInt32Float64V(v map[int32]float64, checkNil bool, e *En } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat64(v[int32(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat64(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt32BoolR(rv reflect.Value) { - fastpathTV.EncMapInt32BoolV(rv.Interface().(map[int32]bool), 
fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt32BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32BoolV(rv2i(rv).(map[int32]bool), e) } -func (_ fastpathT) EncMapInt32BoolV(v map[int32]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt32BoolV(v map[int32]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -14442,43 +12253,40 @@ func (_ fastpathT) EncMapInt32BoolV(v map[int32]bool, checkNil bool, e *Encoder) } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int32(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeBool(v[int32(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeBool(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt64IntfR(rv reflect.Value) { - fastpathTV.EncMapInt64IntfV(rv.Interface().(map[int64]interface{}), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt64IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64IntfV(rv2i(rv).(map[int64]interface{}), e) } -func (_ fastpathT) EncMapInt64IntfV(v map[int64]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt64IntfV(v map[int64]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -14488,43 +12296,40 @@ func (_ fastpathT) EncMapInt64IntfV(v map[int64]interface{}, checkNil bool, e *E } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[int64(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt64StringR(rv reflect.Value) { - fastpathTV.EncMapInt64StringV(rv.Interface().(map[int64]string), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt64StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64StringV(rv2i(rv).(map[int64]string), e) } -func (_ fastpathT) EncMapInt64StringV(v map[int64]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt64StringV(v map[int64]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if 
e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -14534,43 +12339,40 @@ func (_ fastpathT) EncMapInt64StringV(v map[int64]string, checkNil bool, e *Enco } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeString(c_UTF8, v[int64(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeString(c_UTF8, v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt64UintR(rv reflect.Value) { - fastpathTV.EncMapInt64UintV(rv.Interface().(map[int64]uint), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt64UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64UintV(rv2i(rv).(map[int64]uint), e) } -func (_ fastpathT) EncMapInt64UintV(v map[int64]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt64UintV(v map[int64]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -14580,43 +12382,40 @@ func (_ fastpathT) EncMapInt64UintV(v map[int64]uint, checkNil bool, e *Encoder) } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[int64(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt64Uint8R(rv reflect.Value) { - fastpathTV.EncMapInt64Uint8V(rv.Interface().(map[int64]uint8), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt64Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Uint8V(rv2i(rv).(map[int64]uint8), e) } -func (_ fastpathT) EncMapInt64Uint8V(v map[int64]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt64Uint8V(v map[int64]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -14626,43 +12425,40 @@ func (_ fastpathT) EncMapInt64Uint8V(v map[int64]uint8, checkNil bool, e *Encode } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[int64(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - 
cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt64Uint16R(rv reflect.Value) { - fastpathTV.EncMapInt64Uint16V(rv.Interface().(map[int64]uint16), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt64Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Uint16V(rv2i(rv).(map[int64]uint16), e) } -func (_ fastpathT) EncMapInt64Uint16V(v map[int64]uint16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt64Uint16V(v map[int64]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -14672,43 +12468,40 @@ func (_ fastpathT) EncMapInt64Uint16V(v map[int64]uint16, checkNil bool, e *Enco } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[int64(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt64Uint32R(rv reflect.Value) { - fastpathTV.EncMapInt64Uint32V(rv.Interface().(map[int64]uint32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt64Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Uint32V(rv2i(rv).(map[int64]uint32), e) } -func (_ fastpathT) EncMapInt64Uint32V(v map[int64]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt64Uint32V(v map[int64]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -14718,43 +12511,40 @@ func (_ fastpathT) EncMapInt64Uint32V(v map[int64]uint32, checkNil bool, e *Enco } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[int64(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt64Uint64R(rv reflect.Value) { - fastpathTV.EncMapInt64Uint64V(rv.Interface().(map[int64]uint64), fastpathCheckNilFalse, f.e) +func (e *Encoder) 
fastpathEncMapInt64Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Uint64V(rv2i(rv).(map[int64]uint64), e) } -func (_ fastpathT) EncMapInt64Uint64V(v map[int64]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt64Uint64V(v map[int64]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -14764,43 +12554,40 @@ func (_ fastpathT) EncMapInt64Uint64V(v map[int64]uint64, checkNil bool, e *Enco } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[int64(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt64UintptrR(rv reflect.Value) { - fastpathTV.EncMapInt64UintptrV(rv.Interface().(map[int64]uintptr), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt64UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64UintptrV(rv2i(rv).(map[int64]uintptr), e) } -func (_ fastpathT) EncMapInt64UintptrV(v map[int64]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt64UintptrV(v map[int64]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -14810,43 +12597,40 @@ func (_ fastpathT) EncMapInt64UintptrV(v map[int64]uintptr, checkNil bool, e *En } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[int64(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt64IntR(rv reflect.Value) { - fastpathTV.EncMapInt64IntV(rv.Interface().(map[int64]int), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt64IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64IntV(rv2i(rv).(map[int64]int), e) } -func (_ fastpathT) EncMapInt64IntV(v map[int64]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt64IntV(v map[int64]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) 
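The vendored fast-path churn around this point is one mechanical rewrite applied to every generated map encoder: the R wrappers move from an *encFnInfo receiver to the Encoder itself (rv2i(rv) in place of rv.Interface()), the checkNil parameter disappears so a nil map always encodes as nil, and the old cr != nil / cr.sendContainerState(containerMapKey|Value|End) signalling is replaced by a single esep := e.hh.hasElemSeparators() decision followed by ee.WriteMapElemKey, ee.WriteMapElemValue and ee.WriteMapEnd. The e.h.Canonical branch keeps its role: keys are copied into a typed slice, sorted, and the map is walked in that order for deterministic output. Below is a minimal, self-contained sketch of the new shape; encDriver, jsonDriver and encMapInt64String are illustrative stand-ins for the codec package's internal types, and sort.Slice stands in for its intSlice helper.

package main

import (
	"fmt"
	"sort"
)

// encDriver is a hypothetical stand-in for the codec package's internal
// encode driver; only the calls used by the regenerated fast paths appear.
type encDriver interface {
	WriteMapStart(length int)
	WriteMapElemKey()
	WriteMapElemValue()
	WriteMapEnd()
	EncodeNil()
	EncodeInt(v int64)
	EncodeString(v string)
}

// jsonDriver is a toy driver for a format that needs element separators,
// i.e. the case where hasElemSeparators() reports true and esep is taken.
type jsonDriver struct{ out []byte }

func (j *jsonDriver) WriteMapStart(int) { j.out = append(j.out, '{') }
func (j *jsonDriver) WriteMapElemKey() {
	// Emit a comma before every key except the first one after '{'.
	if n := len(j.out); n > 0 && j.out[n-1] != '{' {
		j.out = append(j.out, ',')
	}
}
func (j *jsonDriver) WriteMapElemValue()    { j.out = append(j.out, ':') }
func (j *jsonDriver) WriteMapEnd()          { j.out = append(j.out, '}') }
func (j *jsonDriver) EncodeNil()            { j.out = append(j.out, "null"...) }
func (j *jsonDriver) EncodeInt(v int64)     { j.out = append(j.out, fmt.Sprintf("%d", v)...) }
func (j *jsonDriver) EncodeString(v string) { j.out = append(j.out, fmt.Sprintf("%q", v)...) }

// encMapInt64String mirrors the regenerated EncMapInt64StringV: the old
// checkNil flag is gone (a nil map always encodes as nil), the esep decision
// replaces the old cr != nil checks, and WriteMapElemKey/WriteMapElemValue/
// WriteMapEnd replace cr.sendContainerState(...). canonical plays the role
// of e.h.Canonical: keys are sorted so the output is deterministic.
func encMapInt64String(v map[int64]string, ee encDriver, esep, canonical bool) {
	if v == nil {
		ee.EncodeNil()
		return
	}
	ee.WriteMapStart(len(v))
	if canonical {
		keys := make([]int64, 0, len(v))
		for k := range v {
			keys = append(keys, k)
		}
		sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
		for _, k2 := range keys {
			if esep {
				ee.WriteMapElemKey()
			}
			ee.EncodeInt(k2)
			if esep {
				ee.WriteMapElemValue()
			}
			ee.EncodeString(v[k2])
		}
	} else {
		for k2, v2 := range v {
			if esep {
				ee.WriteMapElemKey()
			}
			ee.EncodeInt(k2)
			if esep {
				ee.WriteMapElemValue()
			}
			ee.EncodeString(v2)
		}
	}
	ee.WriteMapEnd()
}

func main() {
	d := &jsonDriver{}
	encMapInt64String(map[int64]string{2: "b", 1: "a"}, d, true, true)
	fmt.Println(string(d.out)) // {1:"a",2:"b"}
}

Running the sketch prints {1:"a",2:"b"} regardless of Go's map iteration order, which is the point of the canonical branch; with canonical false the pairs come out in whatever order the map yields them.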
var i int @@ -14856,43 +12640,40 @@ func (_ fastpathT) EncMapInt64IntV(v map[int64]int, checkNil bool, e *Encoder) { } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[int64(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt64Int8R(rv reflect.Value) { - fastpathTV.EncMapInt64Int8V(rv.Interface().(map[int64]int8), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt64Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Int8V(rv2i(rv).(map[int64]int8), e) } -func (_ fastpathT) EncMapInt64Int8V(v map[int64]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt64Int8V(v map[int64]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -14902,43 +12683,40 @@ func (_ fastpathT) EncMapInt64Int8V(v map[int64]int8, checkNil bool, e *Encoder) } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[int64(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt64Int16R(rv reflect.Value) { - fastpathTV.EncMapInt64Int16V(rv.Interface().(map[int64]int16), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt64Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Int16V(rv2i(rv).(map[int64]int16), e) } -func (_ fastpathT) EncMapInt64Int16V(v map[int64]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt64Int16V(v map[int64]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -14948,43 +12726,40 @@ func (_ fastpathT) EncMapInt64Int16V(v map[int64]int16, checkNil bool, e *Encode } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[int64(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + 
ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt64Int32R(rv reflect.Value) { - fastpathTV.EncMapInt64Int32V(rv.Interface().(map[int64]int32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt64Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Int32V(rv2i(rv).(map[int64]int32), e) } -func (_ fastpathT) EncMapInt64Int32V(v map[int64]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt64Int32V(v map[int64]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -14994,43 +12769,40 @@ func (_ fastpathT) EncMapInt64Int32V(v map[int64]int32, checkNil bool, e *Encode } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[int64(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt64Int64R(rv reflect.Value) { - fastpathTV.EncMapInt64Int64V(rv.Interface().(map[int64]int64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt64Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Int64V(rv2i(rv).(map[int64]int64), e) } -func (_ fastpathT) EncMapInt64Int64V(v map[int64]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt64Int64V(v map[int64]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -15040,43 +12812,40 @@ func (_ fastpathT) EncMapInt64Int64V(v map[int64]int64, checkNil bool, e *Encode } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[int64(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt64Float32R(rv reflect.Value) { - fastpathTV.EncMapInt64Float32V(rv.Interface().(map[int64]float32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt64Float32R(f *codecFnInfo, rv reflect.Value) { + 
fastpathTV.EncMapInt64Float32V(rv2i(rv).(map[int64]float32), e) } -func (_ fastpathT) EncMapInt64Float32V(v map[int64]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt64Float32V(v map[int64]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -15086,43 +12855,40 @@ func (_ fastpathT) EncMapInt64Float32V(v map[int64]float32, checkNil bool, e *En } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat32(v[int64(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat32(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt64Float64R(rv reflect.Value) { - fastpathTV.EncMapInt64Float64V(rv.Interface().(map[int64]float64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt64Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Float64V(rv2i(rv).(map[int64]float64), e) } -func (_ fastpathT) EncMapInt64Float64V(v map[int64]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt64Float64V(v map[int64]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -15132,43 +12898,40 @@ func (_ fastpathT) EncMapInt64Float64V(v map[int64]float64, checkNil bool, e *En } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat64(v[int64(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat64(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapInt64BoolR(rv reflect.Value) { - fastpathTV.EncMapInt64BoolV(rv.Interface().(map[int64]bool), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapInt64BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64BoolV(rv2i(rv).(map[int64]bool), e) } -func (_ fastpathT) EncMapInt64BoolV(v map[int64]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapInt64BoolV(v map[int64]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]int64, len(v)) var i int @@ -15178,43 +12941,40 @@ func (_ 
fastpathT) EncMapInt64BoolV(v map[int64]bool, checkNil bool, e *Encoder) } sort.Sort(intSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(int64(k2))) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeBool(v[int64(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeInt(int64(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeBool(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapBoolIntfR(rv reflect.Value) { - fastpathTV.EncMapBoolIntfV(rv.Interface().(map[bool]interface{}), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapBoolIntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolIntfV(rv2i(rv).(map[bool]interface{}), e) } -func (_ fastpathT) EncMapBoolIntfV(v map[bool]interface{}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapBoolIntfV(v map[bool]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]bool, len(v)) var i int @@ -15224,43 +12984,40 @@ func (_ fastpathT) EncMapBoolIntfV(v map[bool]interface{}, checkNil bool, e *Enc } sort.Sort(boolSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[bool(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapBoolStringR(rv reflect.Value) { - fastpathTV.EncMapBoolStringV(rv.Interface().(map[bool]string), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapBoolStringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolStringV(rv2i(rv).(map[bool]string), e) } -func (_ fastpathT) EncMapBoolStringV(v map[bool]string, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapBoolStringV(v map[bool]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]bool, len(v)) var i int @@ -15270,43 +13027,40 @@ func (_ fastpathT) EncMapBoolStringV(v map[bool]string, checkNil bool, e *Encode } sort.Sort(boolSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeString(c_UTF8, v[bool(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeBool(k2) - if cr != nil { - 
cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeString(c_UTF8, v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapBoolUintR(rv reflect.Value) { - fastpathTV.EncMapBoolUintV(rv.Interface().(map[bool]uint), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapBoolUintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolUintV(rv2i(rv).(map[bool]uint), e) } -func (_ fastpathT) EncMapBoolUintV(v map[bool]uint, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapBoolUintV(v map[bool]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]bool, len(v)) var i int @@ -15316,43 +13070,40 @@ func (_ fastpathT) EncMapBoolUintV(v map[bool]uint, checkNil bool, e *Encoder) { } sort.Sort(boolSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[bool(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapBoolUint8R(rv reflect.Value) { - fastpathTV.EncMapBoolUint8V(rv.Interface().(map[bool]uint8), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapBoolUint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolUint8V(rv2i(rv).(map[bool]uint8), e) } -func (_ fastpathT) EncMapBoolUint8V(v map[bool]uint8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapBoolUint8V(v map[bool]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]bool, len(v)) var i int @@ -15362,43 +13113,40 @@ func (_ fastpathT) EncMapBoolUint8V(v map[bool]uint8, checkNil bool, e *Encoder) } sort.Sort(boolSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[bool(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapBoolUint16R(rv reflect.Value) { - fastpathTV.EncMapBoolUint16V(rv.Interface().(map[bool]uint16), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapBoolUint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolUint16V(rv2i(rv).(map[bool]uint16), e) } -func (_ fastpathT) EncMapBoolUint16V(v map[bool]uint16, checkNil bool, e 
*Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapBoolUint16V(v map[bool]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]bool, len(v)) var i int @@ -15408,43 +13156,40 @@ func (_ fastpathT) EncMapBoolUint16V(v map[bool]uint16, checkNil bool, e *Encode } sort.Sort(boolSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[bool(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapBoolUint32R(rv reflect.Value) { - fastpathTV.EncMapBoolUint32V(rv.Interface().(map[bool]uint32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapBoolUint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolUint32V(rv2i(rv).(map[bool]uint32), e) } -func (_ fastpathT) EncMapBoolUint32V(v map[bool]uint32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapBoolUint32V(v map[bool]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]bool, len(v)) var i int @@ -15454,43 +13199,40 @@ func (_ fastpathT) EncMapBoolUint32V(v map[bool]uint32, checkNil bool, e *Encode } sort.Sort(boolSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[bool(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapBoolUint64R(rv reflect.Value) { - fastpathTV.EncMapBoolUint64V(rv.Interface().(map[bool]uint64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapBoolUint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolUint64V(rv2i(rv).(map[bool]uint64), e) } -func (_ fastpathT) EncMapBoolUint64V(v map[bool]uint64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapBoolUint64V(v map[bool]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]bool, len(v)) var i int @@ -15500,43 +13242,40 @@ func (_ fastpathT) EncMapBoolUint64V(v map[bool]uint64, checkNil bool, e *Encode } sort.Sort(boolSlice(v2)) for _, k2 := range v2 { - if cr != nil { - 
cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v[bool(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeUint(uint64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapBoolUintptrR(rv reflect.Value) { - fastpathTV.EncMapBoolUintptrV(rv.Interface().(map[bool]uintptr), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapBoolUintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolUintptrV(rv2i(rv).(map[bool]uintptr), e) } -func (_ fastpathT) EncMapBoolUintptrV(v map[bool]uintptr, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapBoolUintptrV(v map[bool]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]bool, len(v)) var i int @@ -15546,43 +13285,40 @@ func (_ fastpathT) EncMapBoolUintptrV(v map[bool]uintptr, checkNil bool, e *Enco } sort.Sort(boolSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v[bool(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } e.encode(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapBoolIntR(rv reflect.Value) { - fastpathTV.EncMapBoolIntV(rv.Interface().(map[bool]int), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapBoolIntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolIntV(rv2i(rv).(map[bool]int), e) } -func (_ fastpathT) EncMapBoolIntV(v map[bool]int, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapBoolIntV(v map[bool]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]bool, len(v)) var i int @@ -15592,43 +13328,40 @@ func (_ fastpathT) EncMapBoolIntV(v map[bool]int, checkNil bool, e *Encoder) { } sort.Sort(boolSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[bool(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f 
*encFnInfo) fastpathEncMapBoolInt8R(rv reflect.Value) { - fastpathTV.EncMapBoolInt8V(rv.Interface().(map[bool]int8), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapBoolInt8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolInt8V(rv2i(rv).(map[bool]int8), e) } -func (_ fastpathT) EncMapBoolInt8V(v map[bool]int8, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapBoolInt8V(v map[bool]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]bool, len(v)) var i int @@ -15638,43 +13371,40 @@ func (_ fastpathT) EncMapBoolInt8V(v map[bool]int8, checkNil bool, e *Encoder) { } sort.Sort(boolSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[bool(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapBoolInt16R(rv reflect.Value) { - fastpathTV.EncMapBoolInt16V(rv.Interface().(map[bool]int16), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapBoolInt16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolInt16V(rv2i(rv).(map[bool]int16), e) } -func (_ fastpathT) EncMapBoolInt16V(v map[bool]int16, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapBoolInt16V(v map[bool]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]bool, len(v)) var i int @@ -15684,43 +13414,40 @@ func (_ fastpathT) EncMapBoolInt16V(v map[bool]int16, checkNil bool, e *Encoder) } sort.Sort(boolSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[bool(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapBoolInt32R(rv reflect.Value) { - fastpathTV.EncMapBoolInt32V(rv.Interface().(map[bool]int32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapBoolInt32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolInt32V(rv2i(rv).(map[bool]int32), e) } -func (_ fastpathT) EncMapBoolInt32V(v map[bool]int32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapBoolInt32V(v map[bool]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, 
esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]bool, len(v)) var i int @@ -15730,43 +13457,40 @@ func (_ fastpathT) EncMapBoolInt32V(v map[bool]int32, checkNil bool, e *Encoder) } sort.Sort(boolSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[bool(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapBoolInt64R(rv reflect.Value) { - fastpathTV.EncMapBoolInt64V(rv.Interface().(map[bool]int64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapBoolInt64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolInt64V(rv2i(rv).(map[bool]int64), e) } -func (_ fastpathT) EncMapBoolInt64V(v map[bool]int64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapBoolInt64V(v map[bool]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]bool, len(v)) var i int @@ -15776,43 +13500,40 @@ func (_ fastpathT) EncMapBoolInt64V(v map[bool]int64, checkNil bool, e *Encoder) } sort.Sort(boolSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v[bool(k2)])) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeInt(int64(v2)) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapBoolFloat32R(rv reflect.Value) { - fastpathTV.EncMapBoolFloat32V(rv.Interface().(map[bool]float32), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapBoolFloat32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolFloat32V(rv2i(rv).(map[bool]float32), e) } -func (_ fastpathT) EncMapBoolFloat32V(v map[bool]float32, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapBoolFloat32V(v map[bool]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]bool, len(v)) var i int @@ -15822,43 +13543,40 @@ func (_ fastpathT) EncMapBoolFloat32V(v map[bool]float32, checkNil bool, e *Enco } sort.Sort(boolSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat32(v[bool(k2)]) } } else { for k2, v2 := range v 
{ - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat32(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapBoolFloat64R(rv reflect.Value) { - fastpathTV.EncMapBoolFloat64V(rv.Interface().(map[bool]float64), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapBoolFloat64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolFloat64V(rv2i(rv).(map[bool]float64), e) } -func (_ fastpathT) EncMapBoolFloat64V(v map[bool]float64, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapBoolFloat64V(v map[bool]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]bool, len(v)) var i int @@ -15868,43 +13586,40 @@ func (_ fastpathT) EncMapBoolFloat64V(v map[bool]float64, checkNil bool, e *Enco } sort.Sort(boolSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat64(v[bool(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeFloat64(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } -func (f *encFnInfo) fastpathEncMapBoolBoolR(rv reflect.Value) { - fastpathTV.EncMapBoolBoolV(rv.Interface().(map[bool]bool), fastpathCheckNilFalse, f.e) +func (e *Encoder) fastpathEncMapBoolBoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolBoolV(rv2i(rv).(map[bool]bool), e) } -func (_ fastpathT) EncMapBoolBoolV(v map[bool]bool, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() +func (_ fastpathT) EncMapBoolBoolV(v map[bool]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() return } - ee.EncodeMapStart(len(v)) + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) if e.h.Canonical { v2 := make([]bool, len(v)) var i int @@ -15914,30 +13629,28 @@ func (_ fastpathT) EncMapBoolBoolV(v map[bool]bool, checkNil bool, e *Encoder) { } sort.Sort(boolSlice(v2)) for _, k2 := range v2 { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeBool(bool(k2)) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeBool(v[bool(k2)]) } } else { for k2, v2 := range v { - if cr != nil { - cr.sendContainerState(containerMapKey) + if esep { + ee.WriteMapElemKey() } ee.EncodeBool(k2) - if cr != nil { - cr.sendContainerState(containerMapValue) + if esep { + ee.WriteMapElemValue() } ee.EncodeBool(v2) } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + ee.WriteMapEnd() } // -- decode @@ -15947,2170 +13660,1899 @@ func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { switch v := iv.(type) { case []interface{}: - fastpathTV.DecSliceIntfV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecSliceIntfV(v, false, d) case 
*[]interface{}: - v2, changed2 := fastpathTV.DecSliceIntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecSliceIntfV(*v, true, d); changed2 { *v = v2 } case map[interface{}]interface{}: - fastpathTV.DecMapIntfIntfV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapIntfIntfV(v, false, d) case *map[interface{}]interface{}: - v2, changed2 := fastpathTV.DecMapIntfIntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapIntfIntfV(*v, true, d); changed2 { *v = v2 } case map[interface{}]string: - fastpathTV.DecMapIntfStringV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapIntfStringV(v, false, d) case *map[interface{}]string: - v2, changed2 := fastpathTV.DecMapIntfStringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapIntfStringV(*v, true, d); changed2 { *v = v2 } case map[interface{}]uint: - fastpathTV.DecMapIntfUintV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapIntfUintV(v, false, d) case *map[interface{}]uint: - v2, changed2 := fastpathTV.DecMapIntfUintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapIntfUintV(*v, true, d); changed2 { *v = v2 } case map[interface{}]uint8: - fastpathTV.DecMapIntfUint8V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapIntfUint8V(v, false, d) case *map[interface{}]uint8: - v2, changed2 := fastpathTV.DecMapIntfUint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapIntfUint8V(*v, true, d); changed2 { *v = v2 } case map[interface{}]uint16: - fastpathTV.DecMapIntfUint16V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapIntfUint16V(v, false, d) case *map[interface{}]uint16: - v2, changed2 := fastpathTV.DecMapIntfUint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapIntfUint16V(*v, true, d); changed2 { *v = v2 } case map[interface{}]uint32: - fastpathTV.DecMapIntfUint32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapIntfUint32V(v, false, d) case *map[interface{}]uint32: - v2, changed2 := fastpathTV.DecMapIntfUint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapIntfUint32V(*v, true, d); changed2 { *v = v2 } case map[interface{}]uint64: - fastpathTV.DecMapIntfUint64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapIntfUint64V(v, false, d) case *map[interface{}]uint64: - v2, changed2 := fastpathTV.DecMapIntfUint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapIntfUint64V(*v, true, d); changed2 { *v = v2 } case map[interface{}]uintptr: - fastpathTV.DecMapIntfUintptrV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapIntfUintptrV(v, false, d) case *map[interface{}]uintptr: - v2, changed2 := fastpathTV.DecMapIntfUintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapIntfUintptrV(*v, true, d); changed2 { *v = v2 } case map[interface{}]int: - fastpathTV.DecMapIntfIntV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapIntfIntV(v, false, d) case *map[interface{}]int: - v2, changed2 := fastpathTV.DecMapIntfIntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapIntfIntV(*v, true, d); changed2 { *v = v2 } case map[interface{}]int8: - fastpathTV.DecMapIntfInt8V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapIntfInt8V(v, false, d) case *map[interface{}]int8: - v2, changed2 := 
fastpathTV.DecMapIntfInt8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapIntfInt8V(*v, true, d); changed2 { *v = v2 } case map[interface{}]int16: - fastpathTV.DecMapIntfInt16V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapIntfInt16V(v, false, d) case *map[interface{}]int16: - v2, changed2 := fastpathTV.DecMapIntfInt16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapIntfInt16V(*v, true, d); changed2 { *v = v2 } case map[interface{}]int32: - fastpathTV.DecMapIntfInt32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapIntfInt32V(v, false, d) case *map[interface{}]int32: - v2, changed2 := fastpathTV.DecMapIntfInt32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapIntfInt32V(*v, true, d); changed2 { *v = v2 } case map[interface{}]int64: - fastpathTV.DecMapIntfInt64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapIntfInt64V(v, false, d) case *map[interface{}]int64: - v2, changed2 := fastpathTV.DecMapIntfInt64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapIntfInt64V(*v, true, d); changed2 { *v = v2 } case map[interface{}]float32: - fastpathTV.DecMapIntfFloat32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapIntfFloat32V(v, false, d) case *map[interface{}]float32: - v2, changed2 := fastpathTV.DecMapIntfFloat32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapIntfFloat32V(*v, true, d); changed2 { *v = v2 } case map[interface{}]float64: - fastpathTV.DecMapIntfFloat64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapIntfFloat64V(v, false, d) case *map[interface{}]float64: - v2, changed2 := fastpathTV.DecMapIntfFloat64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapIntfFloat64V(*v, true, d); changed2 { *v = v2 } case map[interface{}]bool: - fastpathTV.DecMapIntfBoolV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapIntfBoolV(v, false, d) case *map[interface{}]bool: - v2, changed2 := fastpathTV.DecMapIntfBoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapIntfBoolV(*v, true, d); changed2 { *v = v2 } case []string: - fastpathTV.DecSliceStringV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecSliceStringV(v, false, d) case *[]string: - v2, changed2 := fastpathTV.DecSliceStringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecSliceStringV(*v, true, d); changed2 { *v = v2 } case map[string]interface{}: - fastpathTV.DecMapStringIntfV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapStringIntfV(v, false, d) case *map[string]interface{}: - v2, changed2 := fastpathTV.DecMapStringIntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapStringIntfV(*v, true, d); changed2 { *v = v2 } case map[string]string: - fastpathTV.DecMapStringStringV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapStringStringV(v, false, d) case *map[string]string: - v2, changed2 := fastpathTV.DecMapStringStringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapStringStringV(*v, true, d); changed2 { *v = v2 } case map[string]uint: - fastpathTV.DecMapStringUintV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapStringUintV(v, false, d) case *map[string]uint: - v2, changed2 := fastpathTV.DecMapStringUintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if 
v2, changed2 := fastpathTV.DecMapStringUintV(*v, true, d); changed2 { *v = v2 } case map[string]uint8: - fastpathTV.DecMapStringUint8V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapStringUint8V(v, false, d) case *map[string]uint8: - v2, changed2 := fastpathTV.DecMapStringUint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapStringUint8V(*v, true, d); changed2 { *v = v2 } case map[string]uint16: - fastpathTV.DecMapStringUint16V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapStringUint16V(v, false, d) case *map[string]uint16: - v2, changed2 := fastpathTV.DecMapStringUint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapStringUint16V(*v, true, d); changed2 { *v = v2 } case map[string]uint32: - fastpathTV.DecMapStringUint32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapStringUint32V(v, false, d) case *map[string]uint32: - v2, changed2 := fastpathTV.DecMapStringUint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapStringUint32V(*v, true, d); changed2 { *v = v2 } case map[string]uint64: - fastpathTV.DecMapStringUint64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapStringUint64V(v, false, d) case *map[string]uint64: - v2, changed2 := fastpathTV.DecMapStringUint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapStringUint64V(*v, true, d); changed2 { *v = v2 } case map[string]uintptr: - fastpathTV.DecMapStringUintptrV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapStringUintptrV(v, false, d) case *map[string]uintptr: - v2, changed2 := fastpathTV.DecMapStringUintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapStringUintptrV(*v, true, d); changed2 { *v = v2 } case map[string]int: - fastpathTV.DecMapStringIntV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapStringIntV(v, false, d) case *map[string]int: - v2, changed2 := fastpathTV.DecMapStringIntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapStringIntV(*v, true, d); changed2 { *v = v2 } case map[string]int8: - fastpathTV.DecMapStringInt8V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapStringInt8V(v, false, d) case *map[string]int8: - v2, changed2 := fastpathTV.DecMapStringInt8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapStringInt8V(*v, true, d); changed2 { *v = v2 } case map[string]int16: - fastpathTV.DecMapStringInt16V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapStringInt16V(v, false, d) case *map[string]int16: - v2, changed2 := fastpathTV.DecMapStringInt16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapStringInt16V(*v, true, d); changed2 { *v = v2 } case map[string]int32: - fastpathTV.DecMapStringInt32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapStringInt32V(v, false, d) case *map[string]int32: - v2, changed2 := fastpathTV.DecMapStringInt32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapStringInt32V(*v, true, d); changed2 { *v = v2 } case map[string]int64: - fastpathTV.DecMapStringInt64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapStringInt64V(v, false, d) case *map[string]int64: - v2, changed2 := fastpathTV.DecMapStringInt64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapStringInt64V(*v, true, d); changed2 { *v = v2 } 
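The decode half of the regenerated file repeats one pattern across every fast-path type: fastpathDecodeTypeSwitch keeps a value case that decodes in place and a pointer case that folds the old two-step "v2, changed2 := ...; if changed2 { *v = v2 }" into a single if statement, dropping the fastpathCheckNilFalse argument along the way. Roughly, with a hypothetical decInts helper standing in for the generated Dec...V functions:

package main

import "fmt"

// decInts stands in for a generated Dec...V helper: it fills v from src and
// reports changed == true only when it had to hand back a different slice.
func decInts(v []int, canChange bool, src []int) ([]int, bool) {
	if len(src) <= cap(v) {
		v = v[:len(src)]
		copy(v, src)
		return v, false
	}
	if !canChange {
		copy(v, src) // best effort into the existing backing array
		return v, false
	}
	v2 := make([]int, len(src))
	copy(v2, src)
	return v2, true
}

// decodeTypeSwitch mirrors fastpathDecodeTypeSwitch: the value case decodes
// in place, the pointer case reassigns only when the helper reports a change.
func decodeTypeSwitch(iv interface{}, src []int) bool {
	switch v := iv.(type) {
	case []int:
		decInts(v, false, src)
	case *[]int:
		if v2, changed2 := decInts(*v, true, src); changed2 {
			*v = v2
		}
	default:
		return false
	}
	return true
}

func main() {
	s := make([]int, 0, 1)
	decodeTypeSwitch(&s, []int{1, 2, 3})
	fmt.Println(s) // [1 2 3]: the helper had to grow the slice, so *v was replaced
}

The canChange flag mirrors the true/false second argument in the switch above: only the pointer cases are allowed to hand back a replacement slice or map.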
case map[string]float32: - fastpathTV.DecMapStringFloat32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapStringFloat32V(v, false, d) case *map[string]float32: - v2, changed2 := fastpathTV.DecMapStringFloat32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapStringFloat32V(*v, true, d); changed2 { *v = v2 } case map[string]float64: - fastpathTV.DecMapStringFloat64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapStringFloat64V(v, false, d) case *map[string]float64: - v2, changed2 := fastpathTV.DecMapStringFloat64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapStringFloat64V(*v, true, d); changed2 { *v = v2 } case map[string]bool: - fastpathTV.DecMapStringBoolV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapStringBoolV(v, false, d) case *map[string]bool: - v2, changed2 := fastpathTV.DecMapStringBoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapStringBoolV(*v, true, d); changed2 { *v = v2 } case []float32: - fastpathTV.DecSliceFloat32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecSliceFloat32V(v, false, d) case *[]float32: - v2, changed2 := fastpathTV.DecSliceFloat32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecSliceFloat32V(*v, true, d); changed2 { *v = v2 } case map[float32]interface{}: - fastpathTV.DecMapFloat32IntfV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapFloat32IntfV(v, false, d) case *map[float32]interface{}: - v2, changed2 := fastpathTV.DecMapFloat32IntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapFloat32IntfV(*v, true, d); changed2 { *v = v2 } case map[float32]string: - fastpathTV.DecMapFloat32StringV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapFloat32StringV(v, false, d) case *map[float32]string: - v2, changed2 := fastpathTV.DecMapFloat32StringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapFloat32StringV(*v, true, d); changed2 { *v = v2 } case map[float32]uint: - fastpathTV.DecMapFloat32UintV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapFloat32UintV(v, false, d) case *map[float32]uint: - v2, changed2 := fastpathTV.DecMapFloat32UintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapFloat32UintV(*v, true, d); changed2 { *v = v2 } case map[float32]uint8: - fastpathTV.DecMapFloat32Uint8V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapFloat32Uint8V(v, false, d) case *map[float32]uint8: - v2, changed2 := fastpathTV.DecMapFloat32Uint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapFloat32Uint8V(*v, true, d); changed2 { *v = v2 } case map[float32]uint16: - fastpathTV.DecMapFloat32Uint16V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapFloat32Uint16V(v, false, d) case *map[float32]uint16: - v2, changed2 := fastpathTV.DecMapFloat32Uint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapFloat32Uint16V(*v, true, d); changed2 { *v = v2 } case map[float32]uint32: - fastpathTV.DecMapFloat32Uint32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapFloat32Uint32V(v, false, d) case *map[float32]uint32: - v2, changed2 := fastpathTV.DecMapFloat32Uint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapFloat32Uint32V(*v, true, d); changed2 { *v = v2 } case map[float32]uint64: - 
fastpathTV.DecMapFloat32Uint64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapFloat32Uint64V(v, false, d) case *map[float32]uint64: - v2, changed2 := fastpathTV.DecMapFloat32Uint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapFloat32Uint64V(*v, true, d); changed2 { *v = v2 } case map[float32]uintptr: - fastpathTV.DecMapFloat32UintptrV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapFloat32UintptrV(v, false, d) case *map[float32]uintptr: - v2, changed2 := fastpathTV.DecMapFloat32UintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapFloat32UintptrV(*v, true, d); changed2 { *v = v2 } case map[float32]int: - fastpathTV.DecMapFloat32IntV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapFloat32IntV(v, false, d) case *map[float32]int: - v2, changed2 := fastpathTV.DecMapFloat32IntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapFloat32IntV(*v, true, d); changed2 { *v = v2 } case map[float32]int8: - fastpathTV.DecMapFloat32Int8V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapFloat32Int8V(v, false, d) case *map[float32]int8: - v2, changed2 := fastpathTV.DecMapFloat32Int8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapFloat32Int8V(*v, true, d); changed2 { *v = v2 } case map[float32]int16: - fastpathTV.DecMapFloat32Int16V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapFloat32Int16V(v, false, d) case *map[float32]int16: - v2, changed2 := fastpathTV.DecMapFloat32Int16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapFloat32Int16V(*v, true, d); changed2 { *v = v2 } case map[float32]int32: - fastpathTV.DecMapFloat32Int32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapFloat32Int32V(v, false, d) case *map[float32]int32: - v2, changed2 := fastpathTV.DecMapFloat32Int32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapFloat32Int32V(*v, true, d); changed2 { *v = v2 } case map[float32]int64: - fastpathTV.DecMapFloat32Int64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapFloat32Int64V(v, false, d) case *map[float32]int64: - v2, changed2 := fastpathTV.DecMapFloat32Int64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapFloat32Int64V(*v, true, d); changed2 { *v = v2 } case map[float32]float32: - fastpathTV.DecMapFloat32Float32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapFloat32Float32V(v, false, d) case *map[float32]float32: - v2, changed2 := fastpathTV.DecMapFloat32Float32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapFloat32Float32V(*v, true, d); changed2 { *v = v2 } case map[float32]float64: - fastpathTV.DecMapFloat32Float64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapFloat32Float64V(v, false, d) case *map[float32]float64: - v2, changed2 := fastpathTV.DecMapFloat32Float64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapFloat32Float64V(*v, true, d); changed2 { *v = v2 } case map[float32]bool: - fastpathTV.DecMapFloat32BoolV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapFloat32BoolV(v, false, d) case *map[float32]bool: - v2, changed2 := fastpathTV.DecMapFloat32BoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapFloat32BoolV(*v, true, d); changed2 { *v = v2 } case []float64: - 
fastpathTV.DecSliceFloat64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecSliceFloat64V(v, false, d) case *[]float64: - v2, changed2 := fastpathTV.DecSliceFloat64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecSliceFloat64V(*v, true, d); changed2 { *v = v2 } case map[float64]interface{}: - fastpathTV.DecMapFloat64IntfV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapFloat64IntfV(v, false, d) case *map[float64]interface{}: - v2, changed2 := fastpathTV.DecMapFloat64IntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapFloat64IntfV(*v, true, d); changed2 { *v = v2 } case map[float64]string: - fastpathTV.DecMapFloat64StringV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapFloat64StringV(v, false, d) case *map[float64]string: - v2, changed2 := fastpathTV.DecMapFloat64StringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapFloat64StringV(*v, true, d); changed2 { *v = v2 } case map[float64]uint: - fastpathTV.DecMapFloat64UintV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapFloat64UintV(v, false, d) case *map[float64]uint: - v2, changed2 := fastpathTV.DecMapFloat64UintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapFloat64UintV(*v, true, d); changed2 { *v = v2 } case map[float64]uint8: - fastpathTV.DecMapFloat64Uint8V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapFloat64Uint8V(v, false, d) case *map[float64]uint8: - v2, changed2 := fastpathTV.DecMapFloat64Uint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapFloat64Uint8V(*v, true, d); changed2 { *v = v2 } case map[float64]uint16: - fastpathTV.DecMapFloat64Uint16V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapFloat64Uint16V(v, false, d) case *map[float64]uint16: - v2, changed2 := fastpathTV.DecMapFloat64Uint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapFloat64Uint16V(*v, true, d); changed2 { *v = v2 } case map[float64]uint32: - fastpathTV.DecMapFloat64Uint32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapFloat64Uint32V(v, false, d) case *map[float64]uint32: - v2, changed2 := fastpathTV.DecMapFloat64Uint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapFloat64Uint32V(*v, true, d); changed2 { *v = v2 } case map[float64]uint64: - fastpathTV.DecMapFloat64Uint64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapFloat64Uint64V(v, false, d) case *map[float64]uint64: - v2, changed2 := fastpathTV.DecMapFloat64Uint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapFloat64Uint64V(*v, true, d); changed2 { *v = v2 } case map[float64]uintptr: - fastpathTV.DecMapFloat64UintptrV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapFloat64UintptrV(v, false, d) case *map[float64]uintptr: - v2, changed2 := fastpathTV.DecMapFloat64UintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapFloat64UintptrV(*v, true, d); changed2 { *v = v2 } case map[float64]int: - fastpathTV.DecMapFloat64IntV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapFloat64IntV(v, false, d) case *map[float64]int: - v2, changed2 := fastpathTV.DecMapFloat64IntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapFloat64IntV(*v, true, d); changed2 { *v = v2 } case map[float64]int8: - 
fastpathTV.DecMapFloat64Int8V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapFloat64Int8V(v, false, d) case *map[float64]int8: - v2, changed2 := fastpathTV.DecMapFloat64Int8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapFloat64Int8V(*v, true, d); changed2 { *v = v2 } case map[float64]int16: - fastpathTV.DecMapFloat64Int16V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapFloat64Int16V(v, false, d) case *map[float64]int16: - v2, changed2 := fastpathTV.DecMapFloat64Int16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapFloat64Int16V(*v, true, d); changed2 { *v = v2 } case map[float64]int32: - fastpathTV.DecMapFloat64Int32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapFloat64Int32V(v, false, d) case *map[float64]int32: - v2, changed2 := fastpathTV.DecMapFloat64Int32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapFloat64Int32V(*v, true, d); changed2 { *v = v2 } case map[float64]int64: - fastpathTV.DecMapFloat64Int64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapFloat64Int64V(v, false, d) case *map[float64]int64: - v2, changed2 := fastpathTV.DecMapFloat64Int64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapFloat64Int64V(*v, true, d); changed2 { *v = v2 } case map[float64]float32: - fastpathTV.DecMapFloat64Float32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapFloat64Float32V(v, false, d) case *map[float64]float32: - v2, changed2 := fastpathTV.DecMapFloat64Float32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapFloat64Float32V(*v, true, d); changed2 { *v = v2 } case map[float64]float64: - fastpathTV.DecMapFloat64Float64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapFloat64Float64V(v, false, d) case *map[float64]float64: - v2, changed2 := fastpathTV.DecMapFloat64Float64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapFloat64Float64V(*v, true, d); changed2 { *v = v2 } case map[float64]bool: - fastpathTV.DecMapFloat64BoolV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapFloat64BoolV(v, false, d) case *map[float64]bool: - v2, changed2 := fastpathTV.DecMapFloat64BoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapFloat64BoolV(*v, true, d); changed2 { *v = v2 } case []uint: - fastpathTV.DecSliceUintV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecSliceUintV(v, false, d) case *[]uint: - v2, changed2 := fastpathTV.DecSliceUintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecSliceUintV(*v, true, d); changed2 { *v = v2 } case map[uint]interface{}: - fastpathTV.DecMapUintIntfV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUintIntfV(v, false, d) case *map[uint]interface{}: - v2, changed2 := fastpathTV.DecMapUintIntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUintIntfV(*v, true, d); changed2 { *v = v2 } case map[uint]string: - fastpathTV.DecMapUintStringV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUintStringV(v, false, d) case *map[uint]string: - v2, changed2 := fastpathTV.DecMapUintStringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUintStringV(*v, true, d); changed2 { *v = v2 } case map[uint]uint: - fastpathTV.DecMapUintUintV(v, fastpathCheckNilFalse, false, d) + 
fastpathTV.DecMapUintUintV(v, false, d) case *map[uint]uint: - v2, changed2 := fastpathTV.DecMapUintUintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUintUintV(*v, true, d); changed2 { *v = v2 } case map[uint]uint8: - fastpathTV.DecMapUintUint8V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUintUint8V(v, false, d) case *map[uint]uint8: - v2, changed2 := fastpathTV.DecMapUintUint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUintUint8V(*v, true, d); changed2 { *v = v2 } case map[uint]uint16: - fastpathTV.DecMapUintUint16V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUintUint16V(v, false, d) case *map[uint]uint16: - v2, changed2 := fastpathTV.DecMapUintUint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUintUint16V(*v, true, d); changed2 { *v = v2 } case map[uint]uint32: - fastpathTV.DecMapUintUint32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUintUint32V(v, false, d) case *map[uint]uint32: - v2, changed2 := fastpathTV.DecMapUintUint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUintUint32V(*v, true, d); changed2 { *v = v2 } case map[uint]uint64: - fastpathTV.DecMapUintUint64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUintUint64V(v, false, d) case *map[uint]uint64: - v2, changed2 := fastpathTV.DecMapUintUint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUintUint64V(*v, true, d); changed2 { *v = v2 } case map[uint]uintptr: - fastpathTV.DecMapUintUintptrV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUintUintptrV(v, false, d) case *map[uint]uintptr: - v2, changed2 := fastpathTV.DecMapUintUintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUintUintptrV(*v, true, d); changed2 { *v = v2 } case map[uint]int: - fastpathTV.DecMapUintIntV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUintIntV(v, false, d) case *map[uint]int: - v2, changed2 := fastpathTV.DecMapUintIntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUintIntV(*v, true, d); changed2 { *v = v2 } case map[uint]int8: - fastpathTV.DecMapUintInt8V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUintInt8V(v, false, d) case *map[uint]int8: - v2, changed2 := fastpathTV.DecMapUintInt8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUintInt8V(*v, true, d); changed2 { *v = v2 } case map[uint]int16: - fastpathTV.DecMapUintInt16V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUintInt16V(v, false, d) case *map[uint]int16: - v2, changed2 := fastpathTV.DecMapUintInt16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUintInt16V(*v, true, d); changed2 { *v = v2 } case map[uint]int32: - fastpathTV.DecMapUintInt32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUintInt32V(v, false, d) case *map[uint]int32: - v2, changed2 := fastpathTV.DecMapUintInt32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUintInt32V(*v, true, d); changed2 { *v = v2 } case map[uint]int64: - fastpathTV.DecMapUintInt64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUintInt64V(v, false, d) case *map[uint]int64: - v2, changed2 := fastpathTV.DecMapUintInt64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := 
fastpathTV.DecMapUintInt64V(*v, true, d); changed2 { *v = v2 } case map[uint]float32: - fastpathTV.DecMapUintFloat32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUintFloat32V(v, false, d) case *map[uint]float32: - v2, changed2 := fastpathTV.DecMapUintFloat32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUintFloat32V(*v, true, d); changed2 { *v = v2 } case map[uint]float64: - fastpathTV.DecMapUintFloat64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUintFloat64V(v, false, d) case *map[uint]float64: - v2, changed2 := fastpathTV.DecMapUintFloat64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUintFloat64V(*v, true, d); changed2 { *v = v2 } case map[uint]bool: - fastpathTV.DecMapUintBoolV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUintBoolV(v, false, d) case *map[uint]bool: - v2, changed2 := fastpathTV.DecMapUintBoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUintBoolV(*v, true, d); changed2 { *v = v2 } case map[uint8]interface{}: - fastpathTV.DecMapUint8IntfV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint8IntfV(v, false, d) case *map[uint8]interface{}: - v2, changed2 := fastpathTV.DecMapUint8IntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint8IntfV(*v, true, d); changed2 { *v = v2 } case map[uint8]string: - fastpathTV.DecMapUint8StringV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint8StringV(v, false, d) case *map[uint8]string: - v2, changed2 := fastpathTV.DecMapUint8StringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint8StringV(*v, true, d); changed2 { *v = v2 } case map[uint8]uint: - fastpathTV.DecMapUint8UintV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint8UintV(v, false, d) case *map[uint8]uint: - v2, changed2 := fastpathTV.DecMapUint8UintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint8UintV(*v, true, d); changed2 { *v = v2 } case map[uint8]uint8: - fastpathTV.DecMapUint8Uint8V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint8Uint8V(v, false, d) case *map[uint8]uint8: - v2, changed2 := fastpathTV.DecMapUint8Uint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint8Uint8V(*v, true, d); changed2 { *v = v2 } case map[uint8]uint16: - fastpathTV.DecMapUint8Uint16V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint8Uint16V(v, false, d) case *map[uint8]uint16: - v2, changed2 := fastpathTV.DecMapUint8Uint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint8Uint16V(*v, true, d); changed2 { *v = v2 } case map[uint8]uint32: - fastpathTV.DecMapUint8Uint32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint8Uint32V(v, false, d) case *map[uint8]uint32: - v2, changed2 := fastpathTV.DecMapUint8Uint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint8Uint32V(*v, true, d); changed2 { *v = v2 } case map[uint8]uint64: - fastpathTV.DecMapUint8Uint64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint8Uint64V(v, false, d) case *map[uint8]uint64: - v2, changed2 := fastpathTV.DecMapUint8Uint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint8Uint64V(*v, true, d); changed2 { *v = v2 } case map[uint8]uintptr: - fastpathTV.DecMapUint8UintptrV(v, 
fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint8UintptrV(v, false, d) case *map[uint8]uintptr: - v2, changed2 := fastpathTV.DecMapUint8UintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint8UintptrV(*v, true, d); changed2 { *v = v2 } case map[uint8]int: - fastpathTV.DecMapUint8IntV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint8IntV(v, false, d) case *map[uint8]int: - v2, changed2 := fastpathTV.DecMapUint8IntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint8IntV(*v, true, d); changed2 { *v = v2 } case map[uint8]int8: - fastpathTV.DecMapUint8Int8V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint8Int8V(v, false, d) case *map[uint8]int8: - v2, changed2 := fastpathTV.DecMapUint8Int8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint8Int8V(*v, true, d); changed2 { *v = v2 } case map[uint8]int16: - fastpathTV.DecMapUint8Int16V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint8Int16V(v, false, d) case *map[uint8]int16: - v2, changed2 := fastpathTV.DecMapUint8Int16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint8Int16V(*v, true, d); changed2 { *v = v2 } case map[uint8]int32: - fastpathTV.DecMapUint8Int32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint8Int32V(v, false, d) case *map[uint8]int32: - v2, changed2 := fastpathTV.DecMapUint8Int32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint8Int32V(*v, true, d); changed2 { *v = v2 } case map[uint8]int64: - fastpathTV.DecMapUint8Int64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint8Int64V(v, false, d) case *map[uint8]int64: - v2, changed2 := fastpathTV.DecMapUint8Int64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint8Int64V(*v, true, d); changed2 { *v = v2 } case map[uint8]float32: - fastpathTV.DecMapUint8Float32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint8Float32V(v, false, d) case *map[uint8]float32: - v2, changed2 := fastpathTV.DecMapUint8Float32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint8Float32V(*v, true, d); changed2 { *v = v2 } case map[uint8]float64: - fastpathTV.DecMapUint8Float64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint8Float64V(v, false, d) case *map[uint8]float64: - v2, changed2 := fastpathTV.DecMapUint8Float64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint8Float64V(*v, true, d); changed2 { *v = v2 } case map[uint8]bool: - fastpathTV.DecMapUint8BoolV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint8BoolV(v, false, d) case *map[uint8]bool: - v2, changed2 := fastpathTV.DecMapUint8BoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint8BoolV(*v, true, d); changed2 { *v = v2 } case []uint16: - fastpathTV.DecSliceUint16V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecSliceUint16V(v, false, d) case *[]uint16: - v2, changed2 := fastpathTV.DecSliceUint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecSliceUint16V(*v, true, d); changed2 { *v = v2 } case map[uint16]interface{}: - fastpathTV.DecMapUint16IntfV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint16IntfV(v, false, d) case *map[uint16]interface{}: - v2, changed2 := fastpathTV.DecMapUint16IntfV(*v, 
fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint16IntfV(*v, true, d); changed2 { *v = v2 } case map[uint16]string: - fastpathTV.DecMapUint16StringV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint16StringV(v, false, d) case *map[uint16]string: - v2, changed2 := fastpathTV.DecMapUint16StringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint16StringV(*v, true, d); changed2 { *v = v2 } case map[uint16]uint: - fastpathTV.DecMapUint16UintV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint16UintV(v, false, d) case *map[uint16]uint: - v2, changed2 := fastpathTV.DecMapUint16UintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint16UintV(*v, true, d); changed2 { *v = v2 } case map[uint16]uint8: - fastpathTV.DecMapUint16Uint8V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint16Uint8V(v, false, d) case *map[uint16]uint8: - v2, changed2 := fastpathTV.DecMapUint16Uint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint16Uint8V(*v, true, d); changed2 { *v = v2 } case map[uint16]uint16: - fastpathTV.DecMapUint16Uint16V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint16Uint16V(v, false, d) case *map[uint16]uint16: - v2, changed2 := fastpathTV.DecMapUint16Uint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint16Uint16V(*v, true, d); changed2 { *v = v2 } case map[uint16]uint32: - fastpathTV.DecMapUint16Uint32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint16Uint32V(v, false, d) case *map[uint16]uint32: - v2, changed2 := fastpathTV.DecMapUint16Uint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint16Uint32V(*v, true, d); changed2 { *v = v2 } case map[uint16]uint64: - fastpathTV.DecMapUint16Uint64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint16Uint64V(v, false, d) case *map[uint16]uint64: - v2, changed2 := fastpathTV.DecMapUint16Uint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint16Uint64V(*v, true, d); changed2 { *v = v2 } case map[uint16]uintptr: - fastpathTV.DecMapUint16UintptrV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint16UintptrV(v, false, d) case *map[uint16]uintptr: - v2, changed2 := fastpathTV.DecMapUint16UintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint16UintptrV(*v, true, d); changed2 { *v = v2 } case map[uint16]int: - fastpathTV.DecMapUint16IntV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint16IntV(v, false, d) case *map[uint16]int: - v2, changed2 := fastpathTV.DecMapUint16IntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint16IntV(*v, true, d); changed2 { *v = v2 } case map[uint16]int8: - fastpathTV.DecMapUint16Int8V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint16Int8V(v, false, d) case *map[uint16]int8: - v2, changed2 := fastpathTV.DecMapUint16Int8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint16Int8V(*v, true, d); changed2 { *v = v2 } case map[uint16]int16: - fastpathTV.DecMapUint16Int16V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint16Int16V(v, false, d) case *map[uint16]int16: - v2, changed2 := fastpathTV.DecMapUint16Int16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := 
fastpathTV.DecMapUint16Int16V(*v, true, d); changed2 { *v = v2 } case map[uint16]int32: - fastpathTV.DecMapUint16Int32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint16Int32V(v, false, d) case *map[uint16]int32: - v2, changed2 := fastpathTV.DecMapUint16Int32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint16Int32V(*v, true, d); changed2 { *v = v2 } case map[uint16]int64: - fastpathTV.DecMapUint16Int64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint16Int64V(v, false, d) case *map[uint16]int64: - v2, changed2 := fastpathTV.DecMapUint16Int64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint16Int64V(*v, true, d); changed2 { *v = v2 } case map[uint16]float32: - fastpathTV.DecMapUint16Float32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint16Float32V(v, false, d) case *map[uint16]float32: - v2, changed2 := fastpathTV.DecMapUint16Float32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint16Float32V(*v, true, d); changed2 { *v = v2 } case map[uint16]float64: - fastpathTV.DecMapUint16Float64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint16Float64V(v, false, d) case *map[uint16]float64: - v2, changed2 := fastpathTV.DecMapUint16Float64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint16Float64V(*v, true, d); changed2 { *v = v2 } case map[uint16]bool: - fastpathTV.DecMapUint16BoolV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint16BoolV(v, false, d) case *map[uint16]bool: - v2, changed2 := fastpathTV.DecMapUint16BoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint16BoolV(*v, true, d); changed2 { *v = v2 } case []uint32: - fastpathTV.DecSliceUint32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecSliceUint32V(v, false, d) case *[]uint32: - v2, changed2 := fastpathTV.DecSliceUint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecSliceUint32V(*v, true, d); changed2 { *v = v2 } case map[uint32]interface{}: - fastpathTV.DecMapUint32IntfV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint32IntfV(v, false, d) case *map[uint32]interface{}: - v2, changed2 := fastpathTV.DecMapUint32IntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint32IntfV(*v, true, d); changed2 { *v = v2 } case map[uint32]string: - fastpathTV.DecMapUint32StringV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint32StringV(v, false, d) case *map[uint32]string: - v2, changed2 := fastpathTV.DecMapUint32StringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint32StringV(*v, true, d); changed2 { *v = v2 } case map[uint32]uint: - fastpathTV.DecMapUint32UintV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint32UintV(v, false, d) case *map[uint32]uint: - v2, changed2 := fastpathTV.DecMapUint32UintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint32UintV(*v, true, d); changed2 { *v = v2 } case map[uint32]uint8: - fastpathTV.DecMapUint32Uint8V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint32Uint8V(v, false, d) case *map[uint32]uint8: - v2, changed2 := fastpathTV.DecMapUint32Uint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint32Uint8V(*v, true, d); changed2 { *v = v2 } case map[uint32]uint16: - 
fastpathTV.DecMapUint32Uint16V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint32Uint16V(v, false, d) case *map[uint32]uint16: - v2, changed2 := fastpathTV.DecMapUint32Uint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint32Uint16V(*v, true, d); changed2 { *v = v2 } case map[uint32]uint32: - fastpathTV.DecMapUint32Uint32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint32Uint32V(v, false, d) case *map[uint32]uint32: - v2, changed2 := fastpathTV.DecMapUint32Uint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint32Uint32V(*v, true, d); changed2 { *v = v2 } case map[uint32]uint64: - fastpathTV.DecMapUint32Uint64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint32Uint64V(v, false, d) case *map[uint32]uint64: - v2, changed2 := fastpathTV.DecMapUint32Uint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint32Uint64V(*v, true, d); changed2 { *v = v2 } case map[uint32]uintptr: - fastpathTV.DecMapUint32UintptrV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint32UintptrV(v, false, d) case *map[uint32]uintptr: - v2, changed2 := fastpathTV.DecMapUint32UintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint32UintptrV(*v, true, d); changed2 { *v = v2 } case map[uint32]int: - fastpathTV.DecMapUint32IntV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint32IntV(v, false, d) case *map[uint32]int: - v2, changed2 := fastpathTV.DecMapUint32IntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint32IntV(*v, true, d); changed2 { *v = v2 } case map[uint32]int8: - fastpathTV.DecMapUint32Int8V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint32Int8V(v, false, d) case *map[uint32]int8: - v2, changed2 := fastpathTV.DecMapUint32Int8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint32Int8V(*v, true, d); changed2 { *v = v2 } case map[uint32]int16: - fastpathTV.DecMapUint32Int16V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint32Int16V(v, false, d) case *map[uint32]int16: - v2, changed2 := fastpathTV.DecMapUint32Int16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint32Int16V(*v, true, d); changed2 { *v = v2 } case map[uint32]int32: - fastpathTV.DecMapUint32Int32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint32Int32V(v, false, d) case *map[uint32]int32: - v2, changed2 := fastpathTV.DecMapUint32Int32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint32Int32V(*v, true, d); changed2 { *v = v2 } case map[uint32]int64: - fastpathTV.DecMapUint32Int64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint32Int64V(v, false, d) case *map[uint32]int64: - v2, changed2 := fastpathTV.DecMapUint32Int64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint32Int64V(*v, true, d); changed2 { *v = v2 } case map[uint32]float32: - fastpathTV.DecMapUint32Float32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint32Float32V(v, false, d) case *map[uint32]float32: - v2, changed2 := fastpathTV.DecMapUint32Float32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint32Float32V(*v, true, d); changed2 { *v = v2 } case map[uint32]float64: - fastpathTV.DecMapUint32Float64V(v, fastpathCheckNilFalse, false, d) + 
fastpathTV.DecMapUint32Float64V(v, false, d) case *map[uint32]float64: - v2, changed2 := fastpathTV.DecMapUint32Float64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint32Float64V(*v, true, d); changed2 { *v = v2 } case map[uint32]bool: - fastpathTV.DecMapUint32BoolV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint32BoolV(v, false, d) case *map[uint32]bool: - v2, changed2 := fastpathTV.DecMapUint32BoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint32BoolV(*v, true, d); changed2 { *v = v2 } case []uint64: - fastpathTV.DecSliceUint64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecSliceUint64V(v, false, d) case *[]uint64: - v2, changed2 := fastpathTV.DecSliceUint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecSliceUint64V(*v, true, d); changed2 { *v = v2 } case map[uint64]interface{}: - fastpathTV.DecMapUint64IntfV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint64IntfV(v, false, d) case *map[uint64]interface{}: - v2, changed2 := fastpathTV.DecMapUint64IntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint64IntfV(*v, true, d); changed2 { *v = v2 } case map[uint64]string: - fastpathTV.DecMapUint64StringV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint64StringV(v, false, d) case *map[uint64]string: - v2, changed2 := fastpathTV.DecMapUint64StringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint64StringV(*v, true, d); changed2 { *v = v2 } case map[uint64]uint: - fastpathTV.DecMapUint64UintV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint64UintV(v, false, d) case *map[uint64]uint: - v2, changed2 := fastpathTV.DecMapUint64UintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint64UintV(*v, true, d); changed2 { *v = v2 } case map[uint64]uint8: - fastpathTV.DecMapUint64Uint8V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint64Uint8V(v, false, d) case *map[uint64]uint8: - v2, changed2 := fastpathTV.DecMapUint64Uint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint64Uint8V(*v, true, d); changed2 { *v = v2 } case map[uint64]uint16: - fastpathTV.DecMapUint64Uint16V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint64Uint16V(v, false, d) case *map[uint64]uint16: - v2, changed2 := fastpathTV.DecMapUint64Uint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint64Uint16V(*v, true, d); changed2 { *v = v2 } case map[uint64]uint32: - fastpathTV.DecMapUint64Uint32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint64Uint32V(v, false, d) case *map[uint64]uint32: - v2, changed2 := fastpathTV.DecMapUint64Uint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint64Uint32V(*v, true, d); changed2 { *v = v2 } case map[uint64]uint64: - fastpathTV.DecMapUint64Uint64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint64Uint64V(v, false, d) case *map[uint64]uint64: - v2, changed2 := fastpathTV.DecMapUint64Uint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint64Uint64V(*v, true, d); changed2 { *v = v2 } case map[uint64]uintptr: - fastpathTV.DecMapUint64UintptrV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint64UintptrV(v, false, d) case *map[uint64]uintptr: - v2, changed2 := 
fastpathTV.DecMapUint64UintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint64UintptrV(*v, true, d); changed2 { *v = v2 } case map[uint64]int: - fastpathTV.DecMapUint64IntV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint64IntV(v, false, d) case *map[uint64]int: - v2, changed2 := fastpathTV.DecMapUint64IntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint64IntV(*v, true, d); changed2 { *v = v2 } case map[uint64]int8: - fastpathTV.DecMapUint64Int8V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint64Int8V(v, false, d) case *map[uint64]int8: - v2, changed2 := fastpathTV.DecMapUint64Int8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint64Int8V(*v, true, d); changed2 { *v = v2 } case map[uint64]int16: - fastpathTV.DecMapUint64Int16V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint64Int16V(v, false, d) case *map[uint64]int16: - v2, changed2 := fastpathTV.DecMapUint64Int16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint64Int16V(*v, true, d); changed2 { *v = v2 } case map[uint64]int32: - fastpathTV.DecMapUint64Int32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint64Int32V(v, false, d) case *map[uint64]int32: - v2, changed2 := fastpathTV.DecMapUint64Int32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint64Int32V(*v, true, d); changed2 { *v = v2 } case map[uint64]int64: - fastpathTV.DecMapUint64Int64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint64Int64V(v, false, d) case *map[uint64]int64: - v2, changed2 := fastpathTV.DecMapUint64Int64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint64Int64V(*v, true, d); changed2 { *v = v2 } case map[uint64]float32: - fastpathTV.DecMapUint64Float32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint64Float32V(v, false, d) case *map[uint64]float32: - v2, changed2 := fastpathTV.DecMapUint64Float32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint64Float32V(*v, true, d); changed2 { *v = v2 } case map[uint64]float64: - fastpathTV.DecMapUint64Float64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint64Float64V(v, false, d) case *map[uint64]float64: - v2, changed2 := fastpathTV.DecMapUint64Float64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint64Float64V(*v, true, d); changed2 { *v = v2 } case map[uint64]bool: - fastpathTV.DecMapUint64BoolV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUint64BoolV(v, false, d) case *map[uint64]bool: - v2, changed2 := fastpathTV.DecMapUint64BoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUint64BoolV(*v, true, d); changed2 { *v = v2 } case []uintptr: - fastpathTV.DecSliceUintptrV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecSliceUintptrV(v, false, d) case *[]uintptr: - v2, changed2 := fastpathTV.DecSliceUintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecSliceUintptrV(*v, true, d); changed2 { *v = v2 } case map[uintptr]interface{}: - fastpathTV.DecMapUintptrIntfV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUintptrIntfV(v, false, d) case *map[uintptr]interface{}: - v2, changed2 := fastpathTV.DecMapUintptrIntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, 
changed2 := fastpathTV.DecMapUintptrIntfV(*v, true, d); changed2 { *v = v2 } case map[uintptr]string: - fastpathTV.DecMapUintptrStringV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUintptrStringV(v, false, d) case *map[uintptr]string: - v2, changed2 := fastpathTV.DecMapUintptrStringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUintptrStringV(*v, true, d); changed2 { *v = v2 } case map[uintptr]uint: - fastpathTV.DecMapUintptrUintV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUintptrUintV(v, false, d) case *map[uintptr]uint: - v2, changed2 := fastpathTV.DecMapUintptrUintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUintptrUintV(*v, true, d); changed2 { *v = v2 } case map[uintptr]uint8: - fastpathTV.DecMapUintptrUint8V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUintptrUint8V(v, false, d) case *map[uintptr]uint8: - v2, changed2 := fastpathTV.DecMapUintptrUint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUintptrUint8V(*v, true, d); changed2 { *v = v2 } case map[uintptr]uint16: - fastpathTV.DecMapUintptrUint16V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUintptrUint16V(v, false, d) case *map[uintptr]uint16: - v2, changed2 := fastpathTV.DecMapUintptrUint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUintptrUint16V(*v, true, d); changed2 { *v = v2 } case map[uintptr]uint32: - fastpathTV.DecMapUintptrUint32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUintptrUint32V(v, false, d) case *map[uintptr]uint32: - v2, changed2 := fastpathTV.DecMapUintptrUint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUintptrUint32V(*v, true, d); changed2 { *v = v2 } case map[uintptr]uint64: - fastpathTV.DecMapUintptrUint64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUintptrUint64V(v, false, d) case *map[uintptr]uint64: - v2, changed2 := fastpathTV.DecMapUintptrUint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUintptrUint64V(*v, true, d); changed2 { *v = v2 } case map[uintptr]uintptr: - fastpathTV.DecMapUintptrUintptrV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUintptrUintptrV(v, false, d) case *map[uintptr]uintptr: - v2, changed2 := fastpathTV.DecMapUintptrUintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUintptrUintptrV(*v, true, d); changed2 { *v = v2 } case map[uintptr]int: - fastpathTV.DecMapUintptrIntV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUintptrIntV(v, false, d) case *map[uintptr]int: - v2, changed2 := fastpathTV.DecMapUintptrIntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUintptrIntV(*v, true, d); changed2 { *v = v2 } case map[uintptr]int8: - fastpathTV.DecMapUintptrInt8V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUintptrInt8V(v, false, d) case *map[uintptr]int8: - v2, changed2 := fastpathTV.DecMapUintptrInt8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUintptrInt8V(*v, true, d); changed2 { *v = v2 } case map[uintptr]int16: - fastpathTV.DecMapUintptrInt16V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUintptrInt16V(v, false, d) case *map[uintptr]int16: - v2, changed2 := fastpathTV.DecMapUintptrInt16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := 
fastpathTV.DecMapUintptrInt16V(*v, true, d); changed2 { *v = v2 } case map[uintptr]int32: - fastpathTV.DecMapUintptrInt32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUintptrInt32V(v, false, d) case *map[uintptr]int32: - v2, changed2 := fastpathTV.DecMapUintptrInt32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUintptrInt32V(*v, true, d); changed2 { *v = v2 } case map[uintptr]int64: - fastpathTV.DecMapUintptrInt64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUintptrInt64V(v, false, d) case *map[uintptr]int64: - v2, changed2 := fastpathTV.DecMapUintptrInt64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUintptrInt64V(*v, true, d); changed2 { *v = v2 } case map[uintptr]float32: - fastpathTV.DecMapUintptrFloat32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUintptrFloat32V(v, false, d) case *map[uintptr]float32: - v2, changed2 := fastpathTV.DecMapUintptrFloat32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUintptrFloat32V(*v, true, d); changed2 { *v = v2 } case map[uintptr]float64: - fastpathTV.DecMapUintptrFloat64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUintptrFloat64V(v, false, d) case *map[uintptr]float64: - v2, changed2 := fastpathTV.DecMapUintptrFloat64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUintptrFloat64V(*v, true, d); changed2 { *v = v2 } case map[uintptr]bool: - fastpathTV.DecMapUintptrBoolV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapUintptrBoolV(v, false, d) case *map[uintptr]bool: - v2, changed2 := fastpathTV.DecMapUintptrBoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapUintptrBoolV(*v, true, d); changed2 { *v = v2 } case []int: - fastpathTV.DecSliceIntV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecSliceIntV(v, false, d) case *[]int: - v2, changed2 := fastpathTV.DecSliceIntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecSliceIntV(*v, true, d); changed2 { *v = v2 } case map[int]interface{}: - fastpathTV.DecMapIntIntfV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapIntIntfV(v, false, d) case *map[int]interface{}: - v2, changed2 := fastpathTV.DecMapIntIntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapIntIntfV(*v, true, d); changed2 { *v = v2 } case map[int]string: - fastpathTV.DecMapIntStringV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapIntStringV(v, false, d) case *map[int]string: - v2, changed2 := fastpathTV.DecMapIntStringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapIntStringV(*v, true, d); changed2 { *v = v2 } case map[int]uint: - fastpathTV.DecMapIntUintV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapIntUintV(v, false, d) case *map[int]uint: - v2, changed2 := fastpathTV.DecMapIntUintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapIntUintV(*v, true, d); changed2 { *v = v2 } case map[int]uint8: - fastpathTV.DecMapIntUint8V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapIntUint8V(v, false, d) case *map[int]uint8: - v2, changed2 := fastpathTV.DecMapIntUint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapIntUint8V(*v, true, d); changed2 { *v = v2 } case map[int]uint16: - fastpathTV.DecMapIntUint16V(v, fastpathCheckNilFalse, false, d) + 
fastpathTV.DecMapIntUint16V(v, false, d) case *map[int]uint16: - v2, changed2 := fastpathTV.DecMapIntUint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapIntUint16V(*v, true, d); changed2 { *v = v2 } case map[int]uint32: - fastpathTV.DecMapIntUint32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapIntUint32V(v, false, d) case *map[int]uint32: - v2, changed2 := fastpathTV.DecMapIntUint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapIntUint32V(*v, true, d); changed2 { *v = v2 } case map[int]uint64: - fastpathTV.DecMapIntUint64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapIntUint64V(v, false, d) case *map[int]uint64: - v2, changed2 := fastpathTV.DecMapIntUint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapIntUint64V(*v, true, d); changed2 { *v = v2 } case map[int]uintptr: - fastpathTV.DecMapIntUintptrV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapIntUintptrV(v, false, d) case *map[int]uintptr: - v2, changed2 := fastpathTV.DecMapIntUintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapIntUintptrV(*v, true, d); changed2 { *v = v2 } case map[int]int: - fastpathTV.DecMapIntIntV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapIntIntV(v, false, d) case *map[int]int: - v2, changed2 := fastpathTV.DecMapIntIntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapIntIntV(*v, true, d); changed2 { *v = v2 } case map[int]int8: - fastpathTV.DecMapIntInt8V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapIntInt8V(v, false, d) case *map[int]int8: - v2, changed2 := fastpathTV.DecMapIntInt8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapIntInt8V(*v, true, d); changed2 { *v = v2 } case map[int]int16: - fastpathTV.DecMapIntInt16V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapIntInt16V(v, false, d) case *map[int]int16: - v2, changed2 := fastpathTV.DecMapIntInt16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapIntInt16V(*v, true, d); changed2 { *v = v2 } case map[int]int32: - fastpathTV.DecMapIntInt32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapIntInt32V(v, false, d) case *map[int]int32: - v2, changed2 := fastpathTV.DecMapIntInt32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapIntInt32V(*v, true, d); changed2 { *v = v2 } case map[int]int64: - fastpathTV.DecMapIntInt64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapIntInt64V(v, false, d) case *map[int]int64: - v2, changed2 := fastpathTV.DecMapIntInt64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapIntInt64V(*v, true, d); changed2 { *v = v2 } case map[int]float32: - fastpathTV.DecMapIntFloat32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapIntFloat32V(v, false, d) case *map[int]float32: - v2, changed2 := fastpathTV.DecMapIntFloat32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapIntFloat32V(*v, true, d); changed2 { *v = v2 } case map[int]float64: - fastpathTV.DecMapIntFloat64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapIntFloat64V(v, false, d) case *map[int]float64: - v2, changed2 := fastpathTV.DecMapIntFloat64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapIntFloat64V(*v, true, d); changed2 { *v = v2 } 
case map[int]bool: - fastpathTV.DecMapIntBoolV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapIntBoolV(v, false, d) case *map[int]bool: - v2, changed2 := fastpathTV.DecMapIntBoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapIntBoolV(*v, true, d); changed2 { *v = v2 } case []int8: - fastpathTV.DecSliceInt8V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecSliceInt8V(v, false, d) case *[]int8: - v2, changed2 := fastpathTV.DecSliceInt8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecSliceInt8V(*v, true, d); changed2 { *v = v2 } case map[int8]interface{}: - fastpathTV.DecMapInt8IntfV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt8IntfV(v, false, d) case *map[int8]interface{}: - v2, changed2 := fastpathTV.DecMapInt8IntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt8IntfV(*v, true, d); changed2 { *v = v2 } case map[int8]string: - fastpathTV.DecMapInt8StringV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt8StringV(v, false, d) case *map[int8]string: - v2, changed2 := fastpathTV.DecMapInt8StringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt8StringV(*v, true, d); changed2 { *v = v2 } case map[int8]uint: - fastpathTV.DecMapInt8UintV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt8UintV(v, false, d) case *map[int8]uint: - v2, changed2 := fastpathTV.DecMapInt8UintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt8UintV(*v, true, d); changed2 { *v = v2 } case map[int8]uint8: - fastpathTV.DecMapInt8Uint8V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt8Uint8V(v, false, d) case *map[int8]uint8: - v2, changed2 := fastpathTV.DecMapInt8Uint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt8Uint8V(*v, true, d); changed2 { *v = v2 } case map[int8]uint16: - fastpathTV.DecMapInt8Uint16V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt8Uint16V(v, false, d) case *map[int8]uint16: - v2, changed2 := fastpathTV.DecMapInt8Uint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt8Uint16V(*v, true, d); changed2 { *v = v2 } case map[int8]uint32: - fastpathTV.DecMapInt8Uint32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt8Uint32V(v, false, d) case *map[int8]uint32: - v2, changed2 := fastpathTV.DecMapInt8Uint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt8Uint32V(*v, true, d); changed2 { *v = v2 } case map[int8]uint64: - fastpathTV.DecMapInt8Uint64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt8Uint64V(v, false, d) case *map[int8]uint64: - v2, changed2 := fastpathTV.DecMapInt8Uint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt8Uint64V(*v, true, d); changed2 { *v = v2 } case map[int8]uintptr: - fastpathTV.DecMapInt8UintptrV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt8UintptrV(v, false, d) case *map[int8]uintptr: - v2, changed2 := fastpathTV.DecMapInt8UintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt8UintptrV(*v, true, d); changed2 { *v = v2 } case map[int8]int: - fastpathTV.DecMapInt8IntV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt8IntV(v, false, d) case *map[int8]int: - v2, changed2 := fastpathTV.DecMapInt8IntV(*v, fastpathCheckNilFalse, 
true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt8IntV(*v, true, d); changed2 { *v = v2 } case map[int8]int8: - fastpathTV.DecMapInt8Int8V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt8Int8V(v, false, d) case *map[int8]int8: - v2, changed2 := fastpathTV.DecMapInt8Int8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt8Int8V(*v, true, d); changed2 { *v = v2 } case map[int8]int16: - fastpathTV.DecMapInt8Int16V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt8Int16V(v, false, d) case *map[int8]int16: - v2, changed2 := fastpathTV.DecMapInt8Int16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt8Int16V(*v, true, d); changed2 { *v = v2 } case map[int8]int32: - fastpathTV.DecMapInt8Int32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt8Int32V(v, false, d) case *map[int8]int32: - v2, changed2 := fastpathTV.DecMapInt8Int32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt8Int32V(*v, true, d); changed2 { *v = v2 } case map[int8]int64: - fastpathTV.DecMapInt8Int64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt8Int64V(v, false, d) case *map[int8]int64: - v2, changed2 := fastpathTV.DecMapInt8Int64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt8Int64V(*v, true, d); changed2 { *v = v2 } case map[int8]float32: - fastpathTV.DecMapInt8Float32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt8Float32V(v, false, d) case *map[int8]float32: - v2, changed2 := fastpathTV.DecMapInt8Float32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt8Float32V(*v, true, d); changed2 { *v = v2 } case map[int8]float64: - fastpathTV.DecMapInt8Float64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt8Float64V(v, false, d) case *map[int8]float64: - v2, changed2 := fastpathTV.DecMapInt8Float64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt8Float64V(*v, true, d); changed2 { *v = v2 } case map[int8]bool: - fastpathTV.DecMapInt8BoolV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt8BoolV(v, false, d) case *map[int8]bool: - v2, changed2 := fastpathTV.DecMapInt8BoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt8BoolV(*v, true, d); changed2 { *v = v2 } case []int16: - fastpathTV.DecSliceInt16V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecSliceInt16V(v, false, d) case *[]int16: - v2, changed2 := fastpathTV.DecSliceInt16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecSliceInt16V(*v, true, d); changed2 { *v = v2 } case map[int16]interface{}: - fastpathTV.DecMapInt16IntfV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt16IntfV(v, false, d) case *map[int16]interface{}: - v2, changed2 := fastpathTV.DecMapInt16IntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt16IntfV(*v, true, d); changed2 { *v = v2 } case map[int16]string: - fastpathTV.DecMapInt16StringV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt16StringV(v, false, d) case *map[int16]string: - v2, changed2 := fastpathTV.DecMapInt16StringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt16StringV(*v, true, d); changed2 { *v = v2 } case map[int16]uint: - fastpathTV.DecMapInt16UintV(v, fastpathCheckNilFalse, false, d) + 
fastpathTV.DecMapInt16UintV(v, false, d) case *map[int16]uint: - v2, changed2 := fastpathTV.DecMapInt16UintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt16UintV(*v, true, d); changed2 { *v = v2 } case map[int16]uint8: - fastpathTV.DecMapInt16Uint8V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt16Uint8V(v, false, d) case *map[int16]uint8: - v2, changed2 := fastpathTV.DecMapInt16Uint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt16Uint8V(*v, true, d); changed2 { *v = v2 } case map[int16]uint16: - fastpathTV.DecMapInt16Uint16V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt16Uint16V(v, false, d) case *map[int16]uint16: - v2, changed2 := fastpathTV.DecMapInt16Uint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt16Uint16V(*v, true, d); changed2 { *v = v2 } case map[int16]uint32: - fastpathTV.DecMapInt16Uint32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt16Uint32V(v, false, d) case *map[int16]uint32: - v2, changed2 := fastpathTV.DecMapInt16Uint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt16Uint32V(*v, true, d); changed2 { *v = v2 } case map[int16]uint64: - fastpathTV.DecMapInt16Uint64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt16Uint64V(v, false, d) case *map[int16]uint64: - v2, changed2 := fastpathTV.DecMapInt16Uint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt16Uint64V(*v, true, d); changed2 { *v = v2 } case map[int16]uintptr: - fastpathTV.DecMapInt16UintptrV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt16UintptrV(v, false, d) case *map[int16]uintptr: - v2, changed2 := fastpathTV.DecMapInt16UintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt16UintptrV(*v, true, d); changed2 { *v = v2 } case map[int16]int: - fastpathTV.DecMapInt16IntV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt16IntV(v, false, d) case *map[int16]int: - v2, changed2 := fastpathTV.DecMapInt16IntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt16IntV(*v, true, d); changed2 { *v = v2 } case map[int16]int8: - fastpathTV.DecMapInt16Int8V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt16Int8V(v, false, d) case *map[int16]int8: - v2, changed2 := fastpathTV.DecMapInt16Int8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt16Int8V(*v, true, d); changed2 { *v = v2 } case map[int16]int16: - fastpathTV.DecMapInt16Int16V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt16Int16V(v, false, d) case *map[int16]int16: - v2, changed2 := fastpathTV.DecMapInt16Int16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt16Int16V(*v, true, d); changed2 { *v = v2 } case map[int16]int32: - fastpathTV.DecMapInt16Int32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt16Int32V(v, false, d) case *map[int16]int32: - v2, changed2 := fastpathTV.DecMapInt16Int32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt16Int32V(*v, true, d); changed2 { *v = v2 } case map[int16]int64: - fastpathTV.DecMapInt16Int64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt16Int64V(v, false, d) case *map[int16]int64: - v2, changed2 := fastpathTV.DecMapInt16Int64V(*v, fastpathCheckNilFalse, true, 
d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt16Int64V(*v, true, d); changed2 { *v = v2 } case map[int16]float32: - fastpathTV.DecMapInt16Float32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt16Float32V(v, false, d) case *map[int16]float32: - v2, changed2 := fastpathTV.DecMapInt16Float32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt16Float32V(*v, true, d); changed2 { *v = v2 } case map[int16]float64: - fastpathTV.DecMapInt16Float64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt16Float64V(v, false, d) case *map[int16]float64: - v2, changed2 := fastpathTV.DecMapInt16Float64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt16Float64V(*v, true, d); changed2 { *v = v2 } case map[int16]bool: - fastpathTV.DecMapInt16BoolV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt16BoolV(v, false, d) case *map[int16]bool: - v2, changed2 := fastpathTV.DecMapInt16BoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt16BoolV(*v, true, d); changed2 { *v = v2 } case []int32: - fastpathTV.DecSliceInt32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecSliceInt32V(v, false, d) case *[]int32: - v2, changed2 := fastpathTV.DecSliceInt32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecSliceInt32V(*v, true, d); changed2 { *v = v2 } case map[int32]interface{}: - fastpathTV.DecMapInt32IntfV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt32IntfV(v, false, d) case *map[int32]interface{}: - v2, changed2 := fastpathTV.DecMapInt32IntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt32IntfV(*v, true, d); changed2 { *v = v2 } case map[int32]string: - fastpathTV.DecMapInt32StringV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt32StringV(v, false, d) case *map[int32]string: - v2, changed2 := fastpathTV.DecMapInt32StringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt32StringV(*v, true, d); changed2 { *v = v2 } case map[int32]uint: - fastpathTV.DecMapInt32UintV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt32UintV(v, false, d) case *map[int32]uint: - v2, changed2 := fastpathTV.DecMapInt32UintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt32UintV(*v, true, d); changed2 { *v = v2 } case map[int32]uint8: - fastpathTV.DecMapInt32Uint8V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt32Uint8V(v, false, d) case *map[int32]uint8: - v2, changed2 := fastpathTV.DecMapInt32Uint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt32Uint8V(*v, true, d); changed2 { *v = v2 } case map[int32]uint16: - fastpathTV.DecMapInt32Uint16V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt32Uint16V(v, false, d) case *map[int32]uint16: - v2, changed2 := fastpathTV.DecMapInt32Uint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt32Uint16V(*v, true, d); changed2 { *v = v2 } case map[int32]uint32: - fastpathTV.DecMapInt32Uint32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt32Uint32V(v, false, d) case *map[int32]uint32: - v2, changed2 := fastpathTV.DecMapInt32Uint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt32Uint32V(*v, true, d); changed2 { *v = v2 } case map[int32]uint64: - 
fastpathTV.DecMapInt32Uint64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt32Uint64V(v, false, d) case *map[int32]uint64: - v2, changed2 := fastpathTV.DecMapInt32Uint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt32Uint64V(*v, true, d); changed2 { *v = v2 } case map[int32]uintptr: - fastpathTV.DecMapInt32UintptrV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt32UintptrV(v, false, d) case *map[int32]uintptr: - v2, changed2 := fastpathTV.DecMapInt32UintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt32UintptrV(*v, true, d); changed2 { *v = v2 } case map[int32]int: - fastpathTV.DecMapInt32IntV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt32IntV(v, false, d) case *map[int32]int: - v2, changed2 := fastpathTV.DecMapInt32IntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt32IntV(*v, true, d); changed2 { *v = v2 } case map[int32]int8: - fastpathTV.DecMapInt32Int8V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt32Int8V(v, false, d) case *map[int32]int8: - v2, changed2 := fastpathTV.DecMapInt32Int8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt32Int8V(*v, true, d); changed2 { *v = v2 } case map[int32]int16: - fastpathTV.DecMapInt32Int16V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt32Int16V(v, false, d) case *map[int32]int16: - v2, changed2 := fastpathTV.DecMapInt32Int16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt32Int16V(*v, true, d); changed2 { *v = v2 } case map[int32]int32: - fastpathTV.DecMapInt32Int32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt32Int32V(v, false, d) case *map[int32]int32: - v2, changed2 := fastpathTV.DecMapInt32Int32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt32Int32V(*v, true, d); changed2 { *v = v2 } case map[int32]int64: - fastpathTV.DecMapInt32Int64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt32Int64V(v, false, d) case *map[int32]int64: - v2, changed2 := fastpathTV.DecMapInt32Int64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt32Int64V(*v, true, d); changed2 { *v = v2 } case map[int32]float32: - fastpathTV.DecMapInt32Float32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt32Float32V(v, false, d) case *map[int32]float32: - v2, changed2 := fastpathTV.DecMapInt32Float32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt32Float32V(*v, true, d); changed2 { *v = v2 } case map[int32]float64: - fastpathTV.DecMapInt32Float64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt32Float64V(v, false, d) case *map[int32]float64: - v2, changed2 := fastpathTV.DecMapInt32Float64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt32Float64V(*v, true, d); changed2 { *v = v2 } case map[int32]bool: - fastpathTV.DecMapInt32BoolV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt32BoolV(v, false, d) case *map[int32]bool: - v2, changed2 := fastpathTV.DecMapInt32BoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt32BoolV(*v, true, d); changed2 { *v = v2 } case []int64: - fastpathTV.DecSliceInt64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecSliceInt64V(v, false, d) case *[]int64: - v2, changed2 := 
fastpathTV.DecSliceInt64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecSliceInt64V(*v, true, d); changed2 { *v = v2 } case map[int64]interface{}: - fastpathTV.DecMapInt64IntfV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt64IntfV(v, false, d) case *map[int64]interface{}: - v2, changed2 := fastpathTV.DecMapInt64IntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt64IntfV(*v, true, d); changed2 { *v = v2 } case map[int64]string: - fastpathTV.DecMapInt64StringV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt64StringV(v, false, d) case *map[int64]string: - v2, changed2 := fastpathTV.DecMapInt64StringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt64StringV(*v, true, d); changed2 { *v = v2 } case map[int64]uint: - fastpathTV.DecMapInt64UintV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt64UintV(v, false, d) case *map[int64]uint: - v2, changed2 := fastpathTV.DecMapInt64UintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt64UintV(*v, true, d); changed2 { *v = v2 } case map[int64]uint8: - fastpathTV.DecMapInt64Uint8V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt64Uint8V(v, false, d) case *map[int64]uint8: - v2, changed2 := fastpathTV.DecMapInt64Uint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt64Uint8V(*v, true, d); changed2 { *v = v2 } case map[int64]uint16: - fastpathTV.DecMapInt64Uint16V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt64Uint16V(v, false, d) case *map[int64]uint16: - v2, changed2 := fastpathTV.DecMapInt64Uint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt64Uint16V(*v, true, d); changed2 { *v = v2 } case map[int64]uint32: - fastpathTV.DecMapInt64Uint32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt64Uint32V(v, false, d) case *map[int64]uint32: - v2, changed2 := fastpathTV.DecMapInt64Uint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt64Uint32V(*v, true, d); changed2 { *v = v2 } case map[int64]uint64: - fastpathTV.DecMapInt64Uint64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt64Uint64V(v, false, d) case *map[int64]uint64: - v2, changed2 := fastpathTV.DecMapInt64Uint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt64Uint64V(*v, true, d); changed2 { *v = v2 } case map[int64]uintptr: - fastpathTV.DecMapInt64UintptrV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt64UintptrV(v, false, d) case *map[int64]uintptr: - v2, changed2 := fastpathTV.DecMapInt64UintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt64UintptrV(*v, true, d); changed2 { *v = v2 } case map[int64]int: - fastpathTV.DecMapInt64IntV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt64IntV(v, false, d) case *map[int64]int: - v2, changed2 := fastpathTV.DecMapInt64IntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt64IntV(*v, true, d); changed2 { *v = v2 } case map[int64]int8: - fastpathTV.DecMapInt64Int8V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt64Int8V(v, false, d) case *map[int64]int8: - v2, changed2 := fastpathTV.DecMapInt64Int8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt64Int8V(*v, true, 
d); changed2 { *v = v2 } case map[int64]int16: - fastpathTV.DecMapInt64Int16V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt64Int16V(v, false, d) case *map[int64]int16: - v2, changed2 := fastpathTV.DecMapInt64Int16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt64Int16V(*v, true, d); changed2 { *v = v2 } case map[int64]int32: - fastpathTV.DecMapInt64Int32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt64Int32V(v, false, d) case *map[int64]int32: - v2, changed2 := fastpathTV.DecMapInt64Int32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt64Int32V(*v, true, d); changed2 { *v = v2 } case map[int64]int64: - fastpathTV.DecMapInt64Int64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt64Int64V(v, false, d) case *map[int64]int64: - v2, changed2 := fastpathTV.DecMapInt64Int64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt64Int64V(*v, true, d); changed2 { *v = v2 } case map[int64]float32: - fastpathTV.DecMapInt64Float32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt64Float32V(v, false, d) case *map[int64]float32: - v2, changed2 := fastpathTV.DecMapInt64Float32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt64Float32V(*v, true, d); changed2 { *v = v2 } case map[int64]float64: - fastpathTV.DecMapInt64Float64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt64Float64V(v, false, d) case *map[int64]float64: - v2, changed2 := fastpathTV.DecMapInt64Float64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt64Float64V(*v, true, d); changed2 { *v = v2 } case map[int64]bool: - fastpathTV.DecMapInt64BoolV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapInt64BoolV(v, false, d) case *map[int64]bool: - v2, changed2 := fastpathTV.DecMapInt64BoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapInt64BoolV(*v, true, d); changed2 { *v = v2 } case []bool: - fastpathTV.DecSliceBoolV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecSliceBoolV(v, false, d) case *[]bool: - v2, changed2 := fastpathTV.DecSliceBoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecSliceBoolV(*v, true, d); changed2 { *v = v2 } case map[bool]interface{}: - fastpathTV.DecMapBoolIntfV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapBoolIntfV(v, false, d) case *map[bool]interface{}: - v2, changed2 := fastpathTV.DecMapBoolIntfV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapBoolIntfV(*v, true, d); changed2 { *v = v2 } case map[bool]string: - fastpathTV.DecMapBoolStringV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapBoolStringV(v, false, d) case *map[bool]string: - v2, changed2 := fastpathTV.DecMapBoolStringV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapBoolStringV(*v, true, d); changed2 { *v = v2 } case map[bool]uint: - fastpathTV.DecMapBoolUintV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapBoolUintV(v, false, d) case *map[bool]uint: - v2, changed2 := fastpathTV.DecMapBoolUintV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapBoolUintV(*v, true, d); changed2 { *v = v2 } case map[bool]uint8: - fastpathTV.DecMapBoolUint8V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapBoolUint8V(v, false, d) case *map[bool]uint8: - 
v2, changed2 := fastpathTV.DecMapBoolUint8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapBoolUint8V(*v, true, d); changed2 { *v = v2 } case map[bool]uint16: - fastpathTV.DecMapBoolUint16V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapBoolUint16V(v, false, d) case *map[bool]uint16: - v2, changed2 := fastpathTV.DecMapBoolUint16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapBoolUint16V(*v, true, d); changed2 { *v = v2 } case map[bool]uint32: - fastpathTV.DecMapBoolUint32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapBoolUint32V(v, false, d) case *map[bool]uint32: - v2, changed2 := fastpathTV.DecMapBoolUint32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapBoolUint32V(*v, true, d); changed2 { *v = v2 } case map[bool]uint64: - fastpathTV.DecMapBoolUint64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapBoolUint64V(v, false, d) case *map[bool]uint64: - v2, changed2 := fastpathTV.DecMapBoolUint64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapBoolUint64V(*v, true, d); changed2 { *v = v2 } case map[bool]uintptr: - fastpathTV.DecMapBoolUintptrV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapBoolUintptrV(v, false, d) case *map[bool]uintptr: - v2, changed2 := fastpathTV.DecMapBoolUintptrV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapBoolUintptrV(*v, true, d); changed2 { *v = v2 } case map[bool]int: - fastpathTV.DecMapBoolIntV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapBoolIntV(v, false, d) case *map[bool]int: - v2, changed2 := fastpathTV.DecMapBoolIntV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapBoolIntV(*v, true, d); changed2 { *v = v2 } case map[bool]int8: - fastpathTV.DecMapBoolInt8V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapBoolInt8V(v, false, d) case *map[bool]int8: - v2, changed2 := fastpathTV.DecMapBoolInt8V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapBoolInt8V(*v, true, d); changed2 { *v = v2 } case map[bool]int16: - fastpathTV.DecMapBoolInt16V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapBoolInt16V(v, false, d) case *map[bool]int16: - v2, changed2 := fastpathTV.DecMapBoolInt16V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapBoolInt16V(*v, true, d); changed2 { *v = v2 } case map[bool]int32: - fastpathTV.DecMapBoolInt32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapBoolInt32V(v, false, d) case *map[bool]int32: - v2, changed2 := fastpathTV.DecMapBoolInt32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapBoolInt32V(*v, true, d); changed2 { *v = v2 } case map[bool]int64: - fastpathTV.DecMapBoolInt64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapBoolInt64V(v, false, d) case *map[bool]int64: - v2, changed2 := fastpathTV.DecMapBoolInt64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapBoolInt64V(*v, true, d); changed2 { *v = v2 } case map[bool]float32: - fastpathTV.DecMapBoolFloat32V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapBoolFloat32V(v, false, d) case *map[bool]float32: - v2, changed2 := fastpathTV.DecMapBoolFloat32V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapBoolFloat32V(*v, true, d); changed2 { *v = v2 } case 
map[bool]float64: - fastpathTV.DecMapBoolFloat64V(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapBoolFloat64V(v, false, d) case *map[bool]float64: - v2, changed2 := fastpathTV.DecMapBoolFloat64V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapBoolFloat64V(*v, true, d); changed2 { *v = v2 } case map[bool]bool: - fastpathTV.DecMapBoolBoolV(v, fastpathCheckNilFalse, false, d) + fastpathTV.DecMapBoolBoolV(v, false, d) case *map[bool]bool: - v2, changed2 := fastpathTV.DecMapBoolBoolV(*v, fastpathCheckNilFalse, true, d) - if changed2 { + if v2, changed2 := fastpathTV.DecMapBoolBoolV(*v, true, d); changed2 { *v = v2 } @@ -18121,38 +15563,849 @@ func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return true } +func fastpathDecodeSetZeroTypeSwitch(iv interface{}, d *Decoder) bool { + switch v := iv.(type) { + + case *[]interface{}: + *v = nil + + case *map[interface{}]interface{}: + *v = nil + + case *map[interface{}]string: + *v = nil + + case *map[interface{}]uint: + *v = nil + + case *map[interface{}]uint8: + *v = nil + + case *map[interface{}]uint16: + *v = nil + + case *map[interface{}]uint32: + *v = nil + + case *map[interface{}]uint64: + *v = nil + + case *map[interface{}]uintptr: + *v = nil + + case *map[interface{}]int: + *v = nil + + case *map[interface{}]int8: + *v = nil + + case *map[interface{}]int16: + *v = nil + + case *map[interface{}]int32: + *v = nil + + case *map[interface{}]int64: + *v = nil + + case *map[interface{}]float32: + *v = nil + + case *map[interface{}]float64: + *v = nil + + case *map[interface{}]bool: + *v = nil + + case *[]string: + *v = nil + + case *map[string]interface{}: + *v = nil + + case *map[string]string: + *v = nil + + case *map[string]uint: + *v = nil + + case *map[string]uint8: + *v = nil + + case *map[string]uint16: + *v = nil + + case *map[string]uint32: + *v = nil + + case *map[string]uint64: + *v = nil + + case *map[string]uintptr: + *v = nil + + case *map[string]int: + *v = nil + + case *map[string]int8: + *v = nil + + case *map[string]int16: + *v = nil + + case *map[string]int32: + *v = nil + + case *map[string]int64: + *v = nil + + case *map[string]float32: + *v = nil + + case *map[string]float64: + *v = nil + + case *map[string]bool: + *v = nil + + case *[]float32: + *v = nil + + case *map[float32]interface{}: + *v = nil + + case *map[float32]string: + *v = nil + + case *map[float32]uint: + *v = nil + + case *map[float32]uint8: + *v = nil + + case *map[float32]uint16: + *v = nil + + case *map[float32]uint32: + *v = nil + + case *map[float32]uint64: + *v = nil + + case *map[float32]uintptr: + *v = nil + + case *map[float32]int: + *v = nil + + case *map[float32]int8: + *v = nil + + case *map[float32]int16: + *v = nil + + case *map[float32]int32: + *v = nil + + case *map[float32]int64: + *v = nil + + case *map[float32]float32: + *v = nil + + case *map[float32]float64: + *v = nil + + case *map[float32]bool: + *v = nil + + case *[]float64: + *v = nil + + case *map[float64]interface{}: + *v = nil + + case *map[float64]string: + *v = nil + + case *map[float64]uint: + *v = nil + + case *map[float64]uint8: + *v = nil + + case *map[float64]uint16: + *v = nil + + case *map[float64]uint32: + *v = nil + + case *map[float64]uint64: + *v = nil + + case *map[float64]uintptr: + *v = nil + + case *map[float64]int: + *v = nil + + case *map[float64]int8: + *v = nil + + case *map[float64]int16: + *v = nil + + case *map[float64]int32: + *v = nil + + case *map[float64]int64: + *v = nil + + case 
*map[float64]float32: + *v = nil + + case *map[float64]float64: + *v = nil + + case *map[float64]bool: + *v = nil + + case *[]uint: + *v = nil + + case *map[uint]interface{}: + *v = nil + + case *map[uint]string: + *v = nil + + case *map[uint]uint: + *v = nil + + case *map[uint]uint8: + *v = nil + + case *map[uint]uint16: + *v = nil + + case *map[uint]uint32: + *v = nil + + case *map[uint]uint64: + *v = nil + + case *map[uint]uintptr: + *v = nil + + case *map[uint]int: + *v = nil + + case *map[uint]int8: + *v = nil + + case *map[uint]int16: + *v = nil + + case *map[uint]int32: + *v = nil + + case *map[uint]int64: + *v = nil + + case *map[uint]float32: + *v = nil + + case *map[uint]float64: + *v = nil + + case *map[uint]bool: + *v = nil + + case *map[uint8]interface{}: + *v = nil + + case *map[uint8]string: + *v = nil + + case *map[uint8]uint: + *v = nil + + case *map[uint8]uint8: + *v = nil + + case *map[uint8]uint16: + *v = nil + + case *map[uint8]uint32: + *v = nil + + case *map[uint8]uint64: + *v = nil + + case *map[uint8]uintptr: + *v = nil + + case *map[uint8]int: + *v = nil + + case *map[uint8]int8: + *v = nil + + case *map[uint8]int16: + *v = nil + + case *map[uint8]int32: + *v = nil + + case *map[uint8]int64: + *v = nil + + case *map[uint8]float32: + *v = nil + + case *map[uint8]float64: + *v = nil + + case *map[uint8]bool: + *v = nil + + case *[]uint16: + *v = nil + + case *map[uint16]interface{}: + *v = nil + + case *map[uint16]string: + *v = nil + + case *map[uint16]uint: + *v = nil + + case *map[uint16]uint8: + *v = nil + + case *map[uint16]uint16: + *v = nil + + case *map[uint16]uint32: + *v = nil + + case *map[uint16]uint64: + *v = nil + + case *map[uint16]uintptr: + *v = nil + + case *map[uint16]int: + *v = nil + + case *map[uint16]int8: + *v = nil + + case *map[uint16]int16: + *v = nil + + case *map[uint16]int32: + *v = nil + + case *map[uint16]int64: + *v = nil + + case *map[uint16]float32: + *v = nil + + case *map[uint16]float64: + *v = nil + + case *map[uint16]bool: + *v = nil + + case *[]uint32: + *v = nil + + case *map[uint32]interface{}: + *v = nil + + case *map[uint32]string: + *v = nil + + case *map[uint32]uint: + *v = nil + + case *map[uint32]uint8: + *v = nil + + case *map[uint32]uint16: + *v = nil + + case *map[uint32]uint32: + *v = nil + + case *map[uint32]uint64: + *v = nil + + case *map[uint32]uintptr: + *v = nil + + case *map[uint32]int: + *v = nil + + case *map[uint32]int8: + *v = nil + + case *map[uint32]int16: + *v = nil + + case *map[uint32]int32: + *v = nil + + case *map[uint32]int64: + *v = nil + + case *map[uint32]float32: + *v = nil + + case *map[uint32]float64: + *v = nil + + case *map[uint32]bool: + *v = nil + + case *[]uint64: + *v = nil + + case *map[uint64]interface{}: + *v = nil + + case *map[uint64]string: + *v = nil + + case *map[uint64]uint: + *v = nil + + case *map[uint64]uint8: + *v = nil + + case *map[uint64]uint16: + *v = nil + + case *map[uint64]uint32: + *v = nil + + case *map[uint64]uint64: + *v = nil + + case *map[uint64]uintptr: + *v = nil + + case *map[uint64]int: + *v = nil + + case *map[uint64]int8: + *v = nil + + case *map[uint64]int16: + *v = nil + + case *map[uint64]int32: + *v = nil + + case *map[uint64]int64: + *v = nil + + case *map[uint64]float32: + *v = nil + + case *map[uint64]float64: + *v = nil + + case *map[uint64]bool: + *v = nil + + case *[]uintptr: + *v = nil + + case *map[uintptr]interface{}: + *v = nil + + case *map[uintptr]string: + *v = nil + + case *map[uintptr]uint: + *v = nil + + case *map[uintptr]uint8: + *v 
= nil + + case *map[uintptr]uint16: + *v = nil + + case *map[uintptr]uint32: + *v = nil + + case *map[uintptr]uint64: + *v = nil + + case *map[uintptr]uintptr: + *v = nil + + case *map[uintptr]int: + *v = nil + + case *map[uintptr]int8: + *v = nil + + case *map[uintptr]int16: + *v = nil + + case *map[uintptr]int32: + *v = nil + + case *map[uintptr]int64: + *v = nil + + case *map[uintptr]float32: + *v = nil + + case *map[uintptr]float64: + *v = nil + + case *map[uintptr]bool: + *v = nil + + case *[]int: + *v = nil + + case *map[int]interface{}: + *v = nil + + case *map[int]string: + *v = nil + + case *map[int]uint: + *v = nil + + case *map[int]uint8: + *v = nil + + case *map[int]uint16: + *v = nil + + case *map[int]uint32: + *v = nil + + case *map[int]uint64: + *v = nil + + case *map[int]uintptr: + *v = nil + + case *map[int]int: + *v = nil + + case *map[int]int8: + *v = nil + + case *map[int]int16: + *v = nil + + case *map[int]int32: + *v = nil + + case *map[int]int64: + *v = nil + + case *map[int]float32: + *v = nil + + case *map[int]float64: + *v = nil + + case *map[int]bool: + *v = nil + + case *[]int8: + *v = nil + + case *map[int8]interface{}: + *v = nil + + case *map[int8]string: + *v = nil + + case *map[int8]uint: + *v = nil + + case *map[int8]uint8: + *v = nil + + case *map[int8]uint16: + *v = nil + + case *map[int8]uint32: + *v = nil + + case *map[int8]uint64: + *v = nil + + case *map[int8]uintptr: + *v = nil + + case *map[int8]int: + *v = nil + + case *map[int8]int8: + *v = nil + + case *map[int8]int16: + *v = nil + + case *map[int8]int32: + *v = nil + + case *map[int8]int64: + *v = nil + + case *map[int8]float32: + *v = nil + + case *map[int8]float64: + *v = nil + + case *map[int8]bool: + *v = nil + + case *[]int16: + *v = nil + + case *map[int16]interface{}: + *v = nil + + case *map[int16]string: + *v = nil + + case *map[int16]uint: + *v = nil + + case *map[int16]uint8: + *v = nil + + case *map[int16]uint16: + *v = nil + + case *map[int16]uint32: + *v = nil + + case *map[int16]uint64: + *v = nil + + case *map[int16]uintptr: + *v = nil + + case *map[int16]int: + *v = nil + + case *map[int16]int8: + *v = nil + + case *map[int16]int16: + *v = nil + + case *map[int16]int32: + *v = nil + + case *map[int16]int64: + *v = nil + + case *map[int16]float32: + *v = nil + + case *map[int16]float64: + *v = nil + + case *map[int16]bool: + *v = nil + + case *[]int32: + *v = nil + + case *map[int32]interface{}: + *v = nil + + case *map[int32]string: + *v = nil + + case *map[int32]uint: + *v = nil + + case *map[int32]uint8: + *v = nil + + case *map[int32]uint16: + *v = nil + + case *map[int32]uint32: + *v = nil + + case *map[int32]uint64: + *v = nil + + case *map[int32]uintptr: + *v = nil + + case *map[int32]int: + *v = nil + + case *map[int32]int8: + *v = nil + + case *map[int32]int16: + *v = nil + + case *map[int32]int32: + *v = nil + + case *map[int32]int64: + *v = nil + + case *map[int32]float32: + *v = nil + + case *map[int32]float64: + *v = nil + + case *map[int32]bool: + *v = nil + + case *[]int64: + *v = nil + + case *map[int64]interface{}: + *v = nil + + case *map[int64]string: + *v = nil + + case *map[int64]uint: + *v = nil + + case *map[int64]uint8: + *v = nil + + case *map[int64]uint16: + *v = nil + + case *map[int64]uint32: + *v = nil + + case *map[int64]uint64: + *v = nil + + case *map[int64]uintptr: + *v = nil + + case *map[int64]int: + *v = nil + + case *map[int64]int8: + *v = nil + + case *map[int64]int16: + *v = nil + + case *map[int64]int32: + *v = nil + + case 
*map[int64]int64: + *v = nil + + case *map[int64]float32: + *v = nil + + case *map[int64]float64: + *v = nil + + case *map[int64]bool: + *v = nil + + case *[]bool: + *v = nil + + case *map[bool]interface{}: + *v = nil + + case *map[bool]string: + *v = nil + + case *map[bool]uint: + *v = nil + + case *map[bool]uint8: + *v = nil + + case *map[bool]uint16: + *v = nil + + case *map[bool]uint32: + *v = nil + + case *map[bool]uint64: + *v = nil + + case *map[bool]uintptr: + *v = nil + + case *map[bool]int: + *v = nil + + case *map[bool]int8: + *v = nil + + case *map[bool]int16: + *v = nil + + case *map[bool]int32: + *v = nil + + case *map[bool]int64: + *v = nil + + case *map[bool]float32: + *v = nil + + case *map[bool]float64: + *v = nil + + case *map[bool]bool: + *v = nil + + default: + _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) + return false + } + return true +} + // -- -- fast path functions -func (f *decFnInfo) fastpathDecSliceIntfR(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]interface{}) - v, changed := fastpathTV.DecSliceIntfV(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { +func (d *Decoder) fastpathDecSliceIntfR(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]interface{}) + if v, changed := fastpathTV.DecSliceIntfV(*vp, !array, d); changed { *vp = v } } else { - v := rv.Interface().([]interface{}) - fastpathTV.DecSliceIntfV(v, fastpathCheckNilFalse, false, f.d) + fastpathTV.DecSliceIntfV(rv2i(rv).([]interface{}), !array, d) } } - -func (f fastpathT) DecSliceIntfX(vp *[]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecSliceIntfV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecSliceIntfX(vp *[]interface{}, d *Decoder) { + if v, changed := f.DecSliceIntfV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecSliceIntfV(v []interface{}, checkNil bool, canChange bool, d *Decoder) (_ []interface{}, changed bool) { +func (_ fastpathT) DecSliceIntfV(v []interface{}, canChange bool, d *Decoder) (_ []interface{}, changed bool) { dd := d.d - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - slh, containerLenS := d.decSliceHelperStart() if containerLenS == 0 { if canChange { @@ -18167,126 +16420,82 @@ func (_ fastpathT) DecSliceIntfV(v []interface{}, checkNil bool, canChange bool, return v, changed } - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 16) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]interface{}, xlen) - } - } else { - v = make([]interface{}, xlen) - } - changed = true + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16) + if xlen <= cap(v) { + v = v[:xlen] } else { - d.arrayCannotExpand(len(v), containerLenS) + v = make([]interface{}, xlen) } - x2read = len(v) + changed = true } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - d.decode(&v[j]) - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, nil) - slh.ElemContainerState(j) - d.decode(&v[j]) - } - } else if !canChange { - for ; j < containerLenS; j++ { - 
slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []interface{}{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - if cap(v) == 0 { - v = make([]interface{}, 1, 4) + v = v[:containerLenS] changed = true } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, nil) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - d.decode(&v[j]) - + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16) } else { - d.swallow() + xlen = 8 } - breakFound = dd.CheckBreak() + v = make([]interface{}, xlen) + changed = true } - if canChange && j < len(v) { + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, nil) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + d.decode(&v[j]) + } + } + if canChange { + if j < len(v) { v = v[:j] changed = true + } else if j == 0 && v == nil { + v = make([]interface{}, 0) + changed = true } } slh.End() return v, changed } -func (f *decFnInfo) fastpathDecSliceStringR(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]string) - v, changed := fastpathTV.DecSliceStringV(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { +func (d *Decoder) fastpathDecSliceStringR(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]string) + if v, changed := fastpathTV.DecSliceStringV(*vp, !array, d); changed { *vp = v } } else { - v := rv.Interface().([]string) - fastpathTV.DecSliceStringV(v, fastpathCheckNilFalse, false, f.d) + fastpathTV.DecSliceStringV(rv2i(rv).([]string), !array, d) } } - -func (f fastpathT) DecSliceStringX(vp *[]string, checkNil bool, d *Decoder) { - v, changed := f.DecSliceStringV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecSliceStringX(vp *[]string, d *Decoder) { + if v, changed := f.DecSliceStringV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecSliceStringV(v []string, checkNil bool, canChange bool, d *Decoder) (_ []string, changed bool) { +func (_ fastpathT) DecSliceStringV(v []string, canChange bool, d *Decoder) (_ []string, changed bool) { dd := d.d - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - slh, containerLenS := d.decSliceHelperStart() if containerLenS == 0 { if canChange { @@ -18301,125 +16510,82 @@ func (_ fastpathT) DecSliceStringV(v []string, checkNil bool, canChange bool, d return v, changed } - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 16) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]string, xlen) - } - } else { - v = make([]string, xlen) - } - changed = true + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16) + if xlen <= cap(v) { + v = v[:xlen] } else { - d.arrayCannotExpand(len(v), 
containerLenS) + v = make([]string, xlen) } - x2read = len(v) + changed = true } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = dd.DecodeString() - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, "") - slh.ElemContainerState(j) - v[j] = dd.DecodeString() - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []string{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - if cap(v) == 0 { - v = make([]string, 1, 4) + v = v[:containerLenS] changed = true } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, "") - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = dd.DecodeString() + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16) } else { - d.swallow() + xlen = 8 } - breakFound = dd.CheckBreak() + v = make([]string, xlen) + changed = true } - if canChange && j < len(v) { + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, "") + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = dd.DecodeString() + } + } + if canChange { + if j < len(v) { v = v[:j] changed = true + } else if j == 0 && v == nil { + v = make([]string, 0) + changed = true } } slh.End() return v, changed } -func (f *decFnInfo) fastpathDecSliceFloat32R(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]float32) - v, changed := fastpathTV.DecSliceFloat32V(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { +func (d *Decoder) fastpathDecSliceFloat32R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]float32) + if v, changed := fastpathTV.DecSliceFloat32V(*vp, !array, d); changed { *vp = v } } else { - v := rv.Interface().([]float32) - fastpathTV.DecSliceFloat32V(v, fastpathCheckNilFalse, false, f.d) + fastpathTV.DecSliceFloat32V(rv2i(rv).([]float32), !array, d) } } - -func (f fastpathT) DecSliceFloat32X(vp *[]float32, checkNil bool, d *Decoder) { - v, changed := f.DecSliceFloat32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecSliceFloat32X(vp *[]float32, d *Decoder) { + if v, changed := f.DecSliceFloat32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecSliceFloat32V(v []float32, checkNil bool, canChange bool, d *Decoder) (_ []float32, changed bool) { +func (_ fastpathT) DecSliceFloat32V(v []float32, canChange bool, d *Decoder) (_ []float32, changed bool) { dd := d.d - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - slh, containerLenS := d.decSliceHelperStart() if containerLenS == 0 { if canChange { @@ -18434,125 +16600,82 @@ func (_ fastpathT) DecSliceFloat32V(v []float32, checkNil bool, canChange bool, return v, changed } - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool + hasLen := 
containerLenS > 0 + var xlen int + if hasLen && canChange { if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 4) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]float32, xlen) - } - } else { - v = make([]float32, xlen) - } - changed = true + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) + if xlen <= cap(v) { + v = v[:xlen] } else { - d.arrayCannotExpand(len(v), containerLenS) + v = make([]float32, xlen) } - x2read = len(v) + changed = true } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = float32(dd.DecodeFloat(true)) - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, 0) - slh.ElemContainerState(j) - v[j] = float32(dd.DecodeFloat(true)) - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []float32{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - if cap(v) == 0 { - v = make([]float32, 1, 4) + v = v[:containerLenS] changed = true } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = float32(dd.DecodeFloat(true)) + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) } else { - d.swallow() + xlen = 8 } - breakFound = dd.CheckBreak() + v = make([]float32, xlen) + changed = true } - if canChange && j < len(v) { + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = float32(dd.DecodeFloat(true)) + } + } + if canChange { + if j < len(v) { v = v[:j] changed = true + } else if j == 0 && v == nil { + v = make([]float32, 0) + changed = true } } slh.End() return v, changed } -func (f *decFnInfo) fastpathDecSliceFloat64R(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]float64) - v, changed := fastpathTV.DecSliceFloat64V(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { +func (d *Decoder) fastpathDecSliceFloat64R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]float64) + if v, changed := fastpathTV.DecSliceFloat64V(*vp, !array, d); changed { *vp = v } } else { - v := rv.Interface().([]float64) - fastpathTV.DecSliceFloat64V(v, fastpathCheckNilFalse, false, f.d) + fastpathTV.DecSliceFloat64V(rv2i(rv).([]float64), !array, d) } } - -func (f fastpathT) DecSliceFloat64X(vp *[]float64, checkNil bool, d *Decoder) { - v, changed := f.DecSliceFloat64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecSliceFloat64X(vp *[]float64, d *Decoder) { + if v, changed := f.DecSliceFloat64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecSliceFloat64V(v []float64, checkNil bool, canChange bool, d *Decoder) (_ []float64, changed bool) { 
+func (_ fastpathT) DecSliceFloat64V(v []float64, canChange bool, d *Decoder) (_ []float64, changed bool) { dd := d.d - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - slh, containerLenS := d.decSliceHelperStart() if containerLenS == 0 { if canChange { @@ -18567,125 +16690,82 @@ func (_ fastpathT) DecSliceFloat64V(v []float64, checkNil bool, canChange bool, return v, changed } - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]float64, xlen) - } - } else { - v = make([]float64, xlen) - } - changed = true + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xlen <= cap(v) { + v = v[:xlen] } else { - d.arrayCannotExpand(len(v), containerLenS) + v = make([]float64, xlen) } - x2read = len(v) + changed = true } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = dd.DecodeFloat(false) - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, 0) - slh.ElemContainerState(j) - v[j] = dd.DecodeFloat(false) - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []float64{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - if cap(v) == 0 { - v = make([]float64, 1, 4) + v = v[:containerLenS] changed = true } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = dd.DecodeFloat(false) + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) } else { - d.swallow() + xlen = 8 } - breakFound = dd.CheckBreak() + v = make([]float64, xlen) + changed = true } - if canChange && j < len(v) { + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = dd.DecodeFloat(false) + } + } + if canChange { + if j < len(v) { v = v[:j] changed = true + } else if j == 0 && v == nil { + v = make([]float64, 0) + changed = true } } slh.End() return v, changed } -func (f *decFnInfo) fastpathDecSliceUintR(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]uint) - v, changed := fastpathTV.DecSliceUintV(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { +func (d *Decoder) fastpathDecSliceUintR(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]uint) + if v, changed := fastpathTV.DecSliceUintV(*vp, !array, d); changed { *vp = v } } else { - v := rv.Interface().([]uint) - fastpathTV.DecSliceUintV(v, fastpathCheckNilFalse, false, f.d) + 
fastpathTV.DecSliceUintV(rv2i(rv).([]uint), !array, d) } } - -func (f fastpathT) DecSliceUintX(vp *[]uint, checkNil bool, d *Decoder) { - v, changed := f.DecSliceUintV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecSliceUintX(vp *[]uint, d *Decoder) { + if v, changed := f.DecSliceUintV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecSliceUintV(v []uint, checkNil bool, canChange bool, d *Decoder) (_ []uint, changed bool) { +func (_ fastpathT) DecSliceUintV(v []uint, canChange bool, d *Decoder) (_ []uint, changed bool) { dd := d.d - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - slh, containerLenS := d.decSliceHelperStart() if containerLenS == 0 { if canChange { @@ -18700,125 +16780,82 @@ func (_ fastpathT) DecSliceUintV(v []uint, checkNil bool, canChange bool, d *Dec return v, changed } - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]uint, xlen) - } - } else { - v = make([]uint, xlen) - } - changed = true + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xlen <= cap(v) { + v = v[:xlen] } else { - d.arrayCannotExpand(len(v), containerLenS) + v = make([]uint, xlen) } - x2read = len(v) + changed = true } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = uint(dd.DecodeUint(uintBitsize)) - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, 0) - slh.ElemContainerState(j) - v[j] = uint(dd.DecodeUint(uintBitsize)) - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []uint{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - if cap(v) == 0 { - v = make([]uint, 1, 4) + v = v[:containerLenS] changed = true } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = uint(dd.DecodeUint(uintBitsize)) + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) } else { - d.swallow() + xlen = 8 } - breakFound = dd.CheckBreak() + v = make([]uint, xlen) + changed = true } - if canChange && j < len(v) { + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = uint(dd.DecodeUint(uintBitsize)) + } + } + if canChange { + if j < len(v) { v = v[:j] changed = true + } else if j == 0 && v == nil { + v = make([]uint, 0) + changed = true } } slh.End() return v, changed } -func (f *decFnInfo) fastpathDecSliceUint16R(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]uint16) - v, changed := 
fastpathTV.DecSliceUint16V(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { +func (d *Decoder) fastpathDecSliceUint16R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]uint16) + if v, changed := fastpathTV.DecSliceUint16V(*vp, !array, d); changed { *vp = v } } else { - v := rv.Interface().([]uint16) - fastpathTV.DecSliceUint16V(v, fastpathCheckNilFalse, false, f.d) + fastpathTV.DecSliceUint16V(rv2i(rv).([]uint16), !array, d) } } - -func (f fastpathT) DecSliceUint16X(vp *[]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecSliceUint16V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecSliceUint16X(vp *[]uint16, d *Decoder) { + if v, changed := f.DecSliceUint16V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecSliceUint16V(v []uint16, checkNil bool, canChange bool, d *Decoder) (_ []uint16, changed bool) { +func (_ fastpathT) DecSliceUint16V(v []uint16, canChange bool, d *Decoder) (_ []uint16, changed bool) { dd := d.d - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - slh, containerLenS := d.decSliceHelperStart() if containerLenS == 0 { if canChange { @@ -18833,125 +16870,82 @@ func (_ fastpathT) DecSliceUint16V(v []uint16, checkNil bool, canChange bool, d return v, changed } - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 2) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]uint16, xlen) - } - } else { - v = make([]uint16, xlen) - } - changed = true + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2) + if xlen <= cap(v) { + v = v[:xlen] } else { - d.arrayCannotExpand(len(v), containerLenS) + v = make([]uint16, xlen) } - x2read = len(v) + changed = true } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = uint16(dd.DecodeUint(16)) - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, 0) - slh.ElemContainerState(j) - v[j] = uint16(dd.DecodeUint(16)) - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []uint16{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - if cap(v) == 0 { - v = make([]uint16, 1, 4) + v = v[:containerLenS] changed = true } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = uint16(dd.DecodeUint(16)) + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2) } else { - d.swallow() + xlen = 8 } - breakFound = dd.CheckBreak() + v = make([]uint16, xlen) + changed = true } - if canChange && j < len(v) { + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + 
slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = uint16(dd.DecodeUint(16)) + } + } + if canChange { + if j < len(v) { v = v[:j] changed = true + } else if j == 0 && v == nil { + v = make([]uint16, 0) + changed = true } } slh.End() return v, changed } -func (f *decFnInfo) fastpathDecSliceUint32R(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]uint32) - v, changed := fastpathTV.DecSliceUint32V(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { +func (d *Decoder) fastpathDecSliceUint32R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]uint32) + if v, changed := fastpathTV.DecSliceUint32V(*vp, !array, d); changed { *vp = v } } else { - v := rv.Interface().([]uint32) - fastpathTV.DecSliceUint32V(v, fastpathCheckNilFalse, false, f.d) + fastpathTV.DecSliceUint32V(rv2i(rv).([]uint32), !array, d) } } - -func (f fastpathT) DecSliceUint32X(vp *[]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecSliceUint32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecSliceUint32X(vp *[]uint32, d *Decoder) { + if v, changed := f.DecSliceUint32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecSliceUint32V(v []uint32, checkNil bool, canChange bool, d *Decoder) (_ []uint32, changed bool) { +func (_ fastpathT) DecSliceUint32V(v []uint32, canChange bool, d *Decoder) (_ []uint32, changed bool) { dd := d.d - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - slh, containerLenS := d.decSliceHelperStart() if containerLenS == 0 { if canChange { @@ -18966,125 +16960,82 @@ func (_ fastpathT) DecSliceUint32V(v []uint32, checkNil bool, canChange bool, d return v, changed } - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 4) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]uint32, xlen) - } - } else { - v = make([]uint32, xlen) - } - changed = true + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) + if xlen <= cap(v) { + v = v[:xlen] } else { - d.arrayCannotExpand(len(v), containerLenS) + v = make([]uint32, xlen) } - x2read = len(v) + changed = true } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = uint32(dd.DecodeUint(32)) - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, 0) - slh.ElemContainerState(j) - v[j] = uint32(dd.DecodeUint(32)) - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []uint32{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - if cap(v) == 0 { - v = make([]uint32, 1, 4) + v = v[:containerLenS] changed = true } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = uint32(dd.DecodeUint(32)) + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && 
len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) } else { - d.swallow() + xlen = 8 } - breakFound = dd.CheckBreak() + v = make([]uint32, xlen) + changed = true } - if canChange && j < len(v) { + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = uint32(dd.DecodeUint(32)) + } + } + if canChange { + if j < len(v) { v = v[:j] changed = true + } else if j == 0 && v == nil { + v = make([]uint32, 0) + changed = true } } slh.End() return v, changed } -func (f *decFnInfo) fastpathDecSliceUint64R(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]uint64) - v, changed := fastpathTV.DecSliceUint64V(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { +func (d *Decoder) fastpathDecSliceUint64R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]uint64) + if v, changed := fastpathTV.DecSliceUint64V(*vp, !array, d); changed { *vp = v } } else { - v := rv.Interface().([]uint64) - fastpathTV.DecSliceUint64V(v, fastpathCheckNilFalse, false, f.d) + fastpathTV.DecSliceUint64V(rv2i(rv).([]uint64), !array, d) } } - -func (f fastpathT) DecSliceUint64X(vp *[]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecSliceUint64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecSliceUint64X(vp *[]uint64, d *Decoder) { + if v, changed := f.DecSliceUint64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecSliceUint64V(v []uint64, checkNil bool, canChange bool, d *Decoder) (_ []uint64, changed bool) { +func (_ fastpathT) DecSliceUint64V(v []uint64, canChange bool, d *Decoder) (_ []uint64, changed bool) { dd := d.d - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - slh, containerLenS := d.decSliceHelperStart() if containerLenS == 0 { if canChange { @@ -19099,125 +17050,82 @@ func (_ fastpathT) DecSliceUint64V(v []uint64, checkNil bool, canChange bool, d return v, changed } - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]uint64, xlen) - } - } else { - v = make([]uint64, xlen) - } - changed = true + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xlen <= cap(v) { + v = v[:xlen] } else { - d.arrayCannotExpand(len(v), containerLenS) + v = make([]uint64, xlen) } - x2read = len(v) + changed = true } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = dd.DecodeUint(64) - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, 0) - slh.ElemContainerState(j) - v[j] = dd.DecodeUint(64) - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []uint64{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - 
return v, changed - } - if cap(v) == 0 { - v = make([]uint64, 1, 4) + v = v[:containerLenS] changed = true } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = dd.DecodeUint(64) + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) } else { - d.swallow() + xlen = 8 } - breakFound = dd.CheckBreak() + v = make([]uint64, xlen) + changed = true } - if canChange && j < len(v) { + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = dd.DecodeUint(64) + } + } + if canChange { + if j < len(v) { v = v[:j] changed = true + } else if j == 0 && v == nil { + v = make([]uint64, 0) + changed = true } } slh.End() return v, changed } -func (f *decFnInfo) fastpathDecSliceUintptrR(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]uintptr) - v, changed := fastpathTV.DecSliceUintptrV(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { +func (d *Decoder) fastpathDecSliceUintptrR(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]uintptr) + if v, changed := fastpathTV.DecSliceUintptrV(*vp, !array, d); changed { *vp = v } } else { - v := rv.Interface().([]uintptr) - fastpathTV.DecSliceUintptrV(v, fastpathCheckNilFalse, false, f.d) + fastpathTV.DecSliceUintptrV(rv2i(rv).([]uintptr), !array, d) } } - -func (f fastpathT) DecSliceUintptrX(vp *[]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecSliceUintptrV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecSliceUintptrX(vp *[]uintptr, d *Decoder) { + if v, changed := f.DecSliceUintptrV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecSliceUintptrV(v []uintptr, checkNil bool, canChange bool, d *Decoder) (_ []uintptr, changed bool) { +func (_ fastpathT) DecSliceUintptrV(v []uintptr, canChange bool, d *Decoder) (_ []uintptr, changed bool) { dd := d.d - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - slh, containerLenS := d.decSliceHelperStart() if containerLenS == 0 { if canChange { @@ -19232,125 +17140,82 @@ func (_ fastpathT) DecSliceUintptrV(v []uintptr, checkNil bool, canChange bool, return v, changed } - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]uintptr, xlen) - } - } else { - v = make([]uintptr, xlen) - } - changed = true + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xlen <= cap(v) { + v = v[:xlen] } else { - d.arrayCannotExpand(len(v), containerLenS) + v = make([]uintptr, xlen) } - x2read = len(v) + changed = true } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - 
slh.ElemContainerState(j) - v[j] = uintptr(dd.DecodeUint(uintBitsize)) - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, 0) - slh.ElemContainerState(j) - v[j] = uintptr(dd.DecodeUint(uintBitsize)) - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []uintptr{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - if cap(v) == 0 { - v = make([]uintptr, 1, 4) + v = v[:containerLenS] changed = true } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = uintptr(dd.DecodeUint(uintBitsize)) + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) } else { - d.swallow() + xlen = 8 } - breakFound = dd.CheckBreak() + v = make([]uintptr, xlen) + changed = true } - if canChange && j < len(v) { + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = uintptr(dd.DecodeUint(uintBitsize)) + } + } + if canChange { + if j < len(v) { v = v[:j] changed = true + } else if j == 0 && v == nil { + v = make([]uintptr, 0) + changed = true } } slh.End() return v, changed } -func (f *decFnInfo) fastpathDecSliceIntR(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]int) - v, changed := fastpathTV.DecSliceIntV(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { +func (d *Decoder) fastpathDecSliceIntR(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]int) + if v, changed := fastpathTV.DecSliceIntV(*vp, !array, d); changed { *vp = v } } else { - v := rv.Interface().([]int) - fastpathTV.DecSliceIntV(v, fastpathCheckNilFalse, false, f.d) + fastpathTV.DecSliceIntV(rv2i(rv).([]int), !array, d) } } - -func (f fastpathT) DecSliceIntX(vp *[]int, checkNil bool, d *Decoder) { - v, changed := f.DecSliceIntV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecSliceIntX(vp *[]int, d *Decoder) { + if v, changed := f.DecSliceIntV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecSliceIntV(v []int, checkNil bool, canChange bool, d *Decoder) (_ []int, changed bool) { +func (_ fastpathT) DecSliceIntV(v []int, canChange bool, d *Decoder) (_ []int, changed bool) { dd := d.d - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - slh, containerLenS := d.decSliceHelperStart() if containerLenS == 0 { if canChange { @@ -19365,125 +17230,82 @@ func (_ fastpathT) DecSliceIntV(v []int, checkNil bool, canChange bool, d *Decod return v, changed } - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } 
else { - v = make([]int, xlen) - } - } else { - v = make([]int, xlen) - } - changed = true + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xlen <= cap(v) { + v = v[:xlen] } else { - d.arrayCannotExpand(len(v), containerLenS) + v = make([]int, xlen) } - x2read = len(v) + changed = true } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = int(dd.DecodeInt(intBitsize)) - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, 0) - slh.ElemContainerState(j) - v[j] = int(dd.DecodeInt(intBitsize)) - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []int{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - if cap(v) == 0 { - v = make([]int, 1, 4) + v = v[:containerLenS] changed = true } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = int(dd.DecodeInt(intBitsize)) + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) } else { - d.swallow() + xlen = 8 } - breakFound = dd.CheckBreak() + v = make([]int, xlen) + changed = true } - if canChange && j < len(v) { + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = int(dd.DecodeInt(intBitsize)) + } + } + if canChange { + if j < len(v) { v = v[:j] changed = true + } else if j == 0 && v == nil { + v = make([]int, 0) + changed = true } } slh.End() return v, changed } -func (f *decFnInfo) fastpathDecSliceInt8R(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]int8) - v, changed := fastpathTV.DecSliceInt8V(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { +func (d *Decoder) fastpathDecSliceInt8R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]int8) + if v, changed := fastpathTV.DecSliceInt8V(*vp, !array, d); changed { *vp = v } } else { - v := rv.Interface().([]int8) - fastpathTV.DecSliceInt8V(v, fastpathCheckNilFalse, false, f.d) + fastpathTV.DecSliceInt8V(rv2i(rv).([]int8), !array, d) } } - -func (f fastpathT) DecSliceInt8X(vp *[]int8, checkNil bool, d *Decoder) { - v, changed := f.DecSliceInt8V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecSliceInt8X(vp *[]int8, d *Decoder) { + if v, changed := f.DecSliceInt8V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecSliceInt8V(v []int8, checkNil bool, canChange bool, d *Decoder) (_ []int8, changed bool) { +func (_ fastpathT) DecSliceInt8V(v []int8, canChange bool, d *Decoder) (_ []int8, changed bool) { dd := d.d - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - slh, containerLenS := d.decSliceHelperStart() if containerLenS == 0 { if canChange { @@ -19498,125 +17320,82 @@ 
func (_ fastpathT) DecSliceInt8V(v []int8, checkNil bool, canChange bool, d *Dec return v, changed } - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 1) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]int8, xlen) - } - } else { - v = make([]int8, xlen) - } - changed = true + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1) + if xlen <= cap(v) { + v = v[:xlen] } else { - d.arrayCannotExpand(len(v), containerLenS) + v = make([]int8, xlen) } - x2read = len(v) + changed = true } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = int8(dd.DecodeInt(8)) - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, 0) - slh.ElemContainerState(j) - v[j] = int8(dd.DecodeInt(8)) - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []int8{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - if cap(v) == 0 { - v = make([]int8, 1, 4) + v = v[:containerLenS] changed = true } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = int8(dd.DecodeInt(8)) + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1) } else { - d.swallow() + xlen = 8 } - breakFound = dd.CheckBreak() + v = make([]int8, xlen) + changed = true } - if canChange && j < len(v) { + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = int8(dd.DecodeInt(8)) + } + } + if canChange { + if j < len(v) { v = v[:j] changed = true + } else if j == 0 && v == nil { + v = make([]int8, 0) + changed = true } } slh.End() return v, changed } -func (f *decFnInfo) fastpathDecSliceInt16R(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]int16) - v, changed := fastpathTV.DecSliceInt16V(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { +func (d *Decoder) fastpathDecSliceInt16R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]int16) + if v, changed := fastpathTV.DecSliceInt16V(*vp, !array, d); changed { *vp = v } } else { - v := rv.Interface().([]int16) - fastpathTV.DecSliceInt16V(v, fastpathCheckNilFalse, false, f.d) + fastpathTV.DecSliceInt16V(rv2i(rv).([]int16), !array, d) } } - -func (f fastpathT) DecSliceInt16X(vp *[]int16, checkNil bool, d *Decoder) { - v, changed := f.DecSliceInt16V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecSliceInt16X(vp *[]int16, d *Decoder) { + if v, changed := f.DecSliceInt16V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) 
DecSliceInt16V(v []int16, checkNil bool, canChange bool, d *Decoder) (_ []int16, changed bool) { +func (_ fastpathT) DecSliceInt16V(v []int16, canChange bool, d *Decoder) (_ []int16, changed bool) { dd := d.d - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - slh, containerLenS := d.decSliceHelperStart() if containerLenS == 0 { if canChange { @@ -19631,125 +17410,82 @@ func (_ fastpathT) DecSliceInt16V(v []int16, checkNil bool, canChange bool, d *D return v, changed } - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 2) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]int16, xlen) - } - } else { - v = make([]int16, xlen) - } - changed = true + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2) + if xlen <= cap(v) { + v = v[:xlen] } else { - d.arrayCannotExpand(len(v), containerLenS) + v = make([]int16, xlen) } - x2read = len(v) + changed = true } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = int16(dd.DecodeInt(16)) - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, 0) - slh.ElemContainerState(j) - v[j] = int16(dd.DecodeInt(16)) - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []int16{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - if cap(v) == 0 { - v = make([]int16, 1, 4) + v = v[:containerLenS] changed = true } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = int16(dd.DecodeInt(16)) + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2) } else { - d.swallow() + xlen = 8 } - breakFound = dd.CheckBreak() + v = make([]int16, xlen) + changed = true } - if canChange && j < len(v) { + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = int16(dd.DecodeInt(16)) + } + } + if canChange { + if j < len(v) { v = v[:j] changed = true + } else if j == 0 && v == nil { + v = make([]int16, 0) + changed = true } } slh.End() return v, changed } -func (f *decFnInfo) fastpathDecSliceInt32R(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]int32) - v, changed := fastpathTV.DecSliceInt32V(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { +func (d *Decoder) fastpathDecSliceInt32R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]int32) + if v, changed := fastpathTV.DecSliceInt32V(*vp, !array, d); changed { *vp = v } } else { - v := rv.Interface().([]int32) - 
fastpathTV.DecSliceInt32V(v, fastpathCheckNilFalse, false, f.d) + fastpathTV.DecSliceInt32V(rv2i(rv).([]int32), !array, d) } } - -func (f fastpathT) DecSliceInt32X(vp *[]int32, checkNil bool, d *Decoder) { - v, changed := f.DecSliceInt32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecSliceInt32X(vp *[]int32, d *Decoder) { + if v, changed := f.DecSliceInt32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecSliceInt32V(v []int32, checkNil bool, canChange bool, d *Decoder) (_ []int32, changed bool) { +func (_ fastpathT) DecSliceInt32V(v []int32, canChange bool, d *Decoder) (_ []int32, changed bool) { dd := d.d - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - slh, containerLenS := d.decSliceHelperStart() if containerLenS == 0 { if canChange { @@ -19764,125 +17500,82 @@ func (_ fastpathT) DecSliceInt32V(v []int32, checkNil bool, canChange bool, d *D return v, changed } - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 4) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]int32, xlen) - } - } else { - v = make([]int32, xlen) - } - changed = true + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) + if xlen <= cap(v) { + v = v[:xlen] } else { - d.arrayCannotExpand(len(v), containerLenS) + v = make([]int32, xlen) } - x2read = len(v) + changed = true } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = int32(dd.DecodeInt(32)) - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, 0) - slh.ElemContainerState(j) - v[j] = int32(dd.DecodeInt(32)) - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []int32{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - if cap(v) == 0 { - v = make([]int32, 1, 4) + v = v[:containerLenS] changed = true } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = int32(dd.DecodeInt(32)) + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) } else { - d.swallow() + xlen = 8 } - breakFound = dd.CheckBreak() + v = make([]int32, xlen) + changed = true } - if canChange && j < len(v) { + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = int32(dd.DecodeInt(32)) + } + } + if canChange { + if j < len(v) { v = v[:j] changed = true + } else if j == 0 && v == nil { + v = make([]int32, 0) + changed = true } } slh.End() return v, changed } -func (f *decFnInfo) fastpathDecSliceInt64R(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := 
rv.Addr().Interface().(*[]int64) - v, changed := fastpathTV.DecSliceInt64V(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { +func (d *Decoder) fastpathDecSliceInt64R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]int64) + if v, changed := fastpathTV.DecSliceInt64V(*vp, !array, d); changed { *vp = v } } else { - v := rv.Interface().([]int64) - fastpathTV.DecSliceInt64V(v, fastpathCheckNilFalse, false, f.d) + fastpathTV.DecSliceInt64V(rv2i(rv).([]int64), !array, d) } } - -func (f fastpathT) DecSliceInt64X(vp *[]int64, checkNil bool, d *Decoder) { - v, changed := f.DecSliceInt64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecSliceInt64X(vp *[]int64, d *Decoder) { + if v, changed := f.DecSliceInt64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecSliceInt64V(v []int64, checkNil bool, canChange bool, d *Decoder) (_ []int64, changed bool) { +func (_ fastpathT) DecSliceInt64V(v []int64, canChange bool, d *Decoder) (_ []int64, changed bool) { dd := d.d - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - slh, containerLenS := d.decSliceHelperStart() if containerLenS == 0 { if canChange { @@ -19897,125 +17590,82 @@ func (_ fastpathT) DecSliceInt64V(v []int64, checkNil bool, canChange bool, d *D return v, changed } - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 8) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]int64, xlen) - } - } else { - v = make([]int64, xlen) - } - changed = true + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xlen <= cap(v) { + v = v[:xlen] } else { - d.arrayCannotExpand(len(v), containerLenS) + v = make([]int64, xlen) } - x2read = len(v) + changed = true } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = dd.DecodeInt(64) - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, 0) - slh.ElemContainerState(j) - v[j] = dd.DecodeInt(64) - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []int64{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - if cap(v) == 0 { - v = make([]int64, 1, 4) + v = v[:containerLenS] changed = true } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, 0) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = dd.DecodeInt(64) + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) } else { - d.swallow() + xlen = 8 } - breakFound = dd.CheckBreak() + v = make([]int64, xlen) + changed = true } - if canChange && j < len(v) { + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + 
slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = dd.DecodeInt(64) + } + } + if canChange { + if j < len(v) { v = v[:j] changed = true + } else if j == 0 && v == nil { + v = make([]int64, 0) + changed = true } } slh.End() return v, changed } -func (f *decFnInfo) fastpathDecSliceBoolR(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { - vp := rv.Addr().Interface().(*[]bool) - v, changed := fastpathTV.DecSliceBoolV(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { +func (d *Decoder) fastpathDecSliceBoolR(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]bool) + if v, changed := fastpathTV.DecSliceBoolV(*vp, !array, d); changed { *vp = v } } else { - v := rv.Interface().([]bool) - fastpathTV.DecSliceBoolV(v, fastpathCheckNilFalse, false, f.d) + fastpathTV.DecSliceBoolV(rv2i(rv).([]bool), !array, d) } } - -func (f fastpathT) DecSliceBoolX(vp *[]bool, checkNil bool, d *Decoder) { - v, changed := f.DecSliceBoolV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecSliceBoolX(vp *[]bool, d *Decoder) { + if v, changed := f.DecSliceBoolV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecSliceBoolV(v []bool, checkNil bool, canChange bool, d *Decoder) (_ []bool, changed bool) { +func (_ fastpathT) DecSliceBoolV(v []bool, canChange bool, d *Decoder) (_ []bool, changed bool) { dd := d.d - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - slh, containerLenS := d.decSliceHelperStart() if containerLenS == 0 { if canChange { @@ -20030,19323 +17680,15052 @@ func (_ fastpathT) DecSliceBoolV(v []bool, checkNil bool, canChange bool, d *Dec return v, changed } - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { if containerLenS > cap(v) { - if canChange { - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, 1) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]bool, xlen) - } - } else { - v = make([]bool, xlen) - } - changed = true + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1) + if xlen <= cap(v) { + v = v[:xlen] } else { - d.arrayCannotExpand(len(v), containerLenS) + v = make([]bool, xlen) } - x2read = len(v) + changed = true } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - v[j] = dd.DecodeBool() - } - if xtrunc { - for ; j < containerLenS; j++ { - v = append(v, false) - slh.ElemContainerState(j) - v[j] = dd.DecodeBool() - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() - if breakFound { - if canChange { - if v == nil { - v = []bool{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - if cap(v) == 0 { - v = make([]bool, 1, 4) + v = v[:containerLenS] changed = true } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, false) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { - v[j] = dd.DecodeBool() + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 
1) } else { - d.swallow() + xlen = 8 } - breakFound = dd.CheckBreak() + v = make([]bool, xlen) + changed = true } - if canChange && j < len(v) { + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, false) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + v[j] = dd.DecodeBool() + } + } + if canChange { + if j < len(v) { v = v[:j] changed = true + } else if j == 0 && v == nil { + v = make([]bool, 0) + changed = true } } slh.End() return v, changed } -func (f *decFnInfo) fastpathDecMapIntfIntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]interface{}) - v, changed := fastpathTV.DecMapIntfIntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapIntfIntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]interface{}) + if v, changed := fastpathTV.DecMapIntfIntfV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[interface{}]interface{}) - fastpathTV.DecMapIntfIntfV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapIntfIntfV(rv2i(rv).(map[interface{}]interface{}), false, d) } -func (f fastpathT) DecMapIntfIntfX(vp *map[interface{}]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfIntfV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapIntfIntfX(vp *map[interface{}]interface{}, d *Decoder) { + if v, changed := f.DecMapIntfIntfV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapIntfIntfV(v map[interface{}]interface{}, checkNil bool, canChange bool, +func (_ fastpathT) DecMapIntfIntfV(v map[interface{}]interface{}, canChange bool, d *Decoder) (_ map[interface{}]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 32) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 32) v = make(map[interface{}]interface{}, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } mapGet := !d.h.MapValueReset && !d.h.InterfaceReset var mk interface{} var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) } else { - mv = nil 
- } - d.decode(&mv) - if v != nil { - v[mk] = mv + v[mk] = nil } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapIntfStringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]string) - v, changed := fastpathTV.DecMapIntfStringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapIntfStringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]string) + if v, changed := fastpathTV.DecMapIntfStringV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[interface{}]string) - fastpathTV.DecMapIntfStringV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapIntfStringV(rv2i(rv).(map[interface{}]string), false, d) } -func (f fastpathT) DecMapIntfStringX(vp *map[interface{}]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfStringV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapIntfStringX(vp *map[interface{}]string, d *Decoder) { + if v, changed := f.DecMapIntfStringV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapIntfStringV(v map[interface{}]string, checkNil bool, canChange bool, +func (_ fastpathT) DecMapIntfStringV(v map[interface{}]string, canChange bool, d *Decoder) (_ map[interface{}]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 32) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 32) v = make(map[interface{}]string, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk interface{} var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapIntfUintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]uint) - v, changed := fastpathTV.DecMapIntfUintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapIntfUintR(f *codecFnInfo, rv 
reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]uint) + if v, changed := fastpathTV.DecMapIntfUintV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[interface{}]uint) - fastpathTV.DecMapIntfUintV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapIntfUintV(rv2i(rv).(map[interface{}]uint), false, d) } -func (f fastpathT) DecMapIntfUintX(vp *map[interface{}]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfUintV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapIntfUintX(vp *map[interface{}]uint, d *Decoder) { + if v, changed := f.DecMapIntfUintV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapIntfUintV(v map[interface{}]uint, checkNil bool, canChange bool, +func (_ fastpathT) DecMapIntfUintV(v map[interface{}]uint, canChange bool, d *Decoder) (_ map[interface{}]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) v = make(map[interface{}]uint, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk interface{} var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapIntfUint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]uint8) - v, changed := fastpathTV.DecMapIntfUint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapIntfUint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]uint8) + if v, changed := fastpathTV.DecMapIntfUint8V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[interface{}]uint8) - fastpathTV.DecMapIntfUint8V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapIntfUint8V(rv2i(rv).(map[interface{}]uint8), false, d) } -func (f fastpathT) DecMapIntfUint8X(vp *map[interface{}]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfUint8V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) 
DecMapIntfUint8X(vp *map[interface{}]uint8, d *Decoder) { + if v, changed := f.DecMapIntfUint8V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapIntfUint8V(v map[interface{}]uint8, checkNil bool, canChange bool, +func (_ fastpathT) DecMapIntfUint8V(v map[interface{}]uint8, canChange bool, d *Decoder) (_ map[interface{}]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) v = make(map[interface{}]uint8, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk interface{} var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapIntfUint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]uint16) - v, changed := fastpathTV.DecMapIntfUint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapIntfUint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]uint16) + if v, changed := fastpathTV.DecMapIntfUint16V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[interface{}]uint16) - fastpathTV.DecMapIntfUint16V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapIntfUint16V(rv2i(rv).(map[interface{}]uint16), false, d) } -func (f fastpathT) DecMapIntfUint16X(vp *map[interface{}]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfUint16V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapIntfUint16X(vp *map[interface{}]uint16, d *Decoder) { + if v, changed := f.DecMapIntfUint16V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapIntfUint16V(v map[interface{}]uint16, checkNil bool, canChange bool, +func (_ fastpathT) DecMapIntfUint16V(v map[interface{}]uint16, canChange bool, d *Decoder) (_ map[interface{}]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if 
canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) v = make(map[interface{}]uint16, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk interface{} var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapIntfUint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]uint32) - v, changed := fastpathTV.DecMapIntfUint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapIntfUint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]uint32) + if v, changed := fastpathTV.DecMapIntfUint32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[interface{}]uint32) - fastpathTV.DecMapIntfUint32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapIntfUint32V(rv2i(rv).(map[interface{}]uint32), false, d) } -func (f fastpathT) DecMapIntfUint32X(vp *map[interface{}]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfUint32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapIntfUint32X(vp *map[interface{}]uint32, d *Decoder) { + if v, changed := f.DecMapIntfUint32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapIntfUint32V(v map[interface{}]uint32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapIntfUint32V(v map[interface{}]uint32, canChange bool, d *Decoder) (_ map[interface{}]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) v = make(map[interface{}]uint32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk interface{} var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - 
cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapIntfUint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]uint64) - v, changed := fastpathTV.DecMapIntfUint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapIntfUint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]uint64) + if v, changed := fastpathTV.DecMapIntfUint64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[interface{}]uint64) - fastpathTV.DecMapIntfUint64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapIntfUint64V(rv2i(rv).(map[interface{}]uint64), false, d) } -func (f fastpathT) DecMapIntfUint64X(vp *map[interface{}]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfUint64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapIntfUint64X(vp *map[interface{}]uint64, d *Decoder) { + if v, changed := f.DecMapIntfUint64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapIntfUint64V(v map[interface{}]uint64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapIntfUint64V(v map[interface{}]uint64, canChange bool, d *Decoder) (_ map[interface{}]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) v = make(map[interface{}]uint64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk interface{} var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if 
v != nil { - v[mk] = mv + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapIntfUintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]uintptr) - v, changed := fastpathTV.DecMapIntfUintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapIntfUintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]uintptr) + if v, changed := fastpathTV.DecMapIntfUintptrV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[interface{}]uintptr) - fastpathTV.DecMapIntfUintptrV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapIntfUintptrV(rv2i(rv).(map[interface{}]uintptr), false, d) } -func (f fastpathT) DecMapIntfUintptrX(vp *map[interface{}]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfUintptrV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapIntfUintptrX(vp *map[interface{}]uintptr, d *Decoder) { + if v, changed := f.DecMapIntfUintptrV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapIntfUintptrV(v map[interface{}]uintptr, checkNil bool, canChange bool, +func (_ fastpathT) DecMapIntfUintptrV(v map[interface{}]uintptr, canChange bool, d *Decoder) (_ map[interface{}]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) v = make(map[interface{}]uintptr, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk interface{} var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapIntfIntR(rv reflect.Value) { - if rv.CanAddr() { - 
vp := rv.Addr().Interface().(*map[interface{}]int) - v, changed := fastpathTV.DecMapIntfIntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapIntfIntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]int) + if v, changed := fastpathTV.DecMapIntfIntV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[interface{}]int) - fastpathTV.DecMapIntfIntV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapIntfIntV(rv2i(rv).(map[interface{}]int), false, d) } -func (f fastpathT) DecMapIntfIntX(vp *map[interface{}]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfIntV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapIntfIntX(vp *map[interface{}]int, d *Decoder) { + if v, changed := f.DecMapIntfIntV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapIntfIntV(v map[interface{}]int, checkNil bool, canChange bool, +func (_ fastpathT) DecMapIntfIntV(v map[interface{}]int, canChange bool, d *Decoder) (_ map[interface{}]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) v = make(map[interface{}]int, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk interface{} var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapIntfInt8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]int8) - v, changed := fastpathTV.DecMapIntfInt8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapIntfInt8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]int8) + if v, changed := fastpathTV.DecMapIntfInt8V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[interface{}]int8) - fastpathTV.DecMapIntfInt8V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapIntfInt8V(rv2i(rv).(map[interface{}]int8), false, d) } -func (f fastpathT) 
DecMapIntfInt8X(vp *map[interface{}]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfInt8V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapIntfInt8X(vp *map[interface{}]int8, d *Decoder) { + if v, changed := f.DecMapIntfInt8V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapIntfInt8V(v map[interface{}]int8, checkNil bool, canChange bool, +func (_ fastpathT) DecMapIntfInt8V(v map[interface{}]int8, canChange bool, d *Decoder) (_ map[interface{}]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) v = make(map[interface{}]int8, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk interface{} var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapIntfInt16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]int16) - v, changed := fastpathTV.DecMapIntfInt16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapIntfInt16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]int16) + if v, changed := fastpathTV.DecMapIntfInt16V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[interface{}]int16) - fastpathTV.DecMapIntfInt16V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapIntfInt16V(rv2i(rv).(map[interface{}]int16), false, d) } -func (f fastpathT) DecMapIntfInt16X(vp *map[interface{}]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfInt16V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapIntfInt16X(vp *map[interface{}]int16, d *Decoder) { + if v, changed := f.DecMapIntfInt16V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapIntfInt16V(v map[interface{}]int16, checkNil bool, canChange bool, +func (_ fastpathT) DecMapIntfInt16V(v map[interface{}]int16, canChange bool, d *Decoder) (_ map[interface{}]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - 
changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) v = make(map[interface{}]int16, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk interface{} var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapIntfInt32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]int32) - v, changed := fastpathTV.DecMapIntfInt32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapIntfInt32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]int32) + if v, changed := fastpathTV.DecMapIntfInt32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[interface{}]int32) - fastpathTV.DecMapIntfInt32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapIntfInt32V(rv2i(rv).(map[interface{}]int32), false, d) } -func (f fastpathT) DecMapIntfInt32X(vp *map[interface{}]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfInt32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapIntfInt32X(vp *map[interface{}]int32, d *Decoder) { + if v, changed := f.DecMapIntfInt32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapIntfInt32V(v map[interface{}]int32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapIntfInt32V(v map[interface{}]int32, canChange bool, d *Decoder) (_ map[interface{}]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) v = make(map[interface{}]int32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk interface{} var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := 
mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapIntfInt64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]int64) - v, changed := fastpathTV.DecMapIntfInt64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapIntfInt64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]int64) + if v, changed := fastpathTV.DecMapIntfInt64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[interface{}]int64) - fastpathTV.DecMapIntfInt64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapIntfInt64V(rv2i(rv).(map[interface{}]int64), false, d) } -func (f fastpathT) DecMapIntfInt64X(vp *map[interface{}]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfInt64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapIntfInt64X(vp *map[interface{}]int64, d *Decoder) { + if v, changed := f.DecMapIntfInt64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapIntfInt64V(v map[interface{}]int64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapIntfInt64V(v map[interface{}]int64, canChange bool, d *Decoder) (_ map[interface{}]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) v = make(map[interface{}]int64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk interface{} var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - 
} - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapIntfFloat32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]float32) - v, changed := fastpathTV.DecMapIntfFloat32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapIntfFloat32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]float32) + if v, changed := fastpathTV.DecMapIntfFloat32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[interface{}]float32) - fastpathTV.DecMapIntfFloat32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapIntfFloat32V(rv2i(rv).(map[interface{}]float32), false, d) } -func (f fastpathT) DecMapIntfFloat32X(vp *map[interface{}]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfFloat32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapIntfFloat32X(vp *map[interface{}]float32, d *Decoder) { + if v, changed := f.DecMapIntfFloat32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapIntfFloat32V(v map[interface{}]float32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapIntfFloat32V(v map[interface{}]float32, canChange bool, d *Decoder) (_ map[interface{}]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) v = make(map[interface{}]float32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk interface{} var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapIntfFloat64R(rv reflect.Value) { - if 
rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]float64) - v, changed := fastpathTV.DecMapIntfFloat64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapIntfFloat64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]float64) + if v, changed := fastpathTV.DecMapIntfFloat64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[interface{}]float64) - fastpathTV.DecMapIntfFloat64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapIntfFloat64V(rv2i(rv).(map[interface{}]float64), false, d) } -func (f fastpathT) DecMapIntfFloat64X(vp *map[interface{}]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfFloat64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapIntfFloat64X(vp *map[interface{}]float64, d *Decoder) { + if v, changed := f.DecMapIntfFloat64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapIntfFloat64V(v map[interface{}]float64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapIntfFloat64V(v map[interface{}]float64, canChange bool, d *Decoder) (_ map[interface{}]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) v = make(map[interface{}]float64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk interface{} var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapIntfBoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[interface{}]bool) - v, changed := fastpathTV.DecMapIntfBoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapIntfBoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]bool) + if v, changed := fastpathTV.DecMapIntfBoolV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[interface{}]bool) - fastpathTV.DecMapIntfBoolV(v, fastpathCheckNilFalse, false, f.d) + return } + 
fastpathTV.DecMapIntfBoolV(rv2i(rv).(map[interface{}]bool), false, d) } -func (f fastpathT) DecMapIntfBoolX(vp *map[interface{}]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntfBoolV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapIntfBoolX(vp *map[interface{}]bool, d *Decoder) { + if v, changed := f.DecMapIntfBoolV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapIntfBoolV(v map[interface{}]bool, checkNil bool, canChange bool, +func (_ fastpathT) DecMapIntfBoolV(v map[interface{}]bool, canChange bool, d *Decoder) (_ map[interface{}]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) v = make(map[interface{}]bool, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk interface{} var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) - } - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapStringIntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]interface{}) - v, changed := fastpathTV.DecMapStringIntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapStringIntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]interface{}) + if v, changed := fastpathTV.DecMapStringIntfV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[string]interface{}) - fastpathTV.DecMapStringIntfV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapStringIntfV(rv2i(rv).(map[string]interface{}), false, d) } -func (f fastpathT) DecMapStringIntfX(vp *map[string]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringIntfV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapStringIntfX(vp *map[string]interface{}, d *Decoder) { + if v, changed := f.DecMapStringIntfV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapStringIntfV(v map[string]interface{}, checkNil bool, canChange bool, +func (_ fastpathT) DecMapStringIntfV(v map[string]interface{}, canChange bool, d *Decoder) (_ map[string]interface{}, 
changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 32) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 32) v = make(map[string]interface{}, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } mapGet := !d.h.MapValueReset && !d.h.InterfaceReset var mk string var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv + v[mk] = nil } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapStringStringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]string) - v, changed := fastpathTV.DecMapStringStringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapStringStringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]string) + if v, changed := fastpathTV.DecMapStringStringV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[string]string) - fastpathTV.DecMapStringStringV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapStringStringV(rv2i(rv).(map[string]string), false, d) } -func (f fastpathT) DecMapStringStringX(vp *map[string]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringStringV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapStringStringX(vp *map[string]string, d *Decoder) { + if v, changed := f.DecMapStringStringV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapStringStringV(v map[string]string, checkNil bool, canChange bool, +func (_ fastpathT) DecMapStringStringV(v map[string]string, canChange bool, d *Decoder) (_ map[string]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 32) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 32) v = make(map[string]string, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk string var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - 
cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapStringUintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]uint) - v, changed := fastpathTV.DecMapStringUintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapStringUintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]uint) + if v, changed := fastpathTV.DecMapStringUintV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[string]uint) - fastpathTV.DecMapStringUintV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapStringUintV(rv2i(rv).(map[string]uint), false, d) } -func (f fastpathT) DecMapStringUintX(vp *map[string]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringUintV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapStringUintX(vp *map[string]uint, d *Decoder) { + if v, changed := f.DecMapStringUintV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapStringUintV(v map[string]uint, checkNil bool, canChange bool, +func (_ fastpathT) DecMapStringUintV(v map[string]uint, canChange bool, d *Decoder) (_ map[string]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) v = make(map[string]uint, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk string var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = 
uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapStringUint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]uint8) - v, changed := fastpathTV.DecMapStringUint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapStringUint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]uint8) + if v, changed := fastpathTV.DecMapStringUint8V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[string]uint8) - fastpathTV.DecMapStringUint8V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapStringUint8V(rv2i(rv).(map[string]uint8), false, d) } -func (f fastpathT) DecMapStringUint8X(vp *map[string]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringUint8V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapStringUint8X(vp *map[string]uint8, d *Decoder) { + if v, changed := f.DecMapStringUint8V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapStringUint8V(v map[string]uint8, checkNil bool, canChange bool, +func (_ fastpathT) DecMapStringUint8V(v map[string]uint8, canChange bool, d *Decoder) (_ map[string]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) v = make(map[string]uint8, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk string var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapStringUint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]uint16) - v, changed := fastpathTV.DecMapStringUint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapStringUint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]uint16) + if v, changed := fastpathTV.DecMapStringUint16V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[string]uint16) - fastpathTV.DecMapStringUint16V(v, fastpathCheckNilFalse, false, f.d) + return } + 
fastpathTV.DecMapStringUint16V(rv2i(rv).(map[string]uint16), false, d) } -func (f fastpathT) DecMapStringUint16X(vp *map[string]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringUint16V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapStringUint16X(vp *map[string]uint16, d *Decoder) { + if v, changed := f.DecMapStringUint16V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapStringUint16V(v map[string]uint16, checkNil bool, canChange bool, +func (_ fastpathT) DecMapStringUint16V(v map[string]uint16, canChange bool, d *Decoder) (_ map[string]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) v = make(map[string]uint16, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk string var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapStringUint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]uint32) - v, changed := fastpathTV.DecMapStringUint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapStringUint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]uint32) + if v, changed := fastpathTV.DecMapStringUint32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[string]uint32) - fastpathTV.DecMapStringUint32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapStringUint32V(rv2i(rv).(map[string]uint32), false, d) } -func (f fastpathT) DecMapStringUint32X(vp *map[string]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringUint32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapStringUint32X(vp *map[string]uint32, d *Decoder) { + if v, changed := f.DecMapStringUint32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapStringUint32V(v map[string]uint32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapStringUint32V(v map[string]uint32, canChange bool, d *Decoder) (_ map[string]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, 
d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) v = make(map[string]uint32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk string var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapStringUint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]uint64) - v, changed := fastpathTV.DecMapStringUint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapStringUint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]uint64) + if v, changed := fastpathTV.DecMapStringUint64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[string]uint64) - fastpathTV.DecMapStringUint64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapStringUint64V(rv2i(rv).(map[string]uint64), false, d) } -func (f fastpathT) DecMapStringUint64X(vp *map[string]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringUint64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapStringUint64X(vp *map[string]uint64, d *Decoder) { + if v, changed := f.DecMapStringUint64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapStringUint64V(v map[string]uint64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapStringUint64V(v map[string]uint64, canChange bool, d *Decoder) (_ map[string]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) v = make(map[string]uint64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk string var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else 
if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapStringUintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]uintptr) - v, changed := fastpathTV.DecMapStringUintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapStringUintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]uintptr) + if v, changed := fastpathTV.DecMapStringUintptrV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[string]uintptr) - fastpathTV.DecMapStringUintptrV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapStringUintptrV(rv2i(rv).(map[string]uintptr), false, d) } -func (f fastpathT) DecMapStringUintptrX(vp *map[string]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringUintptrV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapStringUintptrX(vp *map[string]uintptr, d *Decoder) { + if v, changed := f.DecMapStringUintptrV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapStringUintptrV(v map[string]uintptr, checkNil bool, canChange bool, +func (_ fastpathT) DecMapStringUintptrV(v map[string]uintptr, canChange bool, d *Decoder) (_ map[string]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) v = make(map[string]uintptr, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk string var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapStringIntR(rv reflect.Value) { - if rv.CanAddr() { - vp := 
rv.Addr().Interface().(*map[string]int) - v, changed := fastpathTV.DecMapStringIntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapStringIntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]int) + if v, changed := fastpathTV.DecMapStringIntV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[string]int) - fastpathTV.DecMapStringIntV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapStringIntV(rv2i(rv).(map[string]int), false, d) } -func (f fastpathT) DecMapStringIntX(vp *map[string]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringIntV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapStringIntX(vp *map[string]int, d *Decoder) { + if v, changed := f.DecMapStringIntV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapStringIntV(v map[string]int, checkNil bool, canChange bool, +func (_ fastpathT) DecMapStringIntV(v map[string]int, canChange bool, d *Decoder) (_ map[string]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) v = make(map[string]int, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk string var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapStringInt8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]int8) - v, changed := fastpathTV.DecMapStringInt8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapStringInt8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]int8) + if v, changed := fastpathTV.DecMapStringInt8V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[string]int8) - fastpathTV.DecMapStringInt8V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapStringInt8V(rv2i(rv).(map[string]int8), false, d) } -func (f fastpathT) DecMapStringInt8X(vp *map[string]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringInt8V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapStringInt8X(vp *map[string]int8, d *Decoder) { + if v, changed := 
f.DecMapStringInt8V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapStringInt8V(v map[string]int8, checkNil bool, canChange bool, +func (_ fastpathT) DecMapStringInt8V(v map[string]int8, canChange bool, d *Decoder) (_ map[string]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) v = make(map[string]int8, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk string var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapStringInt16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]int16) - v, changed := fastpathTV.DecMapStringInt16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapStringInt16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]int16) + if v, changed := fastpathTV.DecMapStringInt16V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[string]int16) - fastpathTV.DecMapStringInt16V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapStringInt16V(rv2i(rv).(map[string]int16), false, d) } -func (f fastpathT) DecMapStringInt16X(vp *map[string]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringInt16V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapStringInt16X(vp *map[string]int16, d *Decoder) { + if v, changed := f.DecMapStringInt16V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapStringInt16V(v map[string]int16, checkNil bool, canChange bool, +func (_ fastpathT) DecMapStringInt16V(v map[string]int16, canChange bool, d *Decoder) (_ map[string]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) v = make(map[string]int16, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk string var mv int16 - if containerLen > 0 { - for j := 0; j < 
containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapStringInt32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]int32) - v, changed := fastpathTV.DecMapStringInt32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapStringInt32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]int32) + if v, changed := fastpathTV.DecMapStringInt32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[string]int32) - fastpathTV.DecMapStringInt32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapStringInt32V(rv2i(rv).(map[string]int32), false, d) } -func (f fastpathT) DecMapStringInt32X(vp *map[string]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringInt32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapStringInt32X(vp *map[string]int32, d *Decoder) { + if v, changed := f.DecMapStringInt32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapStringInt32V(v map[string]int32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapStringInt32V(v map[string]int32, canChange bool, d *Decoder) (_ map[string]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) v = make(map[string]int32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk string var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + 
} else { + v[mk] = 0 } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapStringInt64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]int64) - v, changed := fastpathTV.DecMapStringInt64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapStringInt64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]int64) + if v, changed := fastpathTV.DecMapStringInt64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[string]int64) - fastpathTV.DecMapStringInt64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapStringInt64V(rv2i(rv).(map[string]int64), false, d) } -func (f fastpathT) DecMapStringInt64X(vp *map[string]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringInt64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapStringInt64X(vp *map[string]int64, d *Decoder) { + if v, changed := f.DecMapStringInt64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapStringInt64V(v map[string]int64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapStringInt64V(v map[string]int64, canChange bool, d *Decoder) (_ map[string]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) v = make(map[string]int64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk string var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapStringFloat32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]float32) - v, changed := fastpathTV.DecMapStringFloat32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapStringFloat32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]float32) + if v, changed := fastpathTV.DecMapStringFloat32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[string]float32) - fastpathTV.DecMapStringFloat32V(v, fastpathCheckNilFalse, false, f.d) + return } 
+ fastpathTV.DecMapStringFloat32V(rv2i(rv).(map[string]float32), false, d) } -func (f fastpathT) DecMapStringFloat32X(vp *map[string]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringFloat32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapStringFloat32X(vp *map[string]float32, d *Decoder) { + if v, changed := f.DecMapStringFloat32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapStringFloat32V(v map[string]float32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapStringFloat32V(v map[string]float32, canChange bool, d *Decoder) (_ map[string]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) v = make(map[string]float32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk string var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapStringFloat64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]float64) - v, changed := fastpathTV.DecMapStringFloat64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapStringFloat64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]float64) + if v, changed := fastpathTV.DecMapStringFloat64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[string]float64) - fastpathTV.DecMapStringFloat64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapStringFloat64V(rv2i(rv).(map[string]float64), false, d) } -func (f fastpathT) DecMapStringFloat64X(vp *map[string]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringFloat64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapStringFloat64X(vp *map[string]float64, d *Decoder) { + if v, changed := f.DecMapStringFloat64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapStringFloat64V(v map[string]float64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapStringFloat64V(v map[string]float64, canChange bool, d *Decoder) (_ map[string]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - 
return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) v = make(map[string]float64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk string var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapStringBoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[string]bool) - v, changed := fastpathTV.DecMapStringBoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapStringBoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]bool) + if v, changed := fastpathTV.DecMapStringBoolV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[string]bool) - fastpathTV.DecMapStringBoolV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapStringBoolV(rv2i(rv).(map[string]bool), false, d) } -func (f fastpathT) DecMapStringBoolX(vp *map[string]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapStringBoolV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapStringBoolX(vp *map[string]bool, d *Decoder) { + if v, changed := f.DecMapStringBoolV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapStringBoolV(v map[string]bool, checkNil bool, canChange bool, +func (_ fastpathT) DecMapStringBoolV(v map[string]bool, canChange bool, d *Decoder) (_ map[string]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) v = make(map[string]bool, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk string var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen 
< 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeString() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapFloat32IntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]interface{}) - v, changed := fastpathTV.DecMapFloat32IntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapFloat32IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]interface{}) + if v, changed := fastpathTV.DecMapFloat32IntfV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[float32]interface{}) - fastpathTV.DecMapFloat32IntfV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapFloat32IntfV(rv2i(rv).(map[float32]interface{}), false, d) } -func (f fastpathT) DecMapFloat32IntfX(vp *map[float32]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32IntfV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapFloat32IntfX(vp *map[float32]interface{}, d *Decoder) { + if v, changed := f.DecMapFloat32IntfV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapFloat32IntfV(v map[float32]interface{}, checkNil bool, canChange bool, +func (_ fastpathT) DecMapFloat32IntfV(v map[float32]interface{}, canChange bool, d *Decoder) (_ map[float32]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) v = make(map[float32]interface{}, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } mapGet := !d.h.MapValueReset && !d.h.InterfaceReset var mk float32 var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv + v[mk] = nil } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv } } - if cr != nil { - 
cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapFloat32StringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]string) - v, changed := fastpathTV.DecMapFloat32StringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapFloat32StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]string) + if v, changed := fastpathTV.DecMapFloat32StringV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[float32]string) - fastpathTV.DecMapFloat32StringV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapFloat32StringV(rv2i(rv).(map[float32]string), false, d) } -func (f fastpathT) DecMapFloat32StringX(vp *map[float32]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32StringV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapFloat32StringX(vp *map[float32]string, d *Decoder) { + if v, changed := f.DecMapFloat32StringV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapFloat32StringV(v map[float32]string, checkNil bool, canChange bool, +func (_ fastpathT) DecMapFloat32StringV(v map[float32]string, canChange bool, d *Decoder) (_ map[float32]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) v = make(map[float32]string, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk float32 var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapFloat32UintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]uint) - v, changed := fastpathTV.DecMapFloat32UintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapFloat32UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]uint) + if v, changed := fastpathTV.DecMapFloat32UintV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[float32]uint) - fastpathTV.DecMapFloat32UintV(v, fastpathCheckNilFalse, false, f.d) + return } + 
fastpathTV.DecMapFloat32UintV(rv2i(rv).(map[float32]uint), false, d) } -func (f fastpathT) DecMapFloat32UintX(vp *map[float32]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32UintV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapFloat32UintX(vp *map[float32]uint, d *Decoder) { + if v, changed := f.DecMapFloat32UintV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapFloat32UintV(v map[float32]uint, checkNil bool, canChange bool, +func (_ fastpathT) DecMapFloat32UintV(v map[float32]uint, canChange bool, d *Decoder) (_ map[float32]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) v = make(map[float32]uint, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk float32 var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapFloat32Uint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]uint8) - v, changed := fastpathTV.DecMapFloat32Uint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapFloat32Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]uint8) + if v, changed := fastpathTV.DecMapFloat32Uint8V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[float32]uint8) - fastpathTV.DecMapFloat32Uint8V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapFloat32Uint8V(rv2i(rv).(map[float32]uint8), false, d) } -func (f fastpathT) DecMapFloat32Uint8X(vp *map[float32]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32Uint8V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapFloat32Uint8X(vp *map[float32]uint8, d *Decoder) { + if v, changed := f.DecMapFloat32Uint8V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapFloat32Uint8V(v map[float32]uint8, checkNil bool, canChange bool, +func (_ fastpathT) DecMapFloat32Uint8V(v map[float32]uint8, canChange bool, d *Decoder) (_ map[float32]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, 
changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) v = make(map[float32]uint8, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk float32 var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapFloat32Uint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]uint16) - v, changed := fastpathTV.DecMapFloat32Uint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapFloat32Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]uint16) + if v, changed := fastpathTV.DecMapFloat32Uint16V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[float32]uint16) - fastpathTV.DecMapFloat32Uint16V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapFloat32Uint16V(rv2i(rv).(map[float32]uint16), false, d) } -func (f fastpathT) DecMapFloat32Uint16X(vp *map[float32]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32Uint16V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapFloat32Uint16X(vp *map[float32]uint16, d *Decoder) { + if v, changed := f.DecMapFloat32Uint16V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapFloat32Uint16V(v map[float32]uint16, checkNil bool, canChange bool, +func (_ fastpathT) DecMapFloat32Uint16V(v map[float32]uint16, canChange bool, d *Decoder) (_ map[float32]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) v = make(map[float32]uint16, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk float32 var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j 
< containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapFloat32Uint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]uint32) - v, changed := fastpathTV.DecMapFloat32Uint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapFloat32Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]uint32) + if v, changed := fastpathTV.DecMapFloat32Uint32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[float32]uint32) - fastpathTV.DecMapFloat32Uint32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapFloat32Uint32V(rv2i(rv).(map[float32]uint32), false, d) } -func (f fastpathT) DecMapFloat32Uint32X(vp *map[float32]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32Uint32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapFloat32Uint32X(vp *map[float32]uint32, d *Decoder) { + if v, changed := f.DecMapFloat32Uint32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapFloat32Uint32V(v map[float32]uint32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapFloat32Uint32V(v map[float32]uint32, canChange bool, d *Decoder) (_ map[float32]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) v = make(map[float32]uint32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk float32 var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + 
dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapFloat32Uint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]uint64) - v, changed := fastpathTV.DecMapFloat32Uint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapFloat32Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]uint64) + if v, changed := fastpathTV.DecMapFloat32Uint64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[float32]uint64) - fastpathTV.DecMapFloat32Uint64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapFloat32Uint64V(rv2i(rv).(map[float32]uint64), false, d) } -func (f fastpathT) DecMapFloat32Uint64X(vp *map[float32]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32Uint64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapFloat32Uint64X(vp *map[float32]uint64, d *Decoder) { + if v, changed := f.DecMapFloat32Uint64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapFloat32Uint64V(v map[float32]uint64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapFloat32Uint64V(v map[float32]uint64, canChange bool, d *Decoder) (_ map[float32]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) v = make(map[float32]uint64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk float32 var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapFloat32UintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]uintptr) - v, changed := fastpathTV.DecMapFloat32UintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapFloat32UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]uintptr) + if v, changed := fastpathTV.DecMapFloat32UintptrV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[float32]uintptr) - fastpathTV.DecMapFloat32UintptrV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapFloat32UintptrV(rv2i(rv).(map[float32]uintptr), false, 
d) } -func (f fastpathT) DecMapFloat32UintptrX(vp *map[float32]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32UintptrV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapFloat32UintptrX(vp *map[float32]uintptr, d *Decoder) { + if v, changed := f.DecMapFloat32UintptrV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapFloat32UintptrV(v map[float32]uintptr, checkNil bool, canChange bool, +func (_ fastpathT) DecMapFloat32UintptrV(v map[float32]uintptr, canChange bool, d *Decoder) (_ map[float32]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) v = make(map[float32]uintptr, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk float32 var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapFloat32IntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]int) - v, changed := fastpathTV.DecMapFloat32IntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapFloat32IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]int) + if v, changed := fastpathTV.DecMapFloat32IntV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[float32]int) - fastpathTV.DecMapFloat32IntV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapFloat32IntV(rv2i(rv).(map[float32]int), false, d) } -func (f fastpathT) DecMapFloat32IntX(vp *map[float32]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32IntV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapFloat32IntX(vp *map[float32]int, d *Decoder) { + if v, changed := f.DecMapFloat32IntV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapFloat32IntV(v map[float32]int, checkNil bool, canChange bool, +func (_ fastpathT) DecMapFloat32IntV(v map[float32]int, canChange bool, d *Decoder) (_ map[float32]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() 
containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) v = make(map[float32]int, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk float32 var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapFloat32Int8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]int8) - v, changed := fastpathTV.DecMapFloat32Int8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapFloat32Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]int8) + if v, changed := fastpathTV.DecMapFloat32Int8V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[float32]int8) - fastpathTV.DecMapFloat32Int8V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapFloat32Int8V(rv2i(rv).(map[float32]int8), false, d) } -func (f fastpathT) DecMapFloat32Int8X(vp *map[float32]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32Int8V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapFloat32Int8X(vp *map[float32]int8, d *Decoder) { + if v, changed := f.DecMapFloat32Int8V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapFloat32Int8V(v map[float32]int8, checkNil bool, canChange bool, +func (_ fastpathT) DecMapFloat32Int8V(v map[float32]int8, canChange bool, d *Decoder) (_ map[float32]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) v = make(map[float32]int8, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk float32 var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } 
- } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapFloat32Int16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]int16) - v, changed := fastpathTV.DecMapFloat32Int16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapFloat32Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]int16) + if v, changed := fastpathTV.DecMapFloat32Int16V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[float32]int16) - fastpathTV.DecMapFloat32Int16V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapFloat32Int16V(rv2i(rv).(map[float32]int16), false, d) } -func (f fastpathT) DecMapFloat32Int16X(vp *map[float32]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32Int16V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapFloat32Int16X(vp *map[float32]int16, d *Decoder) { + if v, changed := f.DecMapFloat32Int16V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapFloat32Int16V(v map[float32]int16, checkNil bool, canChange bool, +func (_ fastpathT) DecMapFloat32Int16V(v map[float32]int16, canChange bool, d *Decoder) (_ map[float32]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) v = make(map[float32]int16, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk float32 var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapFloat32Int32R(rv reflect.Value) { - if rv.CanAddr() { - vp := 
rv.Addr().Interface().(*map[float32]int32) - v, changed := fastpathTV.DecMapFloat32Int32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapFloat32Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]int32) + if v, changed := fastpathTV.DecMapFloat32Int32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[float32]int32) - fastpathTV.DecMapFloat32Int32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapFloat32Int32V(rv2i(rv).(map[float32]int32), false, d) } -func (f fastpathT) DecMapFloat32Int32X(vp *map[float32]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32Int32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapFloat32Int32X(vp *map[float32]int32, d *Decoder) { + if v, changed := f.DecMapFloat32Int32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapFloat32Int32V(v map[float32]int32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapFloat32Int32V(v map[float32]int32, canChange bool, d *Decoder) (_ map[float32]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) v = make(map[float32]int32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk float32 var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapFloat32Int64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]int64) - v, changed := fastpathTV.DecMapFloat32Int64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapFloat32Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]int64) + if v, changed := fastpathTV.DecMapFloat32Int64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[float32]int64) - fastpathTV.DecMapFloat32Int64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapFloat32Int64V(rv2i(rv).(map[float32]int64), false, d) } -func (f fastpathT) DecMapFloat32Int64X(vp *map[float32]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32Int64V(*vp, checkNil, true, d) 
- if changed { +func (f fastpathT) DecMapFloat32Int64X(vp *map[float32]int64, d *Decoder) { + if v, changed := f.DecMapFloat32Int64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapFloat32Int64V(v map[float32]int64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapFloat32Int64V(v map[float32]int64, canChange bool, d *Decoder) (_ map[float32]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) v = make(map[float32]int64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk float32 var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapFloat32Float32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]float32) - v, changed := fastpathTV.DecMapFloat32Float32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapFloat32Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]float32) + if v, changed := fastpathTV.DecMapFloat32Float32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[float32]float32) - fastpathTV.DecMapFloat32Float32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapFloat32Float32V(rv2i(rv).(map[float32]float32), false, d) } -func (f fastpathT) DecMapFloat32Float32X(vp *map[float32]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32Float32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapFloat32Float32X(vp *map[float32]float32, d *Decoder) { + if v, changed := f.DecMapFloat32Float32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapFloat32Float32V(v map[float32]float32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapFloat32Float32V(v map[float32]float32, canChange bool, d *Decoder) (_ map[float32]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) + xlen := decInferLen(containerLen, 
d.h.MaxInitLen, 8) v = make(map[float32]float32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk float32 var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapFloat32Float64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]float64) - v, changed := fastpathTV.DecMapFloat32Float64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapFloat32Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]float64) + if v, changed := fastpathTV.DecMapFloat32Float64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[float32]float64) - fastpathTV.DecMapFloat32Float64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapFloat32Float64V(rv2i(rv).(map[float32]float64), false, d) } -func (f fastpathT) DecMapFloat32Float64X(vp *map[float32]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32Float64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapFloat32Float64X(vp *map[float32]float64, d *Decoder) { + if v, changed := f.DecMapFloat32Float64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapFloat32Float64V(v map[float32]float64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapFloat32Float64V(v map[float32]float64, canChange bool, d *Decoder) (_ map[float32]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) v = make(map[float32]float64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk float32 var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr 
!= nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapFloat32BoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float32]bool) - v, changed := fastpathTV.DecMapFloat32BoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapFloat32BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]bool) + if v, changed := fastpathTV.DecMapFloat32BoolV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[float32]bool) - fastpathTV.DecMapFloat32BoolV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapFloat32BoolV(rv2i(rv).(map[float32]bool), false, d) } -func (f fastpathT) DecMapFloat32BoolX(vp *map[float32]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat32BoolV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapFloat32BoolX(vp *map[float32]bool, d *Decoder) { + if v, changed := f.DecMapFloat32BoolV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapFloat32BoolV(v map[float32]bool, checkNil bool, canChange bool, +func (_ fastpathT) DecMapFloat32BoolV(v map[float32]bool, canChange bool, d *Decoder) (_ map[float32]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) v = make(map[float32]bool, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk float32 var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = float32(dd.DecodeFloat(true)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv + mk = float32(dd.DecodeFloat(true)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapFloat64IntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]interface{}) - v, changed := fastpathTV.DecMapFloat64IntfV(*vp, fastpathCheckNilFalse, 
true, f.d) - if changed { +func (d *Decoder) fastpathDecMapFloat64IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]interface{}) + if v, changed := fastpathTV.DecMapFloat64IntfV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[float64]interface{}) - fastpathTV.DecMapFloat64IntfV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapFloat64IntfV(rv2i(rv).(map[float64]interface{}), false, d) } -func (f fastpathT) DecMapFloat64IntfX(vp *map[float64]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64IntfV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapFloat64IntfX(vp *map[float64]interface{}, d *Decoder) { + if v, changed := f.DecMapFloat64IntfV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapFloat64IntfV(v map[float64]interface{}, checkNil bool, canChange bool, +func (_ fastpathT) DecMapFloat64IntfV(v map[float64]interface{}, canChange bool, d *Decoder) (_ map[float64]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) v = make(map[float64]interface{}, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } mapGet := !d.h.MapValueReset && !d.h.InterfaceReset var mk float64 var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv + v[mk] = nil } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapFloat64StringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]string) - v, changed := fastpathTV.DecMapFloat64StringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapFloat64StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]string) + if v, changed := fastpathTV.DecMapFloat64StringV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[float64]string) - fastpathTV.DecMapFloat64StringV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapFloat64StringV(rv2i(rv).(map[float64]string), false, d) } -func (f fastpathT) DecMapFloat64StringX(vp *map[float64]string, checkNil bool, 
d *Decoder) { - v, changed := f.DecMapFloat64StringV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapFloat64StringX(vp *map[float64]string, d *Decoder) { + if v, changed := f.DecMapFloat64StringV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapFloat64StringV(v map[float64]string, checkNil bool, canChange bool, +func (_ fastpathT) DecMapFloat64StringV(v map[float64]string, canChange bool, d *Decoder) (_ map[float64]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) v = make(map[float64]string, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk float64 var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapFloat64UintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]uint) - v, changed := fastpathTV.DecMapFloat64UintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapFloat64UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]uint) + if v, changed := fastpathTV.DecMapFloat64UintV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[float64]uint) - fastpathTV.DecMapFloat64UintV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapFloat64UintV(rv2i(rv).(map[float64]uint), false, d) } -func (f fastpathT) DecMapFloat64UintX(vp *map[float64]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64UintV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapFloat64UintX(vp *map[float64]uint, d *Decoder) { + if v, changed := f.DecMapFloat64UintV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapFloat64UintV(v map[float64]uint, checkNil bool, canChange bool, +func (_ fastpathT) DecMapFloat64UintV(v map[float64]uint, canChange bool, d *Decoder) (_ map[float64]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + xlen := decInferLen(containerLen, 
d.h.MaxInitLen, 16) v = make(map[float64]uint, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk float64 var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapFloat64Uint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]uint8) - v, changed := fastpathTV.DecMapFloat64Uint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapFloat64Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]uint8) + if v, changed := fastpathTV.DecMapFloat64Uint8V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[float64]uint8) - fastpathTV.DecMapFloat64Uint8V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapFloat64Uint8V(rv2i(rv).(map[float64]uint8), false, d) } -func (f fastpathT) DecMapFloat64Uint8X(vp *map[float64]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64Uint8V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapFloat64Uint8X(vp *map[float64]uint8, d *Decoder) { + if v, changed := f.DecMapFloat64Uint8V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapFloat64Uint8V(v map[float64]uint8, checkNil bool, canChange bool, +func (_ fastpathT) DecMapFloat64Uint8V(v map[float64]uint8, canChange bool, d *Decoder) (_ map[float64]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) v = make(map[float64]uint8, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk float64 var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = 
dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapFloat64Uint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]uint16) - v, changed := fastpathTV.DecMapFloat64Uint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapFloat64Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]uint16) + if v, changed := fastpathTV.DecMapFloat64Uint16V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[float64]uint16) - fastpathTV.DecMapFloat64Uint16V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapFloat64Uint16V(rv2i(rv).(map[float64]uint16), false, d) } -func (f fastpathT) DecMapFloat64Uint16X(vp *map[float64]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64Uint16V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapFloat64Uint16X(vp *map[float64]uint16, d *Decoder) { + if v, changed := f.DecMapFloat64Uint16V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapFloat64Uint16V(v map[float64]uint16, checkNil bool, canChange bool, +func (_ fastpathT) DecMapFloat64Uint16V(v map[float64]uint16, canChange bool, d *Decoder) (_ map[float64]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) v = make(map[float64]uint16, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk float64 var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapFloat64Uint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]uint32) - v, changed := fastpathTV.DecMapFloat64Uint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { 
+func (d *Decoder) fastpathDecMapFloat64Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]uint32) + if v, changed := fastpathTV.DecMapFloat64Uint32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[float64]uint32) - fastpathTV.DecMapFloat64Uint32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapFloat64Uint32V(rv2i(rv).(map[float64]uint32), false, d) } -func (f fastpathT) DecMapFloat64Uint32X(vp *map[float64]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64Uint32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapFloat64Uint32X(vp *map[float64]uint32, d *Decoder) { + if v, changed := f.DecMapFloat64Uint32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapFloat64Uint32V(v map[float64]uint32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapFloat64Uint32V(v map[float64]uint32, canChange bool, d *Decoder) (_ map[float64]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) v = make(map[float64]uint32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk float64 var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapFloat64Uint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]uint64) - v, changed := fastpathTV.DecMapFloat64Uint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapFloat64Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]uint64) + if v, changed := fastpathTV.DecMapFloat64Uint64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[float64]uint64) - fastpathTV.DecMapFloat64Uint64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapFloat64Uint64V(rv2i(rv).(map[float64]uint64), false, d) } -func (f fastpathT) DecMapFloat64Uint64X(vp *map[float64]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64Uint64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapFloat64Uint64X(vp *map[float64]uint64, d *Decoder) { + if v, changed := 
f.DecMapFloat64Uint64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapFloat64Uint64V(v map[float64]uint64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapFloat64Uint64V(v map[float64]uint64, canChange bool, d *Decoder) (_ map[float64]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) v = make(map[float64]uint64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk float64 var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapFloat64UintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]uintptr) - v, changed := fastpathTV.DecMapFloat64UintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapFloat64UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]uintptr) + if v, changed := fastpathTV.DecMapFloat64UintptrV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[float64]uintptr) - fastpathTV.DecMapFloat64UintptrV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapFloat64UintptrV(rv2i(rv).(map[float64]uintptr), false, d) } -func (f fastpathT) DecMapFloat64UintptrX(vp *map[float64]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64UintptrV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapFloat64UintptrX(vp *map[float64]uintptr, d *Decoder) { + if v, changed := f.DecMapFloat64UintptrV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapFloat64UintptrV(v map[float64]uintptr, checkNil bool, canChange bool, +func (_ fastpathT) DecMapFloat64UintptrV(v map[float64]uintptr, canChange bool, d *Decoder) (_ map[float64]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) v = make(map[float64]uintptr, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, 
changed + } var mk float64 var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapFloat64IntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]int) - v, changed := fastpathTV.DecMapFloat64IntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapFloat64IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]int) + if v, changed := fastpathTV.DecMapFloat64IntV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[float64]int) - fastpathTV.DecMapFloat64IntV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapFloat64IntV(rv2i(rv).(map[float64]int), false, d) } -func (f fastpathT) DecMapFloat64IntX(vp *map[float64]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64IntV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapFloat64IntX(vp *map[float64]int, d *Decoder) { + if v, changed := f.DecMapFloat64IntV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapFloat64IntV(v map[float64]int, checkNil bool, canChange bool, +func (_ fastpathT) DecMapFloat64IntV(v map[float64]int, canChange bool, d *Decoder) (_ map[float64]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) v = make(map[float64]int, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk float64 var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv + mk = 
dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapFloat64Int8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]int8) - v, changed := fastpathTV.DecMapFloat64Int8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapFloat64Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]int8) + if v, changed := fastpathTV.DecMapFloat64Int8V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[float64]int8) - fastpathTV.DecMapFloat64Int8V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapFloat64Int8V(rv2i(rv).(map[float64]int8), false, d) } -func (f fastpathT) DecMapFloat64Int8X(vp *map[float64]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64Int8V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapFloat64Int8X(vp *map[float64]int8, d *Decoder) { + if v, changed := f.DecMapFloat64Int8V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapFloat64Int8V(v map[float64]int8, checkNil bool, canChange bool, +func (_ fastpathT) DecMapFloat64Int8V(v map[float64]int8, canChange bool, d *Decoder) (_ map[float64]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) v = make(map[float64]int8, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk float64 var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapFloat64Int16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]int16) - v, changed := fastpathTV.DecMapFloat64Int16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapFloat64Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]int16) + if v, changed := fastpathTV.DecMapFloat64Int16V(*vp, 
true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[float64]int16) - fastpathTV.DecMapFloat64Int16V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapFloat64Int16V(rv2i(rv).(map[float64]int16), false, d) } -func (f fastpathT) DecMapFloat64Int16X(vp *map[float64]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64Int16V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapFloat64Int16X(vp *map[float64]int16, d *Decoder) { + if v, changed := f.DecMapFloat64Int16V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapFloat64Int16V(v map[float64]int16, checkNil bool, canChange bool, +func (_ fastpathT) DecMapFloat64Int16V(v map[float64]int16, canChange bool, d *Decoder) (_ map[float64]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) v = make(map[float64]int16, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk float64 var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapFloat64Int32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]int32) - v, changed := fastpathTV.DecMapFloat64Int32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapFloat64Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]int32) + if v, changed := fastpathTV.DecMapFloat64Int32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[float64]int32) - fastpathTV.DecMapFloat64Int32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapFloat64Int32V(rv2i(rv).(map[float64]int32), false, d) } -func (f fastpathT) DecMapFloat64Int32X(vp *map[float64]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64Int32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapFloat64Int32X(vp *map[float64]int32, d *Decoder) { + if v, changed := f.DecMapFloat64Int32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapFloat64Int32V(v map[float64]int32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapFloat64Int32V(v map[float64]int32, canChange bool, d *Decoder) (_ map[float64]int32, 
changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) v = make(map[float64]int32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk float64 var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapFloat64Int64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]int64) - v, changed := fastpathTV.DecMapFloat64Int64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapFloat64Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]int64) + if v, changed := fastpathTV.DecMapFloat64Int64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[float64]int64) - fastpathTV.DecMapFloat64Int64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapFloat64Int64V(rv2i(rv).(map[float64]int64), false, d) } -func (f fastpathT) DecMapFloat64Int64X(vp *map[float64]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64Int64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapFloat64Int64X(vp *map[float64]int64, d *Decoder) { + if v, changed := f.DecMapFloat64Int64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapFloat64Int64V(v map[float64]int64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapFloat64Int64V(v map[float64]int64, canChange bool, d *Decoder) (_ map[float64]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) v = make(map[float64]int64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk float64 var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { 
- v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapFloat64Float32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]float32) - v, changed := fastpathTV.DecMapFloat64Float32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapFloat64Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]float32) + if v, changed := fastpathTV.DecMapFloat64Float32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[float64]float32) - fastpathTV.DecMapFloat64Float32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapFloat64Float32V(rv2i(rv).(map[float64]float32), false, d) } -func (f fastpathT) DecMapFloat64Float32X(vp *map[float64]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64Float32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapFloat64Float32X(vp *map[float64]float32, d *Decoder) { + if v, changed := f.DecMapFloat64Float32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapFloat64Float32V(v map[float64]float32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapFloat64Float32V(v map[float64]float32, canChange bool, d *Decoder) (_ map[float64]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) v = make(map[float64]float32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk float64 var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv } } - if cr != nil { - 
cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapFloat64Float64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]float64) - v, changed := fastpathTV.DecMapFloat64Float64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapFloat64Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]float64) + if v, changed := fastpathTV.DecMapFloat64Float64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[float64]float64) - fastpathTV.DecMapFloat64Float64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapFloat64Float64V(rv2i(rv).(map[float64]float64), false, d) } -func (f fastpathT) DecMapFloat64Float64X(vp *map[float64]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64Float64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapFloat64Float64X(vp *map[float64]float64, d *Decoder) { + if v, changed := f.DecMapFloat64Float64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapFloat64Float64V(v map[float64]float64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapFloat64Float64V(v map[float64]float64, canChange bool, d *Decoder) (_ map[float64]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) v = make(map[float64]float64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk float64 var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapFloat64BoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[float64]bool) - v, changed := fastpathTV.DecMapFloat64BoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapFloat64BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]bool) + if v, changed := fastpathTV.DecMapFloat64BoolV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[float64]bool) - fastpathTV.DecMapFloat64BoolV(v, fastpathCheckNilFalse, false, f.d) + return } + 
fastpathTV.DecMapFloat64BoolV(rv2i(rv).(map[float64]bool), false, d) } -func (f fastpathT) DecMapFloat64BoolX(vp *map[float64]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapFloat64BoolV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapFloat64BoolX(vp *map[float64]bool, d *Decoder) { + if v, changed := f.DecMapFloat64BoolV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapFloat64BoolV(v map[float64]bool, checkNil bool, canChange bool, +func (_ fastpathT) DecMapFloat64BoolV(v map[float64]bool, canChange bool, d *Decoder) (_ map[float64]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) v = make(map[float64]bool, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk float64 var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeFloat(false) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv + mk = dd.DecodeFloat(false) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUintIntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]interface{}) - v, changed := fastpathTV.DecMapUintIntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUintIntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]interface{}) + if v, changed := fastpathTV.DecMapUintIntfV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint]interface{}) - fastpathTV.DecMapUintIntfV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUintIntfV(rv2i(rv).(map[uint]interface{}), false, d) } -func (f fastpathT) DecMapUintIntfX(vp *map[uint]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintIntfV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUintIntfX(vp *map[uint]interface{}, d *Decoder) { + if v, changed := f.DecMapUintIntfV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUintIntfV(v map[uint]interface{}, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUintIntfV(v map[uint]interface{}, canChange bool, d *Decoder) (_ map[uint]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if 
canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) v = make(map[uint]interface{}, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } mapGet := !d.h.MapValueReset && !d.h.InterfaceReset var mk uint var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv + v[mk] = nil } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUintStringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]string) - v, changed := fastpathTV.DecMapUintStringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUintStringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]string) + if v, changed := fastpathTV.DecMapUintStringV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint]string) - fastpathTV.DecMapUintStringV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUintStringV(rv2i(rv).(map[uint]string), false, d) } -func (f fastpathT) DecMapUintStringX(vp *map[uint]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintStringV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUintStringX(vp *map[uint]string, d *Decoder) { + if v, changed := f.DecMapUintStringV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUintStringV(v map[uint]string, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUintStringV(v map[uint]string, canChange bool, d *Decoder) (_ map[uint]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) v = make(map[uint]string, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; 
(hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUintUintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]uint) - v, changed := fastpathTV.DecMapUintUintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUintUintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]uint) + if v, changed := fastpathTV.DecMapUintUintV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint]uint) - fastpathTV.DecMapUintUintV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUintUintV(rv2i(rv).(map[uint]uint), false, d) } -func (f fastpathT) DecMapUintUintX(vp *map[uint]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintUintV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUintUintX(vp *map[uint]uint, d *Decoder) { + if v, changed := f.DecMapUintUintV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUintUintV(v map[uint]uint, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUintUintV(v map[uint]uint, canChange bool, d *Decoder) (_ map[uint]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) v = make(map[uint]uint, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUintUint8R(rv 
reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]uint8) - v, changed := fastpathTV.DecMapUintUint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUintUint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]uint8) + if v, changed := fastpathTV.DecMapUintUint8V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint]uint8) - fastpathTV.DecMapUintUint8V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUintUint8V(rv2i(rv).(map[uint]uint8), false, d) } -func (f fastpathT) DecMapUintUint8X(vp *map[uint]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintUint8V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUintUint8X(vp *map[uint]uint8, d *Decoder) { + if v, changed := f.DecMapUintUint8V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUintUint8V(v map[uint]uint8, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUintUint8V(v map[uint]uint8, canChange bool, d *Decoder) (_ map[uint]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) v = make(map[uint]uint8, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUintUint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]uint16) - v, changed := fastpathTV.DecMapUintUint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUintUint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]uint16) + if v, changed := fastpathTV.DecMapUintUint16V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint]uint16) - fastpathTV.DecMapUintUint16V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUintUint16V(rv2i(rv).(map[uint]uint16), false, d) } -func (f fastpathT) DecMapUintUint16X(vp *map[uint]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintUint16V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) 
DecMapUintUint16X(vp *map[uint]uint16, d *Decoder) { + if v, changed := f.DecMapUintUint16V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUintUint16V(v map[uint]uint16, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUintUint16V(v map[uint]uint16, canChange bool, d *Decoder) (_ map[uint]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) v = make(map[uint]uint16, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUintUint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]uint32) - v, changed := fastpathTV.DecMapUintUint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUintUint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]uint32) + if v, changed := fastpathTV.DecMapUintUint32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint]uint32) - fastpathTV.DecMapUintUint32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUintUint32V(rv2i(rv).(map[uint]uint32), false, d) } -func (f fastpathT) DecMapUintUint32X(vp *map[uint]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintUint32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUintUint32X(vp *map[uint]uint32, d *Decoder) { + if v, changed := f.DecMapUintUint32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUintUint32V(v map[uint]uint32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUintUint32V(v map[uint]uint32, canChange bool, d *Decoder) (_ map[uint]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) v = make(map[uint]uint32, xlen) changed = true } + if containerLen == 0 { + 
dd.ReadMapEnd() + return v, changed + } var mk uint var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUintUint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]uint64) - v, changed := fastpathTV.DecMapUintUint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUintUint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]uint64) + if v, changed := fastpathTV.DecMapUintUint64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint]uint64) - fastpathTV.DecMapUintUint64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUintUint64V(rv2i(rv).(map[uint]uint64), false, d) } -func (f fastpathT) DecMapUintUint64X(vp *map[uint]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintUint64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUintUint64X(vp *map[uint]uint64, d *Decoder) { + if v, changed := f.DecMapUintUint64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUintUint64V(v map[uint]uint64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUintUint64V(v map[uint]uint64, canChange bool, d *Decoder) (_ map[uint]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) v = make(map[uint]uint64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { 
- v[mk] = mv + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUintUintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]uintptr) - v, changed := fastpathTV.DecMapUintUintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUintUintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]uintptr) + if v, changed := fastpathTV.DecMapUintUintptrV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint]uintptr) - fastpathTV.DecMapUintUintptrV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUintUintptrV(rv2i(rv).(map[uint]uintptr), false, d) } -func (f fastpathT) DecMapUintUintptrX(vp *map[uint]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintUintptrV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUintUintptrX(vp *map[uint]uintptr, d *Decoder) { + if v, changed := f.DecMapUintUintptrV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUintUintptrV(v map[uint]uintptr, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUintUintptrV(v map[uint]uintptr, canChange bool, d *Decoder) (_ map[uint]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) v = make(map[uint]uintptr, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUintIntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]int) - v, changed := fastpathTV.DecMapUintIntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUintIntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := 
rv2i(rv).(*map[uint]int) + if v, changed := fastpathTV.DecMapUintIntV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint]int) - fastpathTV.DecMapUintIntV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUintIntV(rv2i(rv).(map[uint]int), false, d) } -func (f fastpathT) DecMapUintIntX(vp *map[uint]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintIntV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUintIntX(vp *map[uint]int, d *Decoder) { + if v, changed := f.DecMapUintIntV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUintIntV(v map[uint]int, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUintIntV(v map[uint]int, canChange bool, d *Decoder) (_ map[uint]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) v = make(map[uint]int, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUintInt8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]int8) - v, changed := fastpathTV.DecMapUintInt8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUintInt8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]int8) + if v, changed := fastpathTV.DecMapUintInt8V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint]int8) - fastpathTV.DecMapUintInt8V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUintInt8V(rv2i(rv).(map[uint]int8), false, d) } -func (f fastpathT) DecMapUintInt8X(vp *map[uint]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintInt8V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUintInt8X(vp *map[uint]int8, d *Decoder) { + if v, changed := f.DecMapUintInt8V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUintInt8V(v map[uint]int8, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUintInt8V(v map[uint]int8, canChange bool, d *Decoder) (_ map[uint]int8, changed bool) { - dd := d.d - cr := d.cr - - 
if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) v = make(map[uint]int8, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUintInt16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]int16) - v, changed := fastpathTV.DecMapUintInt16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUintInt16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]int16) + if v, changed := fastpathTV.DecMapUintInt16V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint]int16) - fastpathTV.DecMapUintInt16V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUintInt16V(rv2i(rv).(map[uint]int16), false, d) } -func (f fastpathT) DecMapUintInt16X(vp *map[uint]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintInt16V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUintInt16X(vp *map[uint]int16, d *Decoder) { + if v, changed := f.DecMapUintInt16V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUintInt16V(v map[uint]int16, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUintInt16V(v map[uint]int16, canChange bool, d *Decoder) (_ map[uint]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) v = make(map[uint]int16, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < 
containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUintInt32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]int32) - v, changed := fastpathTV.DecMapUintInt32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUintInt32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]int32) + if v, changed := fastpathTV.DecMapUintInt32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint]int32) - fastpathTV.DecMapUintInt32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUintInt32V(rv2i(rv).(map[uint]int32), false, d) } -func (f fastpathT) DecMapUintInt32X(vp *map[uint]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintInt32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUintInt32X(vp *map[uint]int32, d *Decoder) { + if v, changed := f.DecMapUintInt32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUintInt32V(v map[uint]int32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUintInt32V(v map[uint]int32, canChange bool, d *Decoder) (_ map[uint]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) v = make(map[uint]int32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUintInt64R(rv 
reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]int64) - v, changed := fastpathTV.DecMapUintInt64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUintInt64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]int64) + if v, changed := fastpathTV.DecMapUintInt64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint]int64) - fastpathTV.DecMapUintInt64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUintInt64V(rv2i(rv).(map[uint]int64), false, d) } -func (f fastpathT) DecMapUintInt64X(vp *map[uint]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintInt64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUintInt64X(vp *map[uint]int64, d *Decoder) { + if v, changed := f.DecMapUintInt64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUintInt64V(v map[uint]int64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUintInt64V(v map[uint]int64, canChange bool, d *Decoder) (_ map[uint]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) v = make(map[uint]int64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUintFloat32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]float32) - v, changed := fastpathTV.DecMapUintFloat32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUintFloat32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]float32) + if v, changed := fastpathTV.DecMapUintFloat32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint]float32) - fastpathTV.DecMapUintFloat32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUintFloat32V(rv2i(rv).(map[uint]float32), false, d) } -func (f fastpathT) DecMapUintFloat32X(vp *map[uint]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintFloat32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) 
DecMapUintFloat32X(vp *map[uint]float32, d *Decoder) { + if v, changed := f.DecMapUintFloat32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUintFloat32V(v map[uint]float32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUintFloat32V(v map[uint]float32, canChange bool, d *Decoder) (_ map[uint]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) v = make(map[uint]float32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUintFloat64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]float64) - v, changed := fastpathTV.DecMapUintFloat64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUintFloat64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]float64) + if v, changed := fastpathTV.DecMapUintFloat64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint]float64) - fastpathTV.DecMapUintFloat64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUintFloat64V(rv2i(rv).(map[uint]float64), false, d) } -func (f fastpathT) DecMapUintFloat64X(vp *map[uint]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintFloat64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUintFloat64X(vp *map[uint]float64, d *Decoder) { + if v, changed := f.DecMapUintFloat64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUintFloat64V(v map[uint]float64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUintFloat64V(v map[uint]float64, canChange bool, d *Decoder) (_ map[uint]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) v = make(map[uint]float64, xlen) changed = 
true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv + mk = uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUintBoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint]bool) - v, changed := fastpathTV.DecMapUintBoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUintBoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]bool) + if v, changed := fastpathTV.DecMapUintBoolV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint]bool) - fastpathTV.DecMapUintBoolV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUintBoolV(rv2i(rv).(map[uint]bool), false, d) } -func (f fastpathT) DecMapUintBoolX(vp *map[uint]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintBoolV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUintBoolX(vp *map[uint]bool, d *Decoder) { + if v, changed := f.DecMapUintBoolV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUintBoolV(v map[uint]bool, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUintBoolV(v map[uint]bool, canChange bool, d *Decoder) (_ map[uint]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) v = make(map[uint]bool, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv + mk = 
uint(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint8IntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]interface{}) - v, changed := fastpathTV.DecMapUint8IntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint8IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]interface{}) + if v, changed := fastpathTV.DecMapUint8IntfV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint8]interface{}) - fastpathTV.DecMapUint8IntfV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint8IntfV(rv2i(rv).(map[uint8]interface{}), false, d) } -func (f fastpathT) DecMapUint8IntfX(vp *map[uint8]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8IntfV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint8IntfX(vp *map[uint8]interface{}, d *Decoder) { + if v, changed := f.DecMapUint8IntfV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint8IntfV(v map[uint8]interface{}, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint8IntfV(v map[uint8]interface{}, canChange bool, d *Decoder) (_ map[uint8]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) v = make(map[uint8]interface{}, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } mapGet := !d.h.MapValueReset && !d.h.InterfaceReset var mk uint8 var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv + v[mk] = nil } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint8StringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]string) - v, changed := fastpathTV.DecMapUint8StringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d 
*Decoder) fastpathDecMapUint8StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]string) + if v, changed := fastpathTV.DecMapUint8StringV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint8]string) - fastpathTV.DecMapUint8StringV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint8StringV(rv2i(rv).(map[uint8]string), false, d) } -func (f fastpathT) DecMapUint8StringX(vp *map[uint8]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8StringV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint8StringX(vp *map[uint8]string, d *Decoder) { + if v, changed := f.DecMapUint8StringV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint8StringV(v map[uint8]string, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint8StringV(v map[uint8]string, canChange bool, d *Decoder) (_ map[uint8]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) v = make(map[uint8]string, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint8 var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint8UintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]uint) - v, changed := fastpathTV.DecMapUint8UintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint8UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]uint) + if v, changed := fastpathTV.DecMapUint8UintV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint8]uint) - fastpathTV.DecMapUint8UintV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint8UintV(rv2i(rv).(map[uint8]uint), false, d) } -func (f fastpathT) DecMapUint8UintX(vp *map[uint8]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8UintV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint8UintX(vp *map[uint8]uint, d *Decoder) { + if v, changed := f.DecMapUint8UintV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint8UintV(v map[uint8]uint, checkNil bool, canChange 
bool, +func (_ fastpathT) DecMapUint8UintV(v map[uint8]uint, canChange bool, d *Decoder) (_ map[uint8]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) v = make(map[uint8]uint, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint8 var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint8Uint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]uint8) - v, changed := fastpathTV.DecMapUint8Uint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint8Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]uint8) + if v, changed := fastpathTV.DecMapUint8Uint8V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint8]uint8) - fastpathTV.DecMapUint8Uint8V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint8Uint8V(rv2i(rv).(map[uint8]uint8), false, d) } -func (f fastpathT) DecMapUint8Uint8X(vp *map[uint8]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Uint8V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint8Uint8X(vp *map[uint8]uint8, d *Decoder) { + if v, changed := f.DecMapUint8Uint8V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint8Uint8V(v map[uint8]uint8, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint8Uint8V(v map[uint8]uint8, canChange bool, d *Decoder) (_ map[uint8]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) v = make(map[uint8]uint8, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint8 var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { 
- cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint8Uint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]uint16) - v, changed := fastpathTV.DecMapUint8Uint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint8Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]uint16) + if v, changed := fastpathTV.DecMapUint8Uint16V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint8]uint16) - fastpathTV.DecMapUint8Uint16V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint8Uint16V(rv2i(rv).(map[uint8]uint16), false, d) } -func (f fastpathT) DecMapUint8Uint16X(vp *map[uint8]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Uint16V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint8Uint16X(vp *map[uint8]uint16, d *Decoder) { + if v, changed := f.DecMapUint8Uint16V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint8Uint16V(v map[uint8]uint16, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint8Uint16V(v map[uint8]uint16, canChange bool, d *Decoder) (_ map[uint8]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) v = make(map[uint8]uint16, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint8 var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + 
v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint8Uint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]uint32) - v, changed := fastpathTV.DecMapUint8Uint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint8Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]uint32) + if v, changed := fastpathTV.DecMapUint8Uint32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint8]uint32) - fastpathTV.DecMapUint8Uint32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint8Uint32V(rv2i(rv).(map[uint8]uint32), false, d) } -func (f fastpathT) DecMapUint8Uint32X(vp *map[uint8]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Uint32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint8Uint32X(vp *map[uint8]uint32, d *Decoder) { + if v, changed := f.DecMapUint8Uint32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint8Uint32V(v map[uint8]uint32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint8Uint32V(v map[uint8]uint32, canChange bool, d *Decoder) (_ map[uint8]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) v = make(map[uint8]uint32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint8 var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint8Uint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]uint64) - v, changed := fastpathTV.DecMapUint8Uint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint8Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]uint64) + if v, changed := fastpathTV.DecMapUint8Uint64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint8]uint64) - fastpathTV.DecMapUint8Uint64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint8Uint64V(rv2i(rv).(map[uint8]uint64), 
false, d) } -func (f fastpathT) DecMapUint8Uint64X(vp *map[uint8]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Uint64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint8Uint64X(vp *map[uint8]uint64, d *Decoder) { + if v, changed := f.DecMapUint8Uint64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint8Uint64V(v map[uint8]uint64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint8Uint64V(v map[uint8]uint64, canChange bool, d *Decoder) (_ map[uint8]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) v = make(map[uint8]uint64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint8 var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint8UintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]uintptr) - v, changed := fastpathTV.DecMapUint8UintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint8UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]uintptr) + if v, changed := fastpathTV.DecMapUint8UintptrV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint8]uintptr) - fastpathTV.DecMapUint8UintptrV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint8UintptrV(rv2i(rv).(map[uint8]uintptr), false, d) } -func (f fastpathT) DecMapUint8UintptrX(vp *map[uint8]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8UintptrV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint8UintptrX(vp *map[uint8]uintptr, d *Decoder) { + if v, changed := f.DecMapUint8UintptrV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint8UintptrV(v map[uint8]uintptr, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint8UintptrV(v map[uint8]uintptr, canChange bool, d *Decoder) (_ map[uint8]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - 
xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) v = make(map[uint8]uintptr, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint8 var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint8IntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]int) - v, changed := fastpathTV.DecMapUint8IntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint8IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]int) + if v, changed := fastpathTV.DecMapUint8IntV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint8]int) - fastpathTV.DecMapUint8IntV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint8IntV(rv2i(rv).(map[uint8]int), false, d) } -func (f fastpathT) DecMapUint8IntX(vp *map[uint8]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8IntV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint8IntX(vp *map[uint8]int, d *Decoder) { + if v, changed := f.DecMapUint8IntV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint8IntV(v map[uint8]int, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint8IntV(v map[uint8]int, canChange bool, d *Decoder) (_ map[uint8]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) v = make(map[uint8]int, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint8 var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) 
- } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint8Int8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]int8) - v, changed := fastpathTV.DecMapUint8Int8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint8Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]int8) + if v, changed := fastpathTV.DecMapUint8Int8V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint8]int8) - fastpathTV.DecMapUint8Int8V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint8Int8V(rv2i(rv).(map[uint8]int8), false, d) } -func (f fastpathT) DecMapUint8Int8X(vp *map[uint8]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Int8V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint8Int8X(vp *map[uint8]int8, d *Decoder) { + if v, changed := f.DecMapUint8Int8V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint8Int8V(v map[uint8]int8, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint8Int8V(v map[uint8]int8, canChange bool, d *Decoder) (_ map[uint8]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) v = make(map[uint8]int8, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint8 var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint8Int16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]int16) - v, changed := fastpathTV.DecMapUint8Int16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint8Int16R(f *codecFnInfo, rv reflect.Value) { + 
if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]int16) + if v, changed := fastpathTV.DecMapUint8Int16V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint8]int16) - fastpathTV.DecMapUint8Int16V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint8Int16V(rv2i(rv).(map[uint8]int16), false, d) } -func (f fastpathT) DecMapUint8Int16X(vp *map[uint8]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Int16V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint8Int16X(vp *map[uint8]int16, d *Decoder) { + if v, changed := f.DecMapUint8Int16V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint8Int16V(v map[uint8]int16, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint8Int16V(v map[uint8]int16, canChange bool, d *Decoder) (_ map[uint8]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) v = make(map[uint8]int16, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint8 var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint8Int32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]int32) - v, changed := fastpathTV.DecMapUint8Int32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint8Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]int32) + if v, changed := fastpathTV.DecMapUint8Int32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint8]int32) - fastpathTV.DecMapUint8Int32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint8Int32V(rv2i(rv).(map[uint8]int32), false, d) } -func (f fastpathT) DecMapUint8Int32X(vp *map[uint8]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Int32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint8Int32X(vp *map[uint8]int32, d *Decoder) { + if v, changed := f.DecMapUint8Int32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint8Int32V(v map[uint8]int32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint8Int32V(v map[uint8]int32, 
canChange bool, d *Decoder) (_ map[uint8]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) v = make(map[uint8]int32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint8 var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint8Int64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]int64) - v, changed := fastpathTV.DecMapUint8Int64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint8Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]int64) + if v, changed := fastpathTV.DecMapUint8Int64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint8]int64) - fastpathTV.DecMapUint8Int64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint8Int64V(rv2i(rv).(map[uint8]int64), false, d) } -func (f fastpathT) DecMapUint8Int64X(vp *map[uint8]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Int64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint8Int64X(vp *map[uint8]int64, d *Decoder) { + if v, changed := f.DecMapUint8Int64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint8Int64V(v map[uint8]int64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint8Int64V(v map[uint8]int64, canChange bool, d *Decoder) (_ map[uint8]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) v = make(map[uint8]int64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint8 var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil 
{ - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint8Float32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]float32) - v, changed := fastpathTV.DecMapUint8Float32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint8Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]float32) + if v, changed := fastpathTV.DecMapUint8Float32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint8]float32) - fastpathTV.DecMapUint8Float32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint8Float32V(rv2i(rv).(map[uint8]float32), false, d) } -func (f fastpathT) DecMapUint8Float32X(vp *map[uint8]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Float32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint8Float32X(vp *map[uint8]float32, d *Decoder) { + if v, changed := f.DecMapUint8Float32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint8Float32V(v map[uint8]float32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint8Float32V(v map[uint8]float32, canChange bool, d *Decoder) (_ map[uint8]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) v = make(map[uint8]float32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint8 var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - 
} + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint8Float64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]float64) - v, changed := fastpathTV.DecMapUint8Float64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint8Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]float64) + if v, changed := fastpathTV.DecMapUint8Float64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint8]float64) - fastpathTV.DecMapUint8Float64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint8Float64V(rv2i(rv).(map[uint8]float64), false, d) } -func (f fastpathT) DecMapUint8Float64X(vp *map[uint8]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint8Float64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint8Float64X(vp *map[uint8]float64, d *Decoder) { + if v, changed := f.DecMapUint8Float64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint8Float64V(v map[uint8]float64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint8Float64V(v map[uint8]float64, canChange bool, d *Decoder) (_ map[uint8]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) v = make(map[uint8]float64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint8 var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint8BoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint8]bool) - v, changed := fastpathTV.DecMapUint8BoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint8BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]bool) + if v, changed := fastpathTV.DecMapUint8BoolV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint8]bool) - fastpathTV.DecMapUint8BoolV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint8BoolV(rv2i(rv).(map[uint8]bool), false, d) } -func (f fastpathT) DecMapUint8BoolX(vp *map[uint8]bool, checkNil bool, 
d *Decoder) { - v, changed := f.DecMapUint8BoolV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint8BoolX(vp *map[uint8]bool, d *Decoder) { + if v, changed := f.DecMapUint8BoolV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint8BoolV(v map[uint8]bool, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint8BoolV(v map[uint8]bool, canChange bool, d *Decoder) (_ map[uint8]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) v = make(map[uint8]bool, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint8 var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint8(dd.DecodeUint(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv + mk = uint8(dd.DecodeUint(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint16IntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]interface{}) - v, changed := fastpathTV.DecMapUint16IntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint16IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]interface{}) + if v, changed := fastpathTV.DecMapUint16IntfV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint16]interface{}) - fastpathTV.DecMapUint16IntfV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint16IntfV(rv2i(rv).(map[uint16]interface{}), false, d) } -func (f fastpathT) DecMapUint16IntfX(vp *map[uint16]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16IntfV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint16IntfX(vp *map[uint16]interface{}, d *Decoder) { + if v, changed := f.DecMapUint16IntfV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint16IntfV(v map[uint16]interface{}, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint16IntfV(v map[uint16]interface{}, canChange bool, d *Decoder) (_ map[uint16]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) + xlen := decInferLen(containerLen, 
d.h.MaxInitLen, 18) v = make(map[uint16]interface{}, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } mapGet := !d.h.MapValueReset && !d.h.InterfaceReset var mk uint16 var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] + mk = uint16(dd.DecodeUint(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv + v[mk] = nil } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint16StringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]string) - v, changed := fastpathTV.DecMapUint16StringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint16StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]string) + if v, changed := fastpathTV.DecMapUint16StringV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint16]string) - fastpathTV.DecMapUint16StringV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint16StringV(rv2i(rv).(map[uint16]string), false, d) } -func (f fastpathT) DecMapUint16StringX(vp *map[uint16]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16StringV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint16StringX(vp *map[uint16]string, d *Decoder) { + if v, changed := f.DecMapUint16StringV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint16StringV(v map[uint16]string, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint16StringV(v map[uint16]string, canChange bool, d *Decoder) (_ map[uint16]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) v = make(map[uint16]string, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint16 var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } 
- } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv + mk = uint16(dd.DecodeUint(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint16UintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]uint) - v, changed := fastpathTV.DecMapUint16UintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint16UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]uint) + if v, changed := fastpathTV.DecMapUint16UintV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint16]uint) - fastpathTV.DecMapUint16UintV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint16UintV(rv2i(rv).(map[uint16]uint), false, d) } -func (f fastpathT) DecMapUint16UintX(vp *map[uint16]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16UintV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint16UintX(vp *map[uint16]uint, d *Decoder) { + if v, changed := f.DecMapUint16UintV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint16UintV(v map[uint16]uint, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint16UintV(v map[uint16]uint, canChange bool, d *Decoder) (_ map[uint16]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) v = make(map[uint16]uint, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint16 var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv + mk = uint16(dd.DecodeUint(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint16Uint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]uint8) - v, 
changed := fastpathTV.DecMapUint16Uint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint16Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]uint8) + if v, changed := fastpathTV.DecMapUint16Uint8V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint16]uint8) - fastpathTV.DecMapUint16Uint8V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint16Uint8V(rv2i(rv).(map[uint16]uint8), false, d) } -func (f fastpathT) DecMapUint16Uint8X(vp *map[uint16]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16Uint8V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint16Uint8X(vp *map[uint16]uint8, d *Decoder) { + if v, changed := f.DecMapUint16Uint8V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint16Uint8V(v map[uint16]uint8, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint16Uint8V(v map[uint16]uint8, canChange bool, d *Decoder) (_ map[uint16]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) v = make(map[uint16]uint8, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint16 var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv + mk = uint16(dd.DecodeUint(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint16Uint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]uint16) - v, changed := fastpathTV.DecMapUint16Uint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint16Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]uint16) + if v, changed := fastpathTV.DecMapUint16Uint16V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint16]uint16) - fastpathTV.DecMapUint16Uint16V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint16Uint16V(rv2i(rv).(map[uint16]uint16), false, d) } -func (f fastpathT) DecMapUint16Uint16X(vp *map[uint16]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16Uint16V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint16Uint16X(vp *map[uint16]uint16, d 
*Decoder) { + if v, changed := f.DecMapUint16Uint16V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint16Uint16V(v map[uint16]uint16, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint16Uint16V(v map[uint16]uint16, canChange bool, d *Decoder) (_ map[uint16]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 4) v = make(map[uint16]uint16, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint16 var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv + mk = uint16(dd.DecodeUint(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint16Uint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]uint32) - v, changed := fastpathTV.DecMapUint16Uint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint16Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]uint32) + if v, changed := fastpathTV.DecMapUint16Uint32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint16]uint32) - fastpathTV.DecMapUint16Uint32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint16Uint32V(rv2i(rv).(map[uint16]uint32), false, d) } -func (f fastpathT) DecMapUint16Uint32X(vp *map[uint16]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16Uint32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint16Uint32X(vp *map[uint16]uint32, d *Decoder) { + if v, changed := f.DecMapUint16Uint32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint16Uint32V(v map[uint16]uint32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint16Uint32V(v map[uint16]uint32, canChange bool, d *Decoder) (_ map[uint16]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) v = make(map[uint16]uint32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + 
return v, changed + } var mk uint16 var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv + mk = uint16(dd.DecodeUint(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint16Uint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]uint64) - v, changed := fastpathTV.DecMapUint16Uint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint16Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]uint64) + if v, changed := fastpathTV.DecMapUint16Uint64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint16]uint64) - fastpathTV.DecMapUint16Uint64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint16Uint64V(rv2i(rv).(map[uint16]uint64), false, d) } -func (f fastpathT) DecMapUint16Uint64X(vp *map[uint16]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16Uint64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint16Uint64X(vp *map[uint16]uint64, d *Decoder) { + if v, changed := f.DecMapUint16Uint64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint16Uint64V(v map[uint16]uint64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint16Uint64V(v map[uint16]uint64, canChange bool, d *Decoder) (_ map[uint16]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) v = make(map[uint16]uint64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint16 var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - 
v[mk] = mv + mk = uint16(dd.DecodeUint(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint16UintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]uintptr) - v, changed := fastpathTV.DecMapUint16UintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint16UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]uintptr) + if v, changed := fastpathTV.DecMapUint16UintptrV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint16]uintptr) - fastpathTV.DecMapUint16UintptrV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint16UintptrV(rv2i(rv).(map[uint16]uintptr), false, d) } -func (f fastpathT) DecMapUint16UintptrX(vp *map[uint16]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16UintptrV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint16UintptrX(vp *map[uint16]uintptr, d *Decoder) { + if v, changed := f.DecMapUint16UintptrV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint16UintptrV(v map[uint16]uintptr, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint16UintptrV(v map[uint16]uintptr, canChange bool, d *Decoder) (_ map[uint16]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) v = make(map[uint16]uintptr, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint16 var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv + mk = uint16(dd.DecodeUint(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint16IntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]int) - v, changed := fastpathTV.DecMapUint16IntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint16IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr 
{ + vp := rv2i(rv).(*map[uint16]int) + if v, changed := fastpathTV.DecMapUint16IntV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint16]int) - fastpathTV.DecMapUint16IntV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint16IntV(rv2i(rv).(map[uint16]int), false, d) } -func (f fastpathT) DecMapUint16IntX(vp *map[uint16]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16IntV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint16IntX(vp *map[uint16]int, d *Decoder) { + if v, changed := f.DecMapUint16IntV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint16IntV(v map[uint16]int, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint16IntV(v map[uint16]int, canChange bool, d *Decoder) (_ map[uint16]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) v = make(map[uint16]int, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint16 var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv + mk = uint16(dd.DecodeUint(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint16Int8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]int8) - v, changed := fastpathTV.DecMapUint16Int8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint16Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]int8) + if v, changed := fastpathTV.DecMapUint16Int8V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint16]int8) - fastpathTV.DecMapUint16Int8V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint16Int8V(rv2i(rv).(map[uint16]int8), false, d) } -func (f fastpathT) DecMapUint16Int8X(vp *map[uint16]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16Int8V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint16Int8X(vp *map[uint16]int8, d *Decoder) { + if v, changed := f.DecMapUint16Int8V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint16Int8V(v map[uint16]int8, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint16Int8V(v map[uint16]int8, canChange bool, d 
*Decoder) (_ map[uint16]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) v = make(map[uint16]int8, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint16 var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv + mk = uint16(dd.DecodeUint(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint16Int16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]int16) - v, changed := fastpathTV.DecMapUint16Int16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint16Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]int16) + if v, changed := fastpathTV.DecMapUint16Int16V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint16]int16) - fastpathTV.DecMapUint16Int16V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint16Int16V(rv2i(rv).(map[uint16]int16), false, d) } -func (f fastpathT) DecMapUint16Int16X(vp *map[uint16]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16Int16V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint16Int16X(vp *map[uint16]int16, d *Decoder) { + if v, changed := f.DecMapUint16Int16V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint16Int16V(v map[uint16]int16, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint16Int16V(v map[uint16]int16, canChange bool, d *Decoder) (_ map[uint16]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 4) v = make(map[uint16]int16, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint16 var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) 
- if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv + mk = uint16(dd.DecodeUint(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint16Int32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]int32) - v, changed := fastpathTV.DecMapUint16Int32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint16Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]int32) + if v, changed := fastpathTV.DecMapUint16Int32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint16]int32) - fastpathTV.DecMapUint16Int32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint16Int32V(rv2i(rv).(map[uint16]int32), false, d) } -func (f fastpathT) DecMapUint16Int32X(vp *map[uint16]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16Int32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint16Int32X(vp *map[uint16]int32, d *Decoder) { + if v, changed := f.DecMapUint16Int32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint16Int32V(v map[uint16]int32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint16Int32V(v map[uint16]int32, canChange bool, d *Decoder) (_ map[uint16]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) v = make(map[uint16]int32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint16 var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv + mk = uint16(dd.DecodeUint(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } 
+ dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint16Int64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]int64) - v, changed := fastpathTV.DecMapUint16Int64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint16Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]int64) + if v, changed := fastpathTV.DecMapUint16Int64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint16]int64) - fastpathTV.DecMapUint16Int64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint16Int64V(rv2i(rv).(map[uint16]int64), false, d) } -func (f fastpathT) DecMapUint16Int64X(vp *map[uint16]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16Int64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint16Int64X(vp *map[uint16]int64, d *Decoder) { + if v, changed := f.DecMapUint16Int64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint16Int64V(v map[uint16]int64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint16Int64V(v map[uint16]int64, canChange bool, d *Decoder) (_ map[uint16]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) v = make(map[uint16]int64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint16 var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv + mk = uint16(dd.DecodeUint(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint16Float32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]float32) - v, changed := fastpathTV.DecMapUint16Float32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint16Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]float32) + if v, changed := fastpathTV.DecMapUint16Float32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint16]float32) - fastpathTV.DecMapUint16Float32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint16Float32V(rv2i(rv).(map[uint16]float32), false, d) } -func (f fastpathT) DecMapUint16Float32X(vp 
*map[uint16]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16Float32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint16Float32X(vp *map[uint16]float32, d *Decoder) { + if v, changed := f.DecMapUint16Float32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint16Float32V(v map[uint16]float32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint16Float32V(v map[uint16]float32, canChange bool, d *Decoder) (_ map[uint16]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) v = make(map[uint16]float32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint16 var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv + mk = uint16(dd.DecodeUint(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint16Float64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]float64) - v, changed := fastpathTV.DecMapUint16Float64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint16Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]float64) + if v, changed := fastpathTV.DecMapUint16Float64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint16]float64) - fastpathTV.DecMapUint16Float64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint16Float64V(rv2i(rv).(map[uint16]float64), false, d) } -func (f fastpathT) DecMapUint16Float64X(vp *map[uint16]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16Float64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint16Float64X(vp *map[uint16]float64, d *Decoder) { + if v, changed := f.DecMapUint16Float64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint16Float64V(v map[uint16]float64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint16Float64V(v map[uint16]float64, canChange bool, d *Decoder) (_ map[uint16]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() 
if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) v = make(map[uint16]float64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint16 var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv + mk = uint16(dd.DecodeUint(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint16BoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint16]bool) - v, changed := fastpathTV.DecMapUint16BoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint16BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]bool) + if v, changed := fastpathTV.DecMapUint16BoolV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint16]bool) - fastpathTV.DecMapUint16BoolV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint16BoolV(rv2i(rv).(map[uint16]bool), false, d) } -func (f fastpathT) DecMapUint16BoolX(vp *map[uint16]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint16BoolV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint16BoolX(vp *map[uint16]bool, d *Decoder) { + if v, changed := f.DecMapUint16BoolV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint16BoolV(v map[uint16]bool, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint16BoolV(v map[uint16]bool, canChange bool, d *Decoder) (_ map[uint16]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) v = make(map[uint16]bool, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint16 var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - 
cr.sendContainerState(containerMapKey) - } - mk = uint16(dd.DecodeUint(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv + mk = uint16(dd.DecodeUint(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint32IntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]interface{}) - v, changed := fastpathTV.DecMapUint32IntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint32IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]interface{}) + if v, changed := fastpathTV.DecMapUint32IntfV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint32]interface{}) - fastpathTV.DecMapUint32IntfV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint32IntfV(rv2i(rv).(map[uint32]interface{}), false, d) } -func (f fastpathT) DecMapUint32IntfX(vp *map[uint32]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32IntfV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint32IntfX(vp *map[uint32]interface{}, d *Decoder) { + if v, changed := f.DecMapUint32IntfV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint32IntfV(v map[uint32]interface{}, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint32IntfV(v map[uint32]interface{}, canChange bool, d *Decoder) (_ map[uint32]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) v = make(map[uint32]interface{}, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } mapGet := !d.h.MapValueReset && !d.h.InterfaceReset var mk uint32 var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] + mk = uint32(dd.DecodeUint(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv + v[mk] = nil } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) 
fastpathDecMapUint32StringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]string) - v, changed := fastpathTV.DecMapUint32StringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint32StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]string) + if v, changed := fastpathTV.DecMapUint32StringV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint32]string) - fastpathTV.DecMapUint32StringV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint32StringV(rv2i(rv).(map[uint32]string), false, d) } -func (f fastpathT) DecMapUint32StringX(vp *map[uint32]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32StringV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint32StringX(vp *map[uint32]string, d *Decoder) { + if v, changed := f.DecMapUint32StringV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint32StringV(v map[uint32]string, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint32StringV(v map[uint32]string, canChange bool, d *Decoder) (_ map[uint32]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) v = make(map[uint32]string, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint32 var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv + mk = uint32(dd.DecodeUint(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint32UintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]uint) - v, changed := fastpathTV.DecMapUint32UintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint32UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]uint) + if v, changed := fastpathTV.DecMapUint32UintV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint32]uint) - fastpathTV.DecMapUint32UintV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint32UintV(rv2i(rv).(map[uint32]uint), false, d) } -func (f fastpathT) DecMapUint32UintX(vp *map[uint32]uint, checkNil bool, d *Decoder) { - v, changed := 
f.DecMapUint32UintV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint32UintX(vp *map[uint32]uint, d *Decoder) { + if v, changed := f.DecMapUint32UintV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint32UintV(v map[uint32]uint, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint32UintV(v map[uint32]uint, canChange bool, d *Decoder) (_ map[uint32]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) v = make(map[uint32]uint, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint32 var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv + mk = uint32(dd.DecodeUint(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint32Uint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]uint8) - v, changed := fastpathTV.DecMapUint32Uint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint32Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]uint8) + if v, changed := fastpathTV.DecMapUint32Uint8V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint32]uint8) - fastpathTV.DecMapUint32Uint8V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint32Uint8V(rv2i(rv).(map[uint32]uint8), false, d) } -func (f fastpathT) DecMapUint32Uint8X(vp *map[uint32]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Uint8V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint32Uint8X(vp *map[uint32]uint8, d *Decoder) { + if v, changed := f.DecMapUint32Uint8V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint32Uint8V(v map[uint32]uint8, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint32Uint8V(v map[uint32]uint8, canChange bool, d *Decoder) (_ map[uint32]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + xlen := decInferLen(containerLen, 
d.h.MaxInitLen, 5) v = make(map[uint32]uint8, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint32 var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv + mk = uint32(dd.DecodeUint(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint32Uint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]uint16) - v, changed := fastpathTV.DecMapUint32Uint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint32Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]uint16) + if v, changed := fastpathTV.DecMapUint32Uint16V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint32]uint16) - fastpathTV.DecMapUint32Uint16V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint32Uint16V(rv2i(rv).(map[uint32]uint16), false, d) } -func (f fastpathT) DecMapUint32Uint16X(vp *map[uint32]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Uint16V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint32Uint16X(vp *map[uint32]uint16, d *Decoder) { + if v, changed := f.DecMapUint32Uint16V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint32Uint16V(v map[uint32]uint16, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint32Uint16V(v map[uint32]uint16, canChange bool, d *Decoder) (_ map[uint32]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) v = make(map[uint32]uint16, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint32 var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = 
uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv + mk = uint32(dd.DecodeUint(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint32Uint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]uint32) - v, changed := fastpathTV.DecMapUint32Uint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint32Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]uint32) + if v, changed := fastpathTV.DecMapUint32Uint32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint32]uint32) - fastpathTV.DecMapUint32Uint32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint32Uint32V(rv2i(rv).(map[uint32]uint32), false, d) } -func (f fastpathT) DecMapUint32Uint32X(vp *map[uint32]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Uint32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint32Uint32X(vp *map[uint32]uint32, d *Decoder) { + if v, changed := f.DecMapUint32Uint32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint32Uint32V(v map[uint32]uint32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint32Uint32V(v map[uint32]uint32, canChange bool, d *Decoder) (_ map[uint32]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) v = make(map[uint32]uint32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint32 var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv + mk = uint32(dd.DecodeUint(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint32Uint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]uint64) - v, changed := fastpathTV.DecMapUint32Uint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func 
(d *Decoder) fastpathDecMapUint32Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]uint64) + if v, changed := fastpathTV.DecMapUint32Uint64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint32]uint64) - fastpathTV.DecMapUint32Uint64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint32Uint64V(rv2i(rv).(map[uint32]uint64), false, d) } -func (f fastpathT) DecMapUint32Uint64X(vp *map[uint32]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Uint64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint32Uint64X(vp *map[uint32]uint64, d *Decoder) { + if v, changed := f.DecMapUint32Uint64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint32Uint64V(v map[uint32]uint64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint32Uint64V(v map[uint32]uint64, canChange bool, d *Decoder) (_ map[uint32]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) v = make(map[uint32]uint64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint32 var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv + mk = uint32(dd.DecodeUint(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint32UintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]uintptr) - v, changed := fastpathTV.DecMapUint32UintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint32UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]uintptr) + if v, changed := fastpathTV.DecMapUint32UintptrV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint32]uintptr) - fastpathTV.DecMapUint32UintptrV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint32UintptrV(rv2i(rv).(map[uint32]uintptr), false, d) } -func (f fastpathT) DecMapUint32UintptrX(vp *map[uint32]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32UintptrV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint32UintptrX(vp *map[uint32]uintptr, d *Decoder) { + if v, changed := f.DecMapUint32UintptrV(*vp, true, d); changed { *vp 
= v } } -func (_ fastpathT) DecMapUint32UintptrV(v map[uint32]uintptr, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint32UintptrV(v map[uint32]uintptr, canChange bool, d *Decoder) (_ map[uint32]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) v = make(map[uint32]uintptr, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint32 var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv + mk = uint32(dd.DecodeUint(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint32IntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]int) - v, changed := fastpathTV.DecMapUint32IntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint32IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]int) + if v, changed := fastpathTV.DecMapUint32IntV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint32]int) - fastpathTV.DecMapUint32IntV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint32IntV(rv2i(rv).(map[uint32]int), false, d) } -func (f fastpathT) DecMapUint32IntX(vp *map[uint32]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32IntV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint32IntX(vp *map[uint32]int, d *Decoder) { + if v, changed := f.DecMapUint32IntV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint32IntV(v map[uint32]int, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint32IntV(v map[uint32]int, canChange bool, d *Decoder) (_ map[uint32]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) v = make(map[uint32]int, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint32 var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ 
{ - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv + mk = uint32(dd.DecodeUint(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint32Int8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]int8) - v, changed := fastpathTV.DecMapUint32Int8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint32Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]int8) + if v, changed := fastpathTV.DecMapUint32Int8V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint32]int8) - fastpathTV.DecMapUint32Int8V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint32Int8V(rv2i(rv).(map[uint32]int8), false, d) } -func (f fastpathT) DecMapUint32Int8X(vp *map[uint32]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Int8V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint32Int8X(vp *map[uint32]int8, d *Decoder) { + if v, changed := f.DecMapUint32Int8V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint32Int8V(v map[uint32]int8, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint32Int8V(v map[uint32]int8, canChange bool, d *Decoder) (_ map[uint32]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) v = make(map[uint32]int8, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint32 var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv + mk = uint32(dd.DecodeUint(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { 
+ delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint32Int16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]int16) - v, changed := fastpathTV.DecMapUint32Int16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint32Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]int16) + if v, changed := fastpathTV.DecMapUint32Int16V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint32]int16) - fastpathTV.DecMapUint32Int16V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint32Int16V(rv2i(rv).(map[uint32]int16), false, d) } -func (f fastpathT) DecMapUint32Int16X(vp *map[uint32]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Int16V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint32Int16X(vp *map[uint32]int16, d *Decoder) { + if v, changed := f.DecMapUint32Int16V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint32Int16V(v map[uint32]int16, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint32Int16V(v map[uint32]int16, canChange bool, d *Decoder) (_ map[uint32]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) v = make(map[uint32]int16, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint32 var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv + mk = uint32(dd.DecodeUint(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint32Int32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]int32) - v, changed := fastpathTV.DecMapUint32Int32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint32Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]int32) + if v, changed := fastpathTV.DecMapUint32Int32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint32]int32) - fastpathTV.DecMapUint32Int32V(v, 
fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint32Int32V(rv2i(rv).(map[uint32]int32), false, d) } -func (f fastpathT) DecMapUint32Int32X(vp *map[uint32]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Int32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint32Int32X(vp *map[uint32]int32, d *Decoder) { + if v, changed := f.DecMapUint32Int32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint32Int32V(v map[uint32]int32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint32Int32V(v map[uint32]int32, canChange bool, d *Decoder) (_ map[uint32]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) v = make(map[uint32]int32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint32 var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv + mk = uint32(dd.DecodeUint(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint32Int64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]int64) - v, changed := fastpathTV.DecMapUint32Int64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint32Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]int64) + if v, changed := fastpathTV.DecMapUint32Int64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint32]int64) - fastpathTV.DecMapUint32Int64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint32Int64V(rv2i(rv).(map[uint32]int64), false, d) } -func (f fastpathT) DecMapUint32Int64X(vp *map[uint32]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Int64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint32Int64X(vp *map[uint32]int64, d *Decoder) { + if v, changed := f.DecMapUint32Int64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint32Int64V(v map[uint32]int64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint32Int64V(v map[uint32]int64, canChange bool, d *Decoder) (_ map[uint32]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + 
dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) v = make(map[uint32]int64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint32 var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv + mk = uint32(dd.DecodeUint(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint32Float32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]float32) - v, changed := fastpathTV.DecMapUint32Float32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint32Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]float32) + if v, changed := fastpathTV.DecMapUint32Float32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint32]float32) - fastpathTV.DecMapUint32Float32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint32Float32V(rv2i(rv).(map[uint32]float32), false, d) } -func (f fastpathT) DecMapUint32Float32X(vp *map[uint32]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Float32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint32Float32X(vp *map[uint32]float32, d *Decoder) { + if v, changed := f.DecMapUint32Float32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint32Float32V(v map[uint32]float32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint32Float32V(v map[uint32]float32, canChange bool, d *Decoder) (_ map[uint32]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) v = make(map[uint32]float32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint32 var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || 
dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv + mk = uint32(dd.DecodeUint(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint32Float64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]float64) - v, changed := fastpathTV.DecMapUint32Float64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint32Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]float64) + if v, changed := fastpathTV.DecMapUint32Float64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint32]float64) - fastpathTV.DecMapUint32Float64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint32Float64V(rv2i(rv).(map[uint32]float64), false, d) } -func (f fastpathT) DecMapUint32Float64X(vp *map[uint32]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32Float64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint32Float64X(vp *map[uint32]float64, d *Decoder) { + if v, changed := f.DecMapUint32Float64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint32Float64V(v map[uint32]float64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint32Float64V(v map[uint32]float64, canChange bool, d *Decoder) (_ map[uint32]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) v = make(map[uint32]float64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint32 var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv + mk = uint32(dd.DecodeUint(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) 
fastpathDecMapUint32BoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint32]bool) - v, changed := fastpathTV.DecMapUint32BoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint32BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]bool) + if v, changed := fastpathTV.DecMapUint32BoolV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint32]bool) - fastpathTV.DecMapUint32BoolV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint32BoolV(rv2i(rv).(map[uint32]bool), false, d) } -func (f fastpathT) DecMapUint32BoolX(vp *map[uint32]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint32BoolV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint32BoolX(vp *map[uint32]bool, d *Decoder) { + if v, changed := f.DecMapUint32BoolV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint32BoolV(v map[uint32]bool, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint32BoolV(v map[uint32]bool, canChange bool, d *Decoder) (_ map[uint32]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) v = make(map[uint32]bool, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint32 var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uint32(dd.DecodeUint(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv + mk = uint32(dd.DecodeUint(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint64IntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]interface{}) - v, changed := fastpathTV.DecMapUint64IntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint64IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]interface{}) + if v, changed := fastpathTV.DecMapUint64IntfV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint64]interface{}) - fastpathTV.DecMapUint64IntfV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint64IntfV(rv2i(rv).(map[uint64]interface{}), false, d) } -func (f fastpathT) DecMapUint64IntfX(vp *map[uint64]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64IntfV(*vp, checkNil, 
true, d) - if changed { +func (f fastpathT) DecMapUint64IntfX(vp *map[uint64]interface{}, d *Decoder) { + if v, changed := f.DecMapUint64IntfV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint64IntfV(v map[uint64]interface{}, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint64IntfV(v map[uint64]interface{}, canChange bool, d *Decoder) (_ map[uint64]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) v = make(map[uint64]interface{}, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } mapGet := !d.h.MapValueReset && !d.h.InterfaceReset var mk uint64 var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] + mk = dd.DecodeUint(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv + v[mk] = nil } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint64StringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]string) - v, changed := fastpathTV.DecMapUint64StringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint64StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]string) + if v, changed := fastpathTV.DecMapUint64StringV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint64]string) - fastpathTV.DecMapUint64StringV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint64StringV(rv2i(rv).(map[uint64]string), false, d) } -func (f fastpathT) DecMapUint64StringX(vp *map[uint64]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64StringV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint64StringX(vp *map[uint64]string, d *Decoder) { + if v, changed := f.DecMapUint64StringV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint64StringV(v map[uint64]string, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint64StringV(v map[uint64]string, canChange bool, d *Decoder) (_ map[uint64]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := 
dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) v = make(map[uint64]string, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint64 var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv + mk = dd.DecodeUint(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint64UintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]uint) - v, changed := fastpathTV.DecMapUint64UintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint64UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]uint) + if v, changed := fastpathTV.DecMapUint64UintV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint64]uint) - fastpathTV.DecMapUint64UintV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint64UintV(rv2i(rv).(map[uint64]uint), false, d) } -func (f fastpathT) DecMapUint64UintX(vp *map[uint64]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64UintV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint64UintX(vp *map[uint64]uint, d *Decoder) { + if v, changed := f.DecMapUint64UintV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint64UintV(v map[uint64]uint, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint64UintV(v map[uint64]uint, canChange bool, d *Decoder) (_ map[uint64]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) v = make(map[uint64]uint, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint64 var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - 
cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv + mk = dd.DecodeUint(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint64Uint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]uint8) - v, changed := fastpathTV.DecMapUint64Uint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint64Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]uint8) + if v, changed := fastpathTV.DecMapUint64Uint8V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint64]uint8) - fastpathTV.DecMapUint64Uint8V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint64Uint8V(rv2i(rv).(map[uint64]uint8), false, d) } -func (f fastpathT) DecMapUint64Uint8X(vp *map[uint64]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Uint8V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint64Uint8X(vp *map[uint64]uint8, d *Decoder) { + if v, changed := f.DecMapUint64Uint8V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint64Uint8V(v map[uint64]uint8, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint64Uint8V(v map[uint64]uint8, canChange bool, d *Decoder) (_ map[uint64]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) v = make(map[uint64]uint8, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint64 var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv + mk = dd.DecodeUint(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint64Uint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]uint16) - v, changed := fastpathTV.DecMapUint64Uint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d 
*Decoder) fastpathDecMapUint64Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]uint16) + if v, changed := fastpathTV.DecMapUint64Uint16V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint64]uint16) - fastpathTV.DecMapUint64Uint16V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint64Uint16V(rv2i(rv).(map[uint64]uint16), false, d) } -func (f fastpathT) DecMapUint64Uint16X(vp *map[uint64]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Uint16V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint64Uint16X(vp *map[uint64]uint16, d *Decoder) { + if v, changed := f.DecMapUint64Uint16V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint64Uint16V(v map[uint64]uint16, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint64Uint16V(v map[uint64]uint16, canChange bool, d *Decoder) (_ map[uint64]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) v = make(map[uint64]uint16, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint64 var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv + mk = dd.DecodeUint(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint64Uint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]uint32) - v, changed := fastpathTV.DecMapUint64Uint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint64Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]uint32) + if v, changed := fastpathTV.DecMapUint64Uint32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint64]uint32) - fastpathTV.DecMapUint64Uint32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint64Uint32V(rv2i(rv).(map[uint64]uint32), false, d) } -func (f fastpathT) DecMapUint64Uint32X(vp *map[uint64]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Uint32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint64Uint32X(vp *map[uint64]uint32, d *Decoder) { + if v, changed := f.DecMapUint64Uint32V(*vp, true, d); changed { *vp = v } } -func (_ 
fastpathT) DecMapUint64Uint32V(v map[uint64]uint32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint64Uint32V(v map[uint64]uint32, canChange bool, d *Decoder) (_ map[uint64]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) v = make(map[uint64]uint32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint64 var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv + mk = dd.DecodeUint(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint64Uint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]uint64) - v, changed := fastpathTV.DecMapUint64Uint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint64Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]uint64) + if v, changed := fastpathTV.DecMapUint64Uint64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint64]uint64) - fastpathTV.DecMapUint64Uint64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint64Uint64V(rv2i(rv).(map[uint64]uint64), false, d) } -func (f fastpathT) DecMapUint64Uint64X(vp *map[uint64]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Uint64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint64Uint64X(vp *map[uint64]uint64, d *Decoder) { + if v, changed := f.DecMapUint64Uint64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint64Uint64V(v map[uint64]uint64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint64Uint64V(v map[uint64]uint64, canChange bool, d *Decoder) (_ map[uint64]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) v = make(map[uint64]uint64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint64 var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr 
!= nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv + mk = dd.DecodeUint(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint64UintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]uintptr) - v, changed := fastpathTV.DecMapUint64UintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint64UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]uintptr) + if v, changed := fastpathTV.DecMapUint64UintptrV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint64]uintptr) - fastpathTV.DecMapUint64UintptrV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint64UintptrV(rv2i(rv).(map[uint64]uintptr), false, d) } -func (f fastpathT) DecMapUint64UintptrX(vp *map[uint64]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64UintptrV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint64UintptrX(vp *map[uint64]uintptr, d *Decoder) { + if v, changed := f.DecMapUint64UintptrV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint64UintptrV(v map[uint64]uintptr, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint64UintptrV(v map[uint64]uintptr, canChange bool, d *Decoder) (_ map[uint64]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) v = make(map[uint64]uintptr, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint64 var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv + mk = dd.DecodeUint(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if 
d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint64IntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]int) - v, changed := fastpathTV.DecMapUint64IntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint64IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]int) + if v, changed := fastpathTV.DecMapUint64IntV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint64]int) - fastpathTV.DecMapUint64IntV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint64IntV(rv2i(rv).(map[uint64]int), false, d) } -func (f fastpathT) DecMapUint64IntX(vp *map[uint64]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64IntV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint64IntX(vp *map[uint64]int, d *Decoder) { + if v, changed := f.DecMapUint64IntV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint64IntV(v map[uint64]int, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint64IntV(v map[uint64]int, canChange bool, d *Decoder) (_ map[uint64]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) v = make(map[uint64]int, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint64 var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv + mk = dd.DecodeUint(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint64Int8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]int8) - v, changed := fastpathTV.DecMapUint64Int8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint64Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]int8) + if v, changed := fastpathTV.DecMapUint64Int8V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint64]int8) - fastpathTV.DecMapUint64Int8V(v, 
fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint64Int8V(rv2i(rv).(map[uint64]int8), false, d) } -func (f fastpathT) DecMapUint64Int8X(vp *map[uint64]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Int8V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint64Int8X(vp *map[uint64]int8, d *Decoder) { + if v, changed := f.DecMapUint64Int8V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint64Int8V(v map[uint64]int8, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint64Int8V(v map[uint64]int8, canChange bool, d *Decoder) (_ map[uint64]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) v = make(map[uint64]int8, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint64 var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv + mk = dd.DecodeUint(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint64Int16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]int16) - v, changed := fastpathTV.DecMapUint64Int16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint64Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]int16) + if v, changed := fastpathTV.DecMapUint64Int16V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint64]int16) - fastpathTV.DecMapUint64Int16V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint64Int16V(rv2i(rv).(map[uint64]int16), false, d) } -func (f fastpathT) DecMapUint64Int16X(vp *map[uint64]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Int16V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint64Int16X(vp *map[uint64]int16, d *Decoder) { + if v, changed := f.DecMapUint64Int16V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint64Int16V(v map[uint64]int16, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint64Int16V(v map[uint64]int16, canChange bool, d *Decoder) (_ map[uint64]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() 
containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) v = make(map[uint64]int16, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint64 var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv + mk = dd.DecodeUint(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint64Int32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]int32) - v, changed := fastpathTV.DecMapUint64Int32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint64Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]int32) + if v, changed := fastpathTV.DecMapUint64Int32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint64]int32) - fastpathTV.DecMapUint64Int32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint64Int32V(rv2i(rv).(map[uint64]int32), false, d) } -func (f fastpathT) DecMapUint64Int32X(vp *map[uint64]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Int32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint64Int32X(vp *map[uint64]int32, d *Decoder) { + if v, changed := f.DecMapUint64Int32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint64Int32V(v map[uint64]int32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint64Int32V(v map[uint64]int32, canChange bool, d *Decoder) (_ map[uint64]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) v = make(map[uint64]int32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint64 var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; 
!dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv + mk = dd.DecodeUint(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint64Int64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]int64) - v, changed := fastpathTV.DecMapUint64Int64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint64Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]int64) + if v, changed := fastpathTV.DecMapUint64Int64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint64]int64) - fastpathTV.DecMapUint64Int64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint64Int64V(rv2i(rv).(map[uint64]int64), false, d) } -func (f fastpathT) DecMapUint64Int64X(vp *map[uint64]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Int64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint64Int64X(vp *map[uint64]int64, d *Decoder) { + if v, changed := f.DecMapUint64Int64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint64Int64V(v map[uint64]int64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint64Int64V(v map[uint64]int64, canChange bool, d *Decoder) (_ map[uint64]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) v = make(map[uint64]int64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint64 var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv + mk = dd.DecodeUint(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint64Float32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]float32) - v, changed := fastpathTV.DecMapUint64Float32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { 
+func (d *Decoder) fastpathDecMapUint64Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]float32) + if v, changed := fastpathTV.DecMapUint64Float32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint64]float32) - fastpathTV.DecMapUint64Float32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint64Float32V(rv2i(rv).(map[uint64]float32), false, d) } -func (f fastpathT) DecMapUint64Float32X(vp *map[uint64]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Float32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint64Float32X(vp *map[uint64]float32, d *Decoder) { + if v, changed := f.DecMapUint64Float32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint64Float32V(v map[uint64]float32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint64Float32V(v map[uint64]float32, canChange bool, d *Decoder) (_ map[uint64]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) v = make(map[uint64]float32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint64 var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv + mk = dd.DecodeUint(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint64Float64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]float64) - v, changed := fastpathTV.DecMapUint64Float64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint64Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]float64) + if v, changed := fastpathTV.DecMapUint64Float64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint64]float64) - fastpathTV.DecMapUint64Float64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint64Float64V(rv2i(rv).(map[uint64]float64), false, d) } -func (f fastpathT) DecMapUint64Float64X(vp *map[uint64]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64Float64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint64Float64X(vp *map[uint64]float64, d *Decoder) { + if v, changed := 
f.DecMapUint64Float64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint64Float64V(v map[uint64]float64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint64Float64V(v map[uint64]float64, canChange bool, d *Decoder) (_ map[uint64]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) v = make(map[uint64]float64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint64 var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv + mk = dd.DecodeUint(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUint64BoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uint64]bool) - v, changed := fastpathTV.DecMapUint64BoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUint64BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]bool) + if v, changed := fastpathTV.DecMapUint64BoolV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uint64]bool) - fastpathTV.DecMapUint64BoolV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUint64BoolV(rv2i(rv).(map[uint64]bool), false, d) } -func (f fastpathT) DecMapUint64BoolX(vp *map[uint64]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapUint64BoolV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUint64BoolX(vp *map[uint64]bool, d *Decoder) { + if v, changed := f.DecMapUint64BoolV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUint64BoolV(v map[uint64]bool, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUint64BoolV(v map[uint64]bool, canChange bool, d *Decoder) (_ map[uint64]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) v = make(map[uint64]bool, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uint64 var mv bool - if containerLen > 0 { - for j := 0; j < 
containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeUint(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv + mk = dd.DecodeUint(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUintptrIntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]interface{}) - v, changed := fastpathTV.DecMapUintptrIntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUintptrIntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]interface{}) + if v, changed := fastpathTV.DecMapUintptrIntfV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uintptr]interface{}) - fastpathTV.DecMapUintptrIntfV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUintptrIntfV(rv2i(rv).(map[uintptr]interface{}), false, d) } -func (f fastpathT) DecMapUintptrIntfX(vp *map[uintptr]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrIntfV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUintptrIntfX(vp *map[uintptr]interface{}, d *Decoder) { + if v, changed := f.DecMapUintptrIntfV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUintptrIntfV(v map[uintptr]interface{}, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUintptrIntfV(v map[uintptr]interface{}, canChange bool, d *Decoder) (_ map[uintptr]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) v = make(map[uintptr]interface{}, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } mapGet := !d.h.MapValueReset && !d.h.InterfaceReset var mk uintptr var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv 
= v[mk] + mk = uintptr(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv + v[mk] = nil } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUintptrStringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]string) - v, changed := fastpathTV.DecMapUintptrStringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUintptrStringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]string) + if v, changed := fastpathTV.DecMapUintptrStringV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uintptr]string) - fastpathTV.DecMapUintptrStringV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUintptrStringV(rv2i(rv).(map[uintptr]string), false, d) } -func (f fastpathT) DecMapUintptrStringX(vp *map[uintptr]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrStringV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUintptrStringX(vp *map[uintptr]string, d *Decoder) { + if v, changed := f.DecMapUintptrStringV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUintptrStringV(v map[uintptr]string, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUintptrStringV(v map[uintptr]string, canChange bool, d *Decoder) (_ map[uintptr]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) v = make(map[uintptr]string, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uintptr var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv + mk = uintptr(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUintptrUintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]uint) - v, changed := fastpathTV.DecMapUintptrUintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d 
*Decoder) fastpathDecMapUintptrUintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]uint) + if v, changed := fastpathTV.DecMapUintptrUintV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uintptr]uint) - fastpathTV.DecMapUintptrUintV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUintptrUintV(rv2i(rv).(map[uintptr]uint), false, d) } -func (f fastpathT) DecMapUintptrUintX(vp *map[uintptr]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrUintV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUintptrUintX(vp *map[uintptr]uint, d *Decoder) { + if v, changed := f.DecMapUintptrUintV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUintptrUintV(v map[uintptr]uint, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUintptrUintV(v map[uintptr]uint, canChange bool, d *Decoder) (_ map[uintptr]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) v = make(map[uintptr]uint, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uintptr var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv + mk = uintptr(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUintptrUint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]uint8) - v, changed := fastpathTV.DecMapUintptrUint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUintptrUint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]uint8) + if v, changed := fastpathTV.DecMapUintptrUint8V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uintptr]uint8) - fastpathTV.DecMapUintptrUint8V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUintptrUint8V(rv2i(rv).(map[uintptr]uint8), false, d) } -func (f fastpathT) DecMapUintptrUint8X(vp *map[uintptr]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrUint8V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUintptrUint8X(vp *map[uintptr]uint8, d *Decoder) { + if v, changed := 
f.DecMapUintptrUint8V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUintptrUint8V(v map[uintptr]uint8, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUintptrUint8V(v map[uintptr]uint8, canChange bool, d *Decoder) (_ map[uintptr]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) v = make(map[uintptr]uint8, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uintptr var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv + mk = uintptr(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUintptrUint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]uint16) - v, changed := fastpathTV.DecMapUintptrUint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUintptrUint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]uint16) + if v, changed := fastpathTV.DecMapUintptrUint16V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uintptr]uint16) - fastpathTV.DecMapUintptrUint16V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUintptrUint16V(rv2i(rv).(map[uintptr]uint16), false, d) } -func (f fastpathT) DecMapUintptrUint16X(vp *map[uintptr]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrUint16V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUintptrUint16X(vp *map[uintptr]uint16, d *Decoder) { + if v, changed := f.DecMapUintptrUint16V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUintptrUint16V(v map[uintptr]uint16, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUintptrUint16V(v map[uintptr]uint16, canChange bool, d *Decoder) (_ map[uintptr]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) v = make(map[uintptr]uint16, xlen) changed = true } + if containerLen == 0 { + 
dd.ReadMapEnd() + return v, changed + } var mk uintptr var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv + mk = uintptr(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUintptrUint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]uint32) - v, changed := fastpathTV.DecMapUintptrUint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUintptrUint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]uint32) + if v, changed := fastpathTV.DecMapUintptrUint32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uintptr]uint32) - fastpathTV.DecMapUintptrUint32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUintptrUint32V(rv2i(rv).(map[uintptr]uint32), false, d) } -func (f fastpathT) DecMapUintptrUint32X(vp *map[uintptr]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrUint32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUintptrUint32X(vp *map[uintptr]uint32, d *Decoder) { + if v, changed := f.DecMapUintptrUint32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUintptrUint32V(v map[uintptr]uint32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUintptrUint32V(v map[uintptr]uint32, canChange bool, d *Decoder) (_ map[uintptr]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) v = make(map[uintptr]uint32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uintptr var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != 
nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv + mk = uintptr(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUintptrUint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]uint64) - v, changed := fastpathTV.DecMapUintptrUint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUintptrUint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]uint64) + if v, changed := fastpathTV.DecMapUintptrUint64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uintptr]uint64) - fastpathTV.DecMapUintptrUint64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUintptrUint64V(rv2i(rv).(map[uintptr]uint64), false, d) } -func (f fastpathT) DecMapUintptrUint64X(vp *map[uintptr]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrUint64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUintptrUint64X(vp *map[uintptr]uint64, d *Decoder) { + if v, changed := f.DecMapUintptrUint64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUintptrUint64V(v map[uintptr]uint64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUintptrUint64V(v map[uintptr]uint64, canChange bool, d *Decoder) (_ map[uintptr]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) v = make(map[uintptr]uint64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uintptr var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv + mk = uintptr(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUintptrUintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]uintptr) - v, changed := fastpathTV.DecMapUintptrUintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if 
changed { +func (d *Decoder) fastpathDecMapUintptrUintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]uintptr) + if v, changed := fastpathTV.DecMapUintptrUintptrV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uintptr]uintptr) - fastpathTV.DecMapUintptrUintptrV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUintptrUintptrV(rv2i(rv).(map[uintptr]uintptr), false, d) } -func (f fastpathT) DecMapUintptrUintptrX(vp *map[uintptr]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrUintptrV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUintptrUintptrX(vp *map[uintptr]uintptr, d *Decoder) { + if v, changed := f.DecMapUintptrUintptrV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUintptrUintptrV(v map[uintptr]uintptr, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUintptrUintptrV(v map[uintptr]uintptr, canChange bool, d *Decoder) (_ map[uintptr]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) v = make(map[uintptr]uintptr, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uintptr var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv + mk = uintptr(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUintptrIntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]int) - v, changed := fastpathTV.DecMapUintptrIntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUintptrIntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]int) + if v, changed := fastpathTV.DecMapUintptrIntV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uintptr]int) - fastpathTV.DecMapUintptrIntV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUintptrIntV(rv2i(rv).(map[uintptr]int), false, d) } -func (f fastpathT) DecMapUintptrIntX(vp *map[uintptr]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrIntV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUintptrIntX(vp 
*map[uintptr]int, d *Decoder) { + if v, changed := f.DecMapUintptrIntV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUintptrIntV(v map[uintptr]int, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUintptrIntV(v map[uintptr]int, canChange bool, d *Decoder) (_ map[uintptr]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) v = make(map[uintptr]int, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uintptr var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv + mk = uintptr(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUintptrInt8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]int8) - v, changed := fastpathTV.DecMapUintptrInt8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUintptrInt8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]int8) + if v, changed := fastpathTV.DecMapUintptrInt8V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uintptr]int8) - fastpathTV.DecMapUintptrInt8V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUintptrInt8V(rv2i(rv).(map[uintptr]int8), false, d) } -func (f fastpathT) DecMapUintptrInt8X(vp *map[uintptr]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrInt8V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUintptrInt8X(vp *map[uintptr]int8, d *Decoder) { + if v, changed := f.DecMapUintptrInt8V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUintptrInt8V(v map[uintptr]int8, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUintptrInt8V(v map[uintptr]int8, canChange bool, d *Decoder) (_ map[uintptr]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) v = make(map[uintptr]int8, xlen) changed = true } + if containerLen 
== 0 { + dd.ReadMapEnd() + return v, changed + } var mk uintptr var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv + mk = uintptr(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUintptrInt16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]int16) - v, changed := fastpathTV.DecMapUintptrInt16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUintptrInt16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]int16) + if v, changed := fastpathTV.DecMapUintptrInt16V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uintptr]int16) - fastpathTV.DecMapUintptrInt16V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUintptrInt16V(rv2i(rv).(map[uintptr]int16), false, d) } -func (f fastpathT) DecMapUintptrInt16X(vp *map[uintptr]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrInt16V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUintptrInt16X(vp *map[uintptr]int16, d *Decoder) { + if v, changed := f.DecMapUintptrInt16V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUintptrInt16V(v map[uintptr]int16, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUintptrInt16V(v map[uintptr]int16, canChange bool, d *Decoder) (_ map[uintptr]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) v = make(map[uintptr]int16, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uintptr var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - 
cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv + mk = uintptr(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUintptrInt32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]int32) - v, changed := fastpathTV.DecMapUintptrInt32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUintptrInt32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]int32) + if v, changed := fastpathTV.DecMapUintptrInt32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uintptr]int32) - fastpathTV.DecMapUintptrInt32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUintptrInt32V(rv2i(rv).(map[uintptr]int32), false, d) } -func (f fastpathT) DecMapUintptrInt32X(vp *map[uintptr]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrInt32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUintptrInt32X(vp *map[uintptr]int32, d *Decoder) { + if v, changed := f.DecMapUintptrInt32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUintptrInt32V(v map[uintptr]int32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUintptrInt32V(v map[uintptr]int32, canChange bool, d *Decoder) (_ map[uintptr]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) v = make(map[uintptr]int32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uintptr var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv + mk = uintptr(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUintptrInt64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]int64) - v, changed := fastpathTV.DecMapUintptrInt64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) 
fastpathDecMapUintptrInt64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]int64) + if v, changed := fastpathTV.DecMapUintptrInt64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uintptr]int64) - fastpathTV.DecMapUintptrInt64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUintptrInt64V(rv2i(rv).(map[uintptr]int64), false, d) } -func (f fastpathT) DecMapUintptrInt64X(vp *map[uintptr]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrInt64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUintptrInt64X(vp *map[uintptr]int64, d *Decoder) { + if v, changed := f.DecMapUintptrInt64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUintptrInt64V(v map[uintptr]int64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUintptrInt64V(v map[uintptr]int64, canChange bool, d *Decoder) (_ map[uintptr]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) v = make(map[uintptr]int64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uintptr var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv + mk = uintptr(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUintptrFloat32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]float32) - v, changed := fastpathTV.DecMapUintptrFloat32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUintptrFloat32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]float32) + if v, changed := fastpathTV.DecMapUintptrFloat32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uintptr]float32) - fastpathTV.DecMapUintptrFloat32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUintptrFloat32V(rv2i(rv).(map[uintptr]float32), false, d) } -func (f fastpathT) DecMapUintptrFloat32X(vp *map[uintptr]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrFloat32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUintptrFloat32X(vp *map[uintptr]float32, d *Decoder) { + if v, changed := 
f.DecMapUintptrFloat32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUintptrFloat32V(v map[uintptr]float32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUintptrFloat32V(v map[uintptr]float32, canChange bool, d *Decoder) (_ map[uintptr]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) v = make(map[uintptr]float32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uintptr var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv + mk = uintptr(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUintptrFloat64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]float64) - v, changed := fastpathTV.DecMapUintptrFloat64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUintptrFloat64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]float64) + if v, changed := fastpathTV.DecMapUintptrFloat64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uintptr]float64) - fastpathTV.DecMapUintptrFloat64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUintptrFloat64V(rv2i(rv).(map[uintptr]float64), false, d) } -func (f fastpathT) DecMapUintptrFloat64X(vp *map[uintptr]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrFloat64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUintptrFloat64X(vp *map[uintptr]float64, d *Decoder) { + if v, changed := f.DecMapUintptrFloat64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUintptrFloat64V(v map[uintptr]float64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUintptrFloat64V(v map[uintptr]float64, canChange bool, d *Decoder) (_ map[uintptr]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) v = 
make(map[uintptr]float64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uintptr var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv + mk = uintptr(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapUintptrBoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[uintptr]bool) - v, changed := fastpathTV.DecMapUintptrBoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapUintptrBoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]bool) + if v, changed := fastpathTV.DecMapUintptrBoolV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[uintptr]bool) - fastpathTV.DecMapUintptrBoolV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapUintptrBoolV(rv2i(rv).(map[uintptr]bool), false, d) } -func (f fastpathT) DecMapUintptrBoolX(vp *map[uintptr]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapUintptrBoolV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapUintptrBoolX(vp *map[uintptr]bool, d *Decoder) { + if v, changed := f.DecMapUintptrBoolV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapUintptrBoolV(v map[uintptr]bool, checkNil bool, canChange bool, +func (_ fastpathT) DecMapUintptrBoolV(v map[uintptr]bool, canChange bool, d *Decoder) (_ map[uintptr]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) v = make(map[uintptr]bool, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk uintptr var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = uintptr(dd.DecodeUint(uintBitsize)) - 
if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv + mk = uintptr(dd.DecodeUint(uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapIntIntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]interface{}) - v, changed := fastpathTV.DecMapIntIntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapIntIntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]interface{}) + if v, changed := fastpathTV.DecMapIntIntfV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int]interface{}) - fastpathTV.DecMapIntIntfV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapIntIntfV(rv2i(rv).(map[int]interface{}), false, d) } -func (f fastpathT) DecMapIntIntfX(vp *map[int]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntIntfV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapIntIntfX(vp *map[int]interface{}, d *Decoder) { + if v, changed := f.DecMapIntIntfV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapIntIntfV(v map[int]interface{}, checkNil bool, canChange bool, +func (_ fastpathT) DecMapIntIntfV(v map[int]interface{}, canChange bool, d *Decoder) (_ map[int]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) v = make(map[int]interface{}, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } mapGet := !d.h.MapValueReset && !d.h.InterfaceReset var mk int var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] + mk = int(dd.DecodeInt(intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv + v[mk] = nil } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapIntStringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]string) - v, changed := 
fastpathTV.DecMapIntStringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapIntStringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]string) + if v, changed := fastpathTV.DecMapIntStringV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int]string) - fastpathTV.DecMapIntStringV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapIntStringV(rv2i(rv).(map[int]string), false, d) } -func (f fastpathT) DecMapIntStringX(vp *map[int]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntStringV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapIntStringX(vp *map[int]string, d *Decoder) { + if v, changed := f.DecMapIntStringV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapIntStringV(v map[int]string, checkNil bool, canChange bool, +func (_ fastpathT) DecMapIntStringV(v map[int]string, canChange bool, d *Decoder) (_ map[int]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) v = make(map[int]string, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv + mk = int(dd.DecodeInt(intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapIntUintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]uint) - v, changed := fastpathTV.DecMapIntUintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapIntUintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]uint) + if v, changed := fastpathTV.DecMapIntUintV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int]uint) - fastpathTV.DecMapIntUintV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapIntUintV(rv2i(rv).(map[int]uint), false, d) } -func (f fastpathT) DecMapIntUintX(vp *map[int]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntUintV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapIntUintX(vp *map[int]uint, d *Decoder) { + if v, changed := f.DecMapIntUintV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapIntUintV(v 
map[int]uint, checkNil bool, canChange bool, +func (_ fastpathT) DecMapIntUintV(v map[int]uint, canChange bool, d *Decoder) (_ map[int]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) v = make(map[int]uint, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv + mk = int(dd.DecodeInt(intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapIntUint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]uint8) - v, changed := fastpathTV.DecMapIntUint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapIntUint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]uint8) + if v, changed := fastpathTV.DecMapIntUint8V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int]uint8) - fastpathTV.DecMapIntUint8V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapIntUint8V(rv2i(rv).(map[int]uint8), false, d) } -func (f fastpathT) DecMapIntUint8X(vp *map[int]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntUint8V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapIntUint8X(vp *map[int]uint8, d *Decoder) { + if v, changed := f.DecMapIntUint8V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapIntUint8V(v map[int]uint8, checkNil bool, canChange bool, +func (_ fastpathT) DecMapIntUint8V(v map[int]uint8, canChange bool, d *Decoder) (_ map[int]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) v = make(map[int]uint8, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr 
!= nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv + mk = int(dd.DecodeInt(intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapIntUint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]uint16) - v, changed := fastpathTV.DecMapIntUint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapIntUint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]uint16) + if v, changed := fastpathTV.DecMapIntUint16V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int]uint16) - fastpathTV.DecMapIntUint16V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapIntUint16V(rv2i(rv).(map[int]uint16), false, d) } -func (f fastpathT) DecMapIntUint16X(vp *map[int]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntUint16V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapIntUint16X(vp *map[int]uint16, d *Decoder) { + if v, changed := f.DecMapIntUint16V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapIntUint16V(v map[int]uint16, checkNil bool, canChange bool, +func (_ fastpathT) DecMapIntUint16V(v map[int]uint16, canChange bool, d *Decoder) (_ map[int]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) v = make(map[int]uint16, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv + mk = int(dd.DecodeInt(intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = 
mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapIntUint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]uint32) - v, changed := fastpathTV.DecMapIntUint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapIntUint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]uint32) + if v, changed := fastpathTV.DecMapIntUint32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int]uint32) - fastpathTV.DecMapIntUint32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapIntUint32V(rv2i(rv).(map[int]uint32), false, d) } -func (f fastpathT) DecMapIntUint32X(vp *map[int]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntUint32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapIntUint32X(vp *map[int]uint32, d *Decoder) { + if v, changed := f.DecMapIntUint32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapIntUint32V(v map[int]uint32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapIntUint32V(v map[int]uint32, canChange bool, d *Decoder) (_ map[int]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) v = make(map[int]uint32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv + mk = int(dd.DecodeInt(intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapIntUint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]uint64) - v, changed := fastpathTV.DecMapIntUint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapIntUint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]uint64) + if v, changed := fastpathTV.DecMapIntUint64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int]uint64) - fastpathTV.DecMapIntUint64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapIntUint64V(rv2i(rv).(map[int]uint64), false, d) } -func (f fastpathT) DecMapIntUint64X(vp 
*map[int]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntUint64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapIntUint64X(vp *map[int]uint64, d *Decoder) { + if v, changed := f.DecMapIntUint64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapIntUint64V(v map[int]uint64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapIntUint64V(v map[int]uint64, canChange bool, d *Decoder) (_ map[int]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) v = make(map[int]uint64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv + mk = int(dd.DecodeInt(intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapIntUintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]uintptr) - v, changed := fastpathTV.DecMapIntUintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapIntUintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]uintptr) + if v, changed := fastpathTV.DecMapIntUintptrV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int]uintptr) - fastpathTV.DecMapIntUintptrV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapIntUintptrV(rv2i(rv).(map[int]uintptr), false, d) } -func (f fastpathT) DecMapIntUintptrX(vp *map[int]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntUintptrV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapIntUintptrX(vp *map[int]uintptr, d *Decoder) { + if v, changed := f.DecMapIntUintptrV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapIntUintptrV(v map[int]uintptr, checkNil bool, canChange bool, +func (_ fastpathT) DecMapIntUintptrV(v map[int]uintptr, canChange bool, d *Decoder) (_ map[int]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + xlen := decInferLen(containerLen, 
d.h.MaxInitLen, 16) v = make(map[int]uintptr, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv + mk = int(dd.DecodeInt(intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapIntIntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]int) - v, changed := fastpathTV.DecMapIntIntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapIntIntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]int) + if v, changed := fastpathTV.DecMapIntIntV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int]int) - fastpathTV.DecMapIntIntV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapIntIntV(rv2i(rv).(map[int]int), false, d) } -func (f fastpathT) DecMapIntIntX(vp *map[int]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntIntV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapIntIntX(vp *map[int]int, d *Decoder) { + if v, changed := f.DecMapIntIntV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapIntIntV(v map[int]int, checkNil bool, canChange bool, +func (_ fastpathT) DecMapIntIntV(v map[int]int, canChange bool, d *Decoder) (_ map[int]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) v = make(map[int]int, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = 
int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv + mk = int(dd.DecodeInt(intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapIntInt8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]int8) - v, changed := fastpathTV.DecMapIntInt8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapIntInt8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]int8) + if v, changed := fastpathTV.DecMapIntInt8V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int]int8) - fastpathTV.DecMapIntInt8V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapIntInt8V(rv2i(rv).(map[int]int8), false, d) } -func (f fastpathT) DecMapIntInt8X(vp *map[int]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntInt8V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapIntInt8X(vp *map[int]int8, d *Decoder) { + if v, changed := f.DecMapIntInt8V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapIntInt8V(v map[int]int8, checkNil bool, canChange bool, +func (_ fastpathT) DecMapIntInt8V(v map[int]int8, canChange bool, d *Decoder) (_ map[int]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) v = make(map[int]int8, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv + mk = int(dd.DecodeInt(intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapIntInt16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]int16) - v, changed := fastpathTV.DecMapIntInt16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapIntInt16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]int16) + if v, changed := fastpathTV.DecMapIntInt16V(*vp, true, d); changed { 
*vp = v } - } else { - v := rv.Interface().(map[int]int16) - fastpathTV.DecMapIntInt16V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapIntInt16V(rv2i(rv).(map[int]int16), false, d) } -func (f fastpathT) DecMapIntInt16X(vp *map[int]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntInt16V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapIntInt16X(vp *map[int]int16, d *Decoder) { + if v, changed := f.DecMapIntInt16V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapIntInt16V(v map[int]int16, checkNil bool, canChange bool, +func (_ fastpathT) DecMapIntInt16V(v map[int]int16, canChange bool, d *Decoder) (_ map[int]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) v = make(map[int]int16, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv + mk = int(dd.DecodeInt(intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapIntInt32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]int32) - v, changed := fastpathTV.DecMapIntInt32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapIntInt32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]int32) + if v, changed := fastpathTV.DecMapIntInt32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int]int32) - fastpathTV.DecMapIntInt32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapIntInt32V(rv2i(rv).(map[int]int32), false, d) } -func (f fastpathT) DecMapIntInt32X(vp *map[int]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntInt32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapIntInt32X(vp *map[int]int32, d *Decoder) { + if v, changed := f.DecMapIntInt32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapIntInt32V(v map[int]int32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapIntInt32V(v map[int]int32, canChange bool, d *Decoder) (_ map[int]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + 
dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) v = make(map[int]int32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv + mk = int(dd.DecodeInt(intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapIntInt64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]int64) - v, changed := fastpathTV.DecMapIntInt64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapIntInt64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]int64) + if v, changed := fastpathTV.DecMapIntInt64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int]int64) - fastpathTV.DecMapIntInt64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapIntInt64V(rv2i(rv).(map[int]int64), false, d) } -func (f fastpathT) DecMapIntInt64X(vp *map[int]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntInt64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapIntInt64X(vp *map[int]int64, d *Decoder) { + if v, changed := f.DecMapIntInt64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapIntInt64V(v map[int]int64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapIntInt64V(v map[int]int64, canChange bool, d *Decoder) (_ map[int]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) v = make(map[int]int64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; 
!dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv + mk = int(dd.DecodeInt(intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapIntFloat32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]float32) - v, changed := fastpathTV.DecMapIntFloat32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapIntFloat32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]float32) + if v, changed := fastpathTV.DecMapIntFloat32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int]float32) - fastpathTV.DecMapIntFloat32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapIntFloat32V(rv2i(rv).(map[int]float32), false, d) } -func (f fastpathT) DecMapIntFloat32X(vp *map[int]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntFloat32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapIntFloat32X(vp *map[int]float32, d *Decoder) { + if v, changed := f.DecMapIntFloat32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapIntFloat32V(v map[int]float32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapIntFloat32V(v map[int]float32, canChange bool, d *Decoder) (_ map[int]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) v = make(map[int]float32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv + mk = int(dd.DecodeInt(intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapIntFloat64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]float64) - v, changed := 
fastpathTV.DecMapIntFloat64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapIntFloat64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]float64) + if v, changed := fastpathTV.DecMapIntFloat64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int]float64) - fastpathTV.DecMapIntFloat64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapIntFloat64V(rv2i(rv).(map[int]float64), false, d) } -func (f fastpathT) DecMapIntFloat64X(vp *map[int]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntFloat64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapIntFloat64X(vp *map[int]float64, d *Decoder) { + if v, changed := f.DecMapIntFloat64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapIntFloat64V(v map[int]float64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapIntFloat64V(v map[int]float64, canChange bool, d *Decoder) (_ map[int]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) v = make(map[int]float64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv + mk = int(dd.DecodeInt(intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapIntBoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int]bool) - v, changed := fastpathTV.DecMapIntBoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapIntBoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]bool) + if v, changed := fastpathTV.DecMapIntBoolV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int]bool) - fastpathTV.DecMapIntBoolV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapIntBoolV(rv2i(rv).(map[int]bool), false, d) } -func (f fastpathT) DecMapIntBoolX(vp *map[int]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapIntBoolV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapIntBoolX(vp *map[int]bool, d *Decoder) { + if v, changed := f.DecMapIntBoolV(*vp, true, d); changed { *vp = v } } -func (_ 
fastpathT) DecMapIntBoolV(v map[int]bool, checkNil bool, canChange bool, +func (_ fastpathT) DecMapIntBoolV(v map[int]bool, canChange bool, d *Decoder) (_ map[int]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) v = make(map[int]bool, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int(dd.DecodeInt(intBitsize)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv + mk = int(dd.DecodeInt(intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt8IntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]interface{}) - v, changed := fastpathTV.DecMapInt8IntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt8IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]interface{}) + if v, changed := fastpathTV.DecMapInt8IntfV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int8]interface{}) - fastpathTV.DecMapInt8IntfV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt8IntfV(rv2i(rv).(map[int8]interface{}), false, d) } -func (f fastpathT) DecMapInt8IntfX(vp *map[int8]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8IntfV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt8IntfX(vp *map[int8]interface{}, d *Decoder) { + if v, changed := f.DecMapInt8IntfV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt8IntfV(v map[int8]interface{}, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt8IntfV(v map[int8]interface{}, canChange bool, d *Decoder) (_ map[int8]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) v = make(map[int8]interface{}, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } mapGet := !d.h.MapValueReset && !d.h.InterfaceReset var mk int8 var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ 
{ - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv + v[mk] = nil } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt8StringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]string) - v, changed := fastpathTV.DecMapInt8StringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt8StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]string) + if v, changed := fastpathTV.DecMapInt8StringV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int8]string) - fastpathTV.DecMapInt8StringV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt8StringV(rv2i(rv).(map[int8]string), false, d) } -func (f fastpathT) DecMapInt8StringX(vp *map[int8]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8StringV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt8StringX(vp *map[int8]string, d *Decoder) { + if v, changed := f.DecMapInt8StringV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt8StringV(v map[int8]string, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt8StringV(v map[int8]string, canChange bool, d *Decoder) (_ map[int8]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) v = make(map[int8]string, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int8 var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + 
if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt8UintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]uint) - v, changed := fastpathTV.DecMapInt8UintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt8UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]uint) + if v, changed := fastpathTV.DecMapInt8UintV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int8]uint) - fastpathTV.DecMapInt8UintV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt8UintV(rv2i(rv).(map[int8]uint), false, d) } -func (f fastpathT) DecMapInt8UintX(vp *map[int8]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8UintV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt8UintX(vp *map[int8]uint, d *Decoder) { + if v, changed := f.DecMapInt8UintV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt8UintV(v map[int8]uint, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt8UintV(v map[int8]uint, canChange bool, d *Decoder) (_ map[int8]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) v = make(map[int8]uint, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int8 var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt8Uint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]uint8) - v, changed := fastpathTV.DecMapInt8Uint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt8Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]uint8) + if v, changed := fastpathTV.DecMapInt8Uint8V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int8]uint8) - fastpathTV.DecMapInt8Uint8V(v, 
fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt8Uint8V(rv2i(rv).(map[int8]uint8), false, d) } -func (f fastpathT) DecMapInt8Uint8X(vp *map[int8]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Uint8V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt8Uint8X(vp *map[int8]uint8, d *Decoder) { + if v, changed := f.DecMapInt8Uint8V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt8Uint8V(v map[int8]uint8, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt8Uint8V(v map[int8]uint8, canChange bool, d *Decoder) (_ map[int8]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) v = make(map[int8]uint8, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int8 var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt8Uint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]uint16) - v, changed := fastpathTV.DecMapInt8Uint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt8Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]uint16) + if v, changed := fastpathTV.DecMapInt8Uint16V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int8]uint16) - fastpathTV.DecMapInt8Uint16V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt8Uint16V(rv2i(rv).(map[int8]uint16), false, d) } -func (f fastpathT) DecMapInt8Uint16X(vp *map[int8]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Uint16V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt8Uint16X(vp *map[int8]uint16, d *Decoder) { + if v, changed := f.DecMapInt8Uint16V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt8Uint16V(v map[int8]uint16, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt8Uint16V(v map[int8]uint16, canChange bool, d *Decoder) (_ map[int8]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := 
dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) v = make(map[int8]uint16, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int8 var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt8Uint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]uint32) - v, changed := fastpathTV.DecMapInt8Uint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt8Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]uint32) + if v, changed := fastpathTV.DecMapInt8Uint32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int8]uint32) - fastpathTV.DecMapInt8Uint32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt8Uint32V(rv2i(rv).(map[int8]uint32), false, d) } -func (f fastpathT) DecMapInt8Uint32X(vp *map[int8]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Uint32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt8Uint32X(vp *map[int8]uint32, d *Decoder) { + if v, changed := f.DecMapInt8Uint32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt8Uint32V(v map[int8]uint32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt8Uint32V(v map[int8]uint32, canChange bool, d *Decoder) (_ map[int8]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) v = make(map[int8]uint32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int8 var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != 
nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt8Uint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]uint64) - v, changed := fastpathTV.DecMapInt8Uint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt8Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]uint64) + if v, changed := fastpathTV.DecMapInt8Uint64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int8]uint64) - fastpathTV.DecMapInt8Uint64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt8Uint64V(rv2i(rv).(map[int8]uint64), false, d) } -func (f fastpathT) DecMapInt8Uint64X(vp *map[int8]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Uint64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt8Uint64X(vp *map[int8]uint64, d *Decoder) { + if v, changed := f.DecMapInt8Uint64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt8Uint64V(v map[int8]uint64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt8Uint64V(v map[int8]uint64, canChange bool, d *Decoder) (_ map[int8]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) v = make(map[int8]uint64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int8 var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt8UintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]uintptr) - v, changed := fastpathTV.DecMapInt8UintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) 
fastpathDecMapInt8UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]uintptr) + if v, changed := fastpathTV.DecMapInt8UintptrV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int8]uintptr) - fastpathTV.DecMapInt8UintptrV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt8UintptrV(rv2i(rv).(map[int8]uintptr), false, d) } -func (f fastpathT) DecMapInt8UintptrX(vp *map[int8]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8UintptrV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt8UintptrX(vp *map[int8]uintptr, d *Decoder) { + if v, changed := f.DecMapInt8UintptrV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt8UintptrV(v map[int8]uintptr, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt8UintptrV(v map[int8]uintptr, canChange bool, d *Decoder) (_ map[int8]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) v = make(map[int8]uintptr, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int8 var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt8IntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]int) - v, changed := fastpathTV.DecMapInt8IntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt8IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]int) + if v, changed := fastpathTV.DecMapInt8IntV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int8]int) - fastpathTV.DecMapInt8IntV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt8IntV(rv2i(rv).(map[int8]int), false, d) } -func (f fastpathT) DecMapInt8IntX(vp *map[int8]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8IntV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt8IntX(vp *map[int8]int, d *Decoder) { + if v, changed := f.DecMapInt8IntV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt8IntV(v map[int8]int, checkNil bool, canChange bool, 
+func (_ fastpathT) DecMapInt8IntV(v map[int8]int, canChange bool, d *Decoder) (_ map[int8]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) v = make(map[int8]int, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int8 var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt8Int8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]int8) - v, changed := fastpathTV.DecMapInt8Int8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt8Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]int8) + if v, changed := fastpathTV.DecMapInt8Int8V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int8]int8) - fastpathTV.DecMapInt8Int8V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt8Int8V(rv2i(rv).(map[int8]int8), false, d) } -func (f fastpathT) DecMapInt8Int8X(vp *map[int8]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Int8V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt8Int8X(vp *map[int8]int8, d *Decoder) { + if v, changed := f.DecMapInt8Int8V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt8Int8V(v map[int8]int8, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt8Int8V(v map[int8]int8, canChange bool, d *Decoder) (_ map[int8]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) v = make(map[int8]int8, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int8 var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - 
if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt8Int16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]int16) - v, changed := fastpathTV.DecMapInt8Int16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt8Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]int16) + if v, changed := fastpathTV.DecMapInt8Int16V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int8]int16) - fastpathTV.DecMapInt8Int16V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt8Int16V(rv2i(rv).(map[int8]int16), false, d) } -func (f fastpathT) DecMapInt8Int16X(vp *map[int8]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Int16V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt8Int16X(vp *map[int8]int16, d *Decoder) { + if v, changed := f.DecMapInt8Int16V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt8Int16V(v map[int8]int16, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt8Int16V(v map[int8]int16, canChange bool, d *Decoder) (_ map[int8]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) v = make(map[int8]int16, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int8 var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) 
fastpathDecMapInt8Int32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]int32) - v, changed := fastpathTV.DecMapInt8Int32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt8Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]int32) + if v, changed := fastpathTV.DecMapInt8Int32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int8]int32) - fastpathTV.DecMapInt8Int32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt8Int32V(rv2i(rv).(map[int8]int32), false, d) } -func (f fastpathT) DecMapInt8Int32X(vp *map[int8]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Int32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt8Int32X(vp *map[int8]int32, d *Decoder) { + if v, changed := f.DecMapInt8Int32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt8Int32V(v map[int8]int32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt8Int32V(v map[int8]int32, canChange bool, d *Decoder) (_ map[int8]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) v = make(map[int8]int32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int8 var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt8Int64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]int64) - v, changed := fastpathTV.DecMapInt8Int64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt8Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]int64) + if v, changed := fastpathTV.DecMapInt8Int64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int8]int64) - fastpathTV.DecMapInt8Int64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt8Int64V(rv2i(rv).(map[int8]int64), false, d) } -func (f fastpathT) DecMapInt8Int64X(vp *map[int8]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Int64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt8Int64X(vp 
*map[int8]int64, d *Decoder) { + if v, changed := f.DecMapInt8Int64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt8Int64V(v map[int8]int64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt8Int64V(v map[int8]int64, canChange bool, d *Decoder) (_ map[int8]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) v = make(map[int8]int64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int8 var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt8Float32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]float32) - v, changed := fastpathTV.DecMapInt8Float32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt8Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]float32) + if v, changed := fastpathTV.DecMapInt8Float32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int8]float32) - fastpathTV.DecMapInt8Float32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt8Float32V(rv2i(rv).(map[int8]float32), false, d) } -func (f fastpathT) DecMapInt8Float32X(vp *map[int8]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Float32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt8Float32X(vp *map[int8]float32, d *Decoder) { + if v, changed := f.DecMapInt8Float32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt8Float32V(v map[int8]float32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt8Float32V(v map[int8]float32, canChange bool, d *Decoder) (_ map[int8]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) v = make(map[int8]float32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int8 var mv float32 - if containerLen 
> 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt8Float64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]float64) - v, changed := fastpathTV.DecMapInt8Float64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt8Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]float64) + if v, changed := fastpathTV.DecMapInt8Float64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int8]float64) - fastpathTV.DecMapInt8Float64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt8Float64V(rv2i(rv).(map[int8]float64), false, d) } -func (f fastpathT) DecMapInt8Float64X(vp *map[int8]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8Float64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt8Float64X(vp *map[int8]float64, d *Decoder) { + if v, changed := f.DecMapInt8Float64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt8Float64V(v map[int8]float64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt8Float64V(v map[int8]float64, canChange bool, d *Decoder) (_ map[int8]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) v = make(map[int8]float64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int8 var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if 
dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt8BoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int8]bool) - v, changed := fastpathTV.DecMapInt8BoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt8BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]bool) + if v, changed := fastpathTV.DecMapInt8BoolV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int8]bool) - fastpathTV.DecMapInt8BoolV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt8BoolV(rv2i(rv).(map[int8]bool), false, d) } -func (f fastpathT) DecMapInt8BoolX(vp *map[int8]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt8BoolV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt8BoolX(vp *map[int8]bool, d *Decoder) { + if v, changed := f.DecMapInt8BoolV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt8BoolV(v map[int8]bool, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt8BoolV(v map[int8]bool, canChange bool, d *Decoder) (_ map[int8]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) v = make(map[int8]bool, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int8 var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int8(dd.DecodeInt(8)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv + mk = int8(dd.DecodeInt(8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt16IntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]interface{}) - v, changed := fastpathTV.DecMapInt16IntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt16IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]interface{}) + if v, changed := fastpathTV.DecMapInt16IntfV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int16]interface{}) - fastpathTV.DecMapInt16IntfV(v, fastpathCheckNilFalse, false, f.d) + return } 
+ fastpathTV.DecMapInt16IntfV(rv2i(rv).(map[int16]interface{}), false, d) } -func (f fastpathT) DecMapInt16IntfX(vp *map[int16]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16IntfV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt16IntfX(vp *map[int16]interface{}, d *Decoder) { + if v, changed := f.DecMapInt16IntfV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt16IntfV(v map[int16]interface{}, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt16IntfV(v map[int16]interface{}, canChange bool, d *Decoder) (_ map[int16]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) v = make(map[int16]interface{}, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } mapGet := !d.h.MapValueReset && !d.h.InterfaceReset var mk int16 var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv + v[mk] = nil } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt16StringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]string) - v, changed := fastpathTV.DecMapInt16StringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt16StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]string) + if v, changed := fastpathTV.DecMapInt16StringV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int16]string) - fastpathTV.DecMapInt16StringV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt16StringV(rv2i(rv).(map[int16]string), false, d) } -func (f fastpathT) DecMapInt16StringX(vp *map[int16]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16StringV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt16StringX(vp *map[int16]string, d *Decoder) { + if v, changed := f.DecMapInt16StringV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt16StringV(v map[int16]string, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt16StringV(v map[int16]string, canChange bool, d *Decoder) (_ map[int16]string, changed bool) { 
- dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 18) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) v = make(map[int16]string, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int16 var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt16UintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]uint) - v, changed := fastpathTV.DecMapInt16UintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt16UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]uint) + if v, changed := fastpathTV.DecMapInt16UintV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int16]uint) - fastpathTV.DecMapInt16UintV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt16UintV(rv2i(rv).(map[int16]uint), false, d) } -func (f fastpathT) DecMapInt16UintX(vp *map[int16]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16UintV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt16UintX(vp *map[int16]uint, d *Decoder) { + if v, changed := f.DecMapInt16UintV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt16UintV(v map[int16]uint, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt16UintV(v map[int16]uint, canChange bool, d *Decoder) (_ map[int16]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) v = make(map[int16]uint, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int16 var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < 
containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt16Uint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]uint8) - v, changed := fastpathTV.DecMapInt16Uint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt16Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]uint8) + if v, changed := fastpathTV.DecMapInt16Uint8V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int16]uint8) - fastpathTV.DecMapInt16Uint8V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt16Uint8V(rv2i(rv).(map[int16]uint8), false, d) } -func (f fastpathT) DecMapInt16Uint8X(vp *map[int16]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Uint8V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt16Uint8X(vp *map[int16]uint8, d *Decoder) { + if v, changed := f.DecMapInt16Uint8V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt16Uint8V(v map[int16]uint8, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt16Uint8V(v map[int16]uint8, canChange bool, d *Decoder) (_ map[int16]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) v = make(map[int16]uint8, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int16 var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt16Uint16R(rv reflect.Value) { 
- if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]uint16) - v, changed := fastpathTV.DecMapInt16Uint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt16Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]uint16) + if v, changed := fastpathTV.DecMapInt16Uint16V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int16]uint16) - fastpathTV.DecMapInt16Uint16V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt16Uint16V(rv2i(rv).(map[int16]uint16), false, d) } -func (f fastpathT) DecMapInt16Uint16X(vp *map[int16]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Uint16V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt16Uint16X(vp *map[int16]uint16, d *Decoder) { + if v, changed := f.DecMapInt16Uint16V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt16Uint16V(v map[int16]uint16, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt16Uint16V(v map[int16]uint16, canChange bool, d *Decoder) (_ map[int16]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 4) v = make(map[int16]uint16, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int16 var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt16Uint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]uint32) - v, changed := fastpathTV.DecMapInt16Uint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt16Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]uint32) + if v, changed := fastpathTV.DecMapInt16Uint32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int16]uint32) - fastpathTV.DecMapInt16Uint32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt16Uint32V(rv2i(rv).(map[int16]uint32), false, d) } -func (f fastpathT) DecMapInt16Uint32X(vp *map[int16]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Uint32V(*vp, checkNil, true, d) - if changed { 
+func (f fastpathT) DecMapInt16Uint32X(vp *map[int16]uint32, d *Decoder) { + if v, changed := f.DecMapInt16Uint32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt16Uint32V(v map[int16]uint32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt16Uint32V(v map[int16]uint32, canChange bool, d *Decoder) (_ map[int16]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) v = make(map[int16]uint32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int16 var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt16Uint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]uint64) - v, changed := fastpathTV.DecMapInt16Uint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt16Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]uint64) + if v, changed := fastpathTV.DecMapInt16Uint64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int16]uint64) - fastpathTV.DecMapInt16Uint64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt16Uint64V(rv2i(rv).(map[int16]uint64), false, d) } -func (f fastpathT) DecMapInt16Uint64X(vp *map[int16]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Uint64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt16Uint64X(vp *map[int16]uint64, d *Decoder) { + if v, changed := f.DecMapInt16Uint64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt16Uint64V(v map[int16]uint64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt16Uint64V(v map[int16]uint64, canChange bool, d *Decoder) (_ map[int16]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) v = make(map[int16]uint64, xlen) changed = true } + if 
containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int16 var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt16UintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]uintptr) - v, changed := fastpathTV.DecMapInt16UintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt16UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]uintptr) + if v, changed := fastpathTV.DecMapInt16UintptrV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int16]uintptr) - fastpathTV.DecMapInt16UintptrV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt16UintptrV(rv2i(rv).(map[int16]uintptr), false, d) } -func (f fastpathT) DecMapInt16UintptrX(vp *map[int16]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16UintptrV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt16UintptrX(vp *map[int16]uintptr, d *Decoder) { + if v, changed := f.DecMapInt16UintptrV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt16UintptrV(v map[int16]uintptr, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt16UintptrV(v map[int16]uintptr, canChange bool, d *Decoder) (_ map[int16]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) v = make(map[int16]uintptr, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int16 var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = 
uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt16IntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]int) - v, changed := fastpathTV.DecMapInt16IntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt16IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]int) + if v, changed := fastpathTV.DecMapInt16IntV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int16]int) - fastpathTV.DecMapInt16IntV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt16IntV(rv2i(rv).(map[int16]int), false, d) } -func (f fastpathT) DecMapInt16IntX(vp *map[int16]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16IntV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt16IntX(vp *map[int16]int, d *Decoder) { + if v, changed := f.DecMapInt16IntV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt16IntV(v map[int16]int, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt16IntV(v map[int16]int, canChange bool, d *Decoder) (_ map[int16]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) v = make(map[int16]int, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int16 var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt16Int8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]int8) - v, changed := fastpathTV.DecMapInt16Int8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt16Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]int8) + if v, changed := 
fastpathTV.DecMapInt16Int8V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int16]int8) - fastpathTV.DecMapInt16Int8V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt16Int8V(rv2i(rv).(map[int16]int8), false, d) } -func (f fastpathT) DecMapInt16Int8X(vp *map[int16]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Int8V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt16Int8X(vp *map[int16]int8, d *Decoder) { + if v, changed := f.DecMapInt16Int8V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt16Int8V(v map[int16]int8, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt16Int8V(v map[int16]int8, canChange bool, d *Decoder) (_ map[int16]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) v = make(map[int16]int8, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int16 var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt16Int16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]int16) - v, changed := fastpathTV.DecMapInt16Int16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt16Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]int16) + if v, changed := fastpathTV.DecMapInt16Int16V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int16]int16) - fastpathTV.DecMapInt16Int16V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt16Int16V(rv2i(rv).(map[int16]int16), false, d) } -func (f fastpathT) DecMapInt16Int16X(vp *map[int16]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Int16V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt16Int16X(vp *map[int16]int16, d *Decoder) { + if v, changed := f.DecMapInt16Int16V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt16Int16V(v map[int16]int16, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt16Int16V(v map[int16]int16, canChange bool, d *Decoder) (_ map[int16]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && 
dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 4) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 4) v = make(map[int16]int16, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int16 var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt16Int32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]int32) - v, changed := fastpathTV.DecMapInt16Int32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt16Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]int32) + if v, changed := fastpathTV.DecMapInt16Int32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int16]int32) - fastpathTV.DecMapInt16Int32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt16Int32V(rv2i(rv).(map[int16]int32), false, d) } -func (f fastpathT) DecMapInt16Int32X(vp *map[int16]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Int32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt16Int32X(vp *map[int16]int32, d *Decoder) { + if v, changed := f.DecMapInt16Int32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt16Int32V(v map[int16]int32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt16Int32V(v map[int16]int32, canChange bool, d *Decoder) (_ map[int16]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) v = make(map[int16]int32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int16 var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || 
!(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt16Int64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]int64) - v, changed := fastpathTV.DecMapInt16Int64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt16Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]int64) + if v, changed := fastpathTV.DecMapInt16Int64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int16]int64) - fastpathTV.DecMapInt16Int64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt16Int64V(rv2i(rv).(map[int16]int64), false, d) } -func (f fastpathT) DecMapInt16Int64X(vp *map[int16]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Int64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt16Int64X(vp *map[int16]int64, d *Decoder) { + if v, changed := f.DecMapInt16Int64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt16Int64V(v map[int16]int64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt16Int64V(v map[int16]int64, canChange bool, d *Decoder) (_ map[int16]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) v = make(map[int16]int64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int16 var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt16Float32R(rv reflect.Value) { - if rv.CanAddr() { - vp := 
rv.Addr().Interface().(*map[int16]float32) - v, changed := fastpathTV.DecMapInt16Float32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt16Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]float32) + if v, changed := fastpathTV.DecMapInt16Float32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int16]float32) - fastpathTV.DecMapInt16Float32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt16Float32V(rv2i(rv).(map[int16]float32), false, d) } -func (f fastpathT) DecMapInt16Float32X(vp *map[int16]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Float32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt16Float32X(vp *map[int16]float32, d *Decoder) { + if v, changed := f.DecMapInt16Float32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt16Float32V(v map[int16]float32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt16Float32V(v map[int16]float32, canChange bool, d *Decoder) (_ map[int16]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) v = make(map[int16]float32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int16 var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt16Float64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]float64) - v, changed := fastpathTV.DecMapInt16Float64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt16Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]float64) + if v, changed := fastpathTV.DecMapInt16Float64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int16]float64) - fastpathTV.DecMapInt16Float64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt16Float64V(rv2i(rv).(map[int16]float64), false, d) } -func (f fastpathT) DecMapInt16Float64X(vp *map[int16]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16Float64V(*vp, checkNil, true, d) 
- if changed { +func (f fastpathT) DecMapInt16Float64X(vp *map[int16]float64, d *Decoder) { + if v, changed := f.DecMapInt16Float64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt16Float64V(v map[int16]float64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt16Float64V(v map[int16]float64, canChange bool, d *Decoder) (_ map[int16]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) v = make(map[int16]float64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int16 var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt16BoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int16]bool) - v, changed := fastpathTV.DecMapInt16BoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt16BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]bool) + if v, changed := fastpathTV.DecMapInt16BoolV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int16]bool) - fastpathTV.DecMapInt16BoolV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt16BoolV(rv2i(rv).(map[int16]bool), false, d) } -func (f fastpathT) DecMapInt16BoolX(vp *map[int16]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt16BoolV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt16BoolX(vp *map[int16]bool, d *Decoder) { + if v, changed := f.DecMapInt16BoolV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt16BoolV(v map[int16]bool, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt16BoolV(v map[int16]bool, canChange bool, d *Decoder) (_ map[int16]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) v = make(map[int16]bool, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + 
return v, changed + } var mk int16 var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int16(dd.DecodeInt(16)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv + mk = int16(dd.DecodeInt(16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt32IntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]interface{}) - v, changed := fastpathTV.DecMapInt32IntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt32IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]interface{}) + if v, changed := fastpathTV.DecMapInt32IntfV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int32]interface{}) - fastpathTV.DecMapInt32IntfV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt32IntfV(rv2i(rv).(map[int32]interface{}), false, d) } -func (f fastpathT) DecMapInt32IntfX(vp *map[int32]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32IntfV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt32IntfX(vp *map[int32]interface{}, d *Decoder) { + if v, changed := f.DecMapInt32IntfV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt32IntfV(v map[int32]interface{}, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt32IntfV(v map[int32]interface{}, canChange bool, d *Decoder) (_ map[int32]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) v = make(map[int32]interface{}, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } mapGet := !d.h.MapValueReset && !d.h.InterfaceReset var mk int32 var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - 
cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv + v[mk] = nil } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt32StringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]string) - v, changed := fastpathTV.DecMapInt32StringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt32StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]string) + if v, changed := fastpathTV.DecMapInt32StringV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int32]string) - fastpathTV.DecMapInt32StringV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt32StringV(rv2i(rv).(map[int32]string), false, d) } -func (f fastpathT) DecMapInt32StringX(vp *map[int32]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32StringV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt32StringX(vp *map[int32]string, d *Decoder) { + if v, changed := f.DecMapInt32StringV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt32StringV(v map[int32]string, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt32StringV(v map[int32]string, canChange bool, d *Decoder) (_ map[int32]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 20) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) v = make(map[int32]string, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int32 var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt32UintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]uint) - v, changed := fastpathTV.DecMapInt32UintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt32UintR(f 
*codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]uint) + if v, changed := fastpathTV.DecMapInt32UintV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int32]uint) - fastpathTV.DecMapInt32UintV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt32UintV(rv2i(rv).(map[int32]uint), false, d) } -func (f fastpathT) DecMapInt32UintX(vp *map[int32]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32UintV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt32UintX(vp *map[int32]uint, d *Decoder) { + if v, changed := f.DecMapInt32UintV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt32UintV(v map[int32]uint, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt32UintV(v map[int32]uint, canChange bool, d *Decoder) (_ map[int32]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) v = make(map[int32]uint, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int32 var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt32Uint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]uint8) - v, changed := fastpathTV.DecMapInt32Uint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt32Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]uint8) + if v, changed := fastpathTV.DecMapInt32Uint8V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int32]uint8) - fastpathTV.DecMapInt32Uint8V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt32Uint8V(rv2i(rv).(map[int32]uint8), false, d) } -func (f fastpathT) DecMapInt32Uint8X(vp *map[int32]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32Uint8V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt32Uint8X(vp *map[int32]uint8, d *Decoder) { + if v, changed := f.DecMapInt32Uint8V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt32Uint8V(v map[int32]uint8, checkNil bool, canChange bool, +func (_ 
fastpathT) DecMapInt32Uint8V(v map[int32]uint8, canChange bool, d *Decoder) (_ map[int32]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) v = make(map[int32]uint8, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int32 var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt32Uint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]uint16) - v, changed := fastpathTV.DecMapInt32Uint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt32Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]uint16) + if v, changed := fastpathTV.DecMapInt32Uint16V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int32]uint16) - fastpathTV.DecMapInt32Uint16V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt32Uint16V(rv2i(rv).(map[int32]uint16), false, d) } -func (f fastpathT) DecMapInt32Uint16X(vp *map[int32]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32Uint16V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt32Uint16X(vp *map[int32]uint16, d *Decoder) { + if v, changed := f.DecMapInt32Uint16V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt32Uint16V(v map[int32]uint16, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt32Uint16V(v map[int32]uint16, canChange bool, d *Decoder) (_ map[int32]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) v = make(map[int32]uint16, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int32 var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - 
cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt32Uint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]uint32) - v, changed := fastpathTV.DecMapInt32Uint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt32Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]uint32) + if v, changed := fastpathTV.DecMapInt32Uint32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int32]uint32) - fastpathTV.DecMapInt32Uint32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt32Uint32V(rv2i(rv).(map[int32]uint32), false, d) } -func (f fastpathT) DecMapInt32Uint32X(vp *map[int32]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32Uint32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt32Uint32X(vp *map[int32]uint32, d *Decoder) { + if v, changed := f.DecMapInt32Uint32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt32Uint32V(v map[int32]uint32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt32Uint32V(v map[int32]uint32, canChange bool, d *Decoder) (_ map[int32]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) v = make(map[int32]uint32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int32 var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + 
v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt32Uint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]uint64) - v, changed := fastpathTV.DecMapInt32Uint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt32Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]uint64) + if v, changed := fastpathTV.DecMapInt32Uint64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int32]uint64) - fastpathTV.DecMapInt32Uint64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt32Uint64V(rv2i(rv).(map[int32]uint64), false, d) } -func (f fastpathT) DecMapInt32Uint64X(vp *map[int32]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32Uint64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt32Uint64X(vp *map[int32]uint64, d *Decoder) { + if v, changed := f.DecMapInt32Uint64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt32Uint64V(v map[int32]uint64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt32Uint64V(v map[int32]uint64, canChange bool, d *Decoder) (_ map[int32]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) v = make(map[int32]uint64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int32 var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt32UintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]uintptr) - v, changed := fastpathTV.DecMapInt32UintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt32UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]uintptr) + if v, changed := fastpathTV.DecMapInt32UintptrV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int32]uintptr) - fastpathTV.DecMapInt32UintptrV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt32UintptrV(rv2i(rv).(map[int32]uintptr), false, d) } 
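// Editor's note: a sketch, not part of the patch, of the decode loop shared by
// the regenerated DecMap*V functions above. Two changes are visible in the
// diff: (1) the old "containerLen > 0" / "containerLen < 0" twin loops are
// folded into one loop that handles both length-prefixed and break-terminated
// maps, and (2) a nil value in the stream either deletes the key (the new
// DeleteOnNilMapValue option) or stores the zero value. The stream and handle
// types below are hypothetical stand-ins, not the codec package's API.
package main

import "fmt"

type entry struct {
	key int16
	val *float32 // nil plays the role of dd.TryDecodeAsNil() returning true
}

type stream struct {
	entries []entry
	n       int // result of ReadMapStart: -1 means "unknown, wait for break"
	i       int
}

func (s *stream) ReadMapStart() int { return s.n }
func (s *stream) CheckBreak() bool  { return s.i >= len(s.entries) }
func (s *stream) next() entry       { e := s.entries[s.i]; s.i++; return e }

type handle struct{ DeleteOnNilMapValue bool }

func decode(s *stream, h handle, v map[int16]float32) map[int16]float32 {
	containerLen := s.ReadMapStart()
	if v == nil {
		v = make(map[int16]float32, 4) // the real code sizes this via decInferLen
	}
	hasLen := containerLen > 0
	// Same loop condition as the generated code: count down a known length,
	// otherwise keep reading until the decoder reports a break token.
	for j := 0; (hasLen && j < containerLen) || !(hasLen || s.CheckBreak()); j++ {
		e := s.next()
		if e.val == nil { // corresponds to dd.TryDecodeAsNil()
			if h.DeleteOnNilMapValue {
				delete(v, e.key)
			} else {
				v[e.key] = 0 // zero value, matching `v[mk] = 0` / `""` / `false`
			}
			continue
		}
		v[e.key] = *e.val
	}
	return v
}

func main() {
	f := float32(1.5)
	in := []entry{{key: 1, val: &f}, {key: 2, val: nil}}

	// Length-prefixed map; nil values are overwritten with the zero value.
	fmt.Println(decode(&stream{entries: in, n: 2}, handle{}, map[int16]float32{2: 9}))
	// Break-terminated map; nil values delete the key instead.
	fmt.Println(decode(&stream{entries: in, n: -1}, handle{DeleteOnNilMapValue: true}, map[int16]float32{2: 9}))
}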
-func (f fastpathT) DecMapInt32UintptrX(vp *map[int32]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32UintptrV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt32UintptrX(vp *map[int32]uintptr, d *Decoder) { + if v, changed := f.DecMapInt32UintptrV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt32UintptrV(v map[int32]uintptr, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt32UintptrV(v map[int32]uintptr, canChange bool, d *Decoder) (_ map[int32]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) v = make(map[int32]uintptr, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int32 var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt32IntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]int) - v, changed := fastpathTV.DecMapInt32IntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt32IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]int) + if v, changed := fastpathTV.DecMapInt32IntV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int32]int) - fastpathTV.DecMapInt32IntV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt32IntV(rv2i(rv).(map[int32]int), false, d) } -func (f fastpathT) DecMapInt32IntX(vp *map[int32]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32IntV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt32IntX(vp *map[int32]int, d *Decoder) { + if v, changed := f.DecMapInt32IntV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt32IntV(v map[int32]int, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt32IntV(v map[int32]int, canChange bool, d *Decoder) (_ map[int32]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := 
decInferLen(containerLen, d.h.MaxInitLen, 12) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) v = make(map[int32]int, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int32 var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt32Int8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]int8) - v, changed := fastpathTV.DecMapInt32Int8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt32Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]int8) + if v, changed := fastpathTV.DecMapInt32Int8V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int32]int8) - fastpathTV.DecMapInt32Int8V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt32Int8V(rv2i(rv).(map[int32]int8), false, d) } -func (f fastpathT) DecMapInt32Int8X(vp *map[int32]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32Int8V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt32Int8X(vp *map[int32]int8, d *Decoder) { + if v, changed := f.DecMapInt32Int8V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt32Int8V(v map[int32]int8, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt32Int8V(v map[int32]int8, canChange bool, d *Decoder) (_ map[int32]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) v = make(map[int32]int8, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int32 var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = 
int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt32Int16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]int16) - v, changed := fastpathTV.DecMapInt32Int16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt32Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]int16) + if v, changed := fastpathTV.DecMapInt32Int16V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int32]int16) - fastpathTV.DecMapInt32Int16V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt32Int16V(rv2i(rv).(map[int32]int16), false, d) } -func (f fastpathT) DecMapInt32Int16X(vp *map[int32]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32Int16V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt32Int16X(vp *map[int32]int16, d *Decoder) { + if v, changed := f.DecMapInt32Int16V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt32Int16V(v map[int32]int16, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt32Int16V(v map[int32]int16, canChange bool, d *Decoder) (_ map[int32]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 6) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) v = make(map[int32]int16, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int32 var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt32Int32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]int32) - v, changed := fastpathTV.DecMapInt32Int32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt32Int32R(f *codecFnInfo, rv reflect.Value) { + 
if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]int32) + if v, changed := fastpathTV.DecMapInt32Int32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int32]int32) - fastpathTV.DecMapInt32Int32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt32Int32V(rv2i(rv).(map[int32]int32), false, d) } -func (f fastpathT) DecMapInt32Int32X(vp *map[int32]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32Int32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt32Int32X(vp *map[int32]int32, d *Decoder) { + if v, changed := f.DecMapInt32Int32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt32Int32V(v map[int32]int32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt32Int32V(v map[int32]int32, canChange bool, d *Decoder) (_ map[int32]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) v = make(map[int32]int32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int32 var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt32Int64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]int64) - v, changed := fastpathTV.DecMapInt32Int64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt32Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]int64) + if v, changed := fastpathTV.DecMapInt32Int64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int32]int64) - fastpathTV.DecMapInt32Int64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt32Int64V(rv2i(rv).(map[int32]int64), false, d) } -func (f fastpathT) DecMapInt32Int64X(vp *map[int32]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32Int64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt32Int64X(vp *map[int32]int64, d *Decoder) { + if v, changed := f.DecMapInt32Int64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt32Int64V(v map[int32]int64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt32Int64V(v map[int32]int64, 
canChange bool, d *Decoder) (_ map[int32]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) v = make(map[int32]int64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int32 var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt32Float32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]float32) - v, changed := fastpathTV.DecMapInt32Float32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt32Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]float32) + if v, changed := fastpathTV.DecMapInt32Float32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int32]float32) - fastpathTV.DecMapInt32Float32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt32Float32V(rv2i(rv).(map[int32]float32), false, d) } -func (f fastpathT) DecMapInt32Float32X(vp *map[int32]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32Float32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt32Float32X(vp *map[int32]float32, d *Decoder) { + if v, changed := f.DecMapInt32Float32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt32Float32V(v map[int32]float32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt32Float32V(v map[int32]float32, canChange bool, d *Decoder) (_ map[int32]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 8) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) v = make(map[int32]float32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int32 var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = 
float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt32Float64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]float64) - v, changed := fastpathTV.DecMapInt32Float64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt32Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]float64) + if v, changed := fastpathTV.DecMapInt32Float64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int32]float64) - fastpathTV.DecMapInt32Float64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt32Float64V(rv2i(rv).(map[int32]float64), false, d) } -func (f fastpathT) DecMapInt32Float64X(vp *map[int32]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32Float64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt32Float64X(vp *map[int32]float64, d *Decoder) { + if v, changed := f.DecMapInt32Float64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt32Float64V(v map[int32]float64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt32Float64V(v map[int32]float64, canChange bool, d *Decoder) (_ map[int32]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) v = make(map[int32]float64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int32 var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv } } - if cr != 
nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt32BoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int32]bool) - v, changed := fastpathTV.DecMapInt32BoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt32BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]bool) + if v, changed := fastpathTV.DecMapInt32BoolV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int32]bool) - fastpathTV.DecMapInt32BoolV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt32BoolV(rv2i(rv).(map[int32]bool), false, d) } -func (f fastpathT) DecMapInt32BoolX(vp *map[int32]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt32BoolV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt32BoolX(vp *map[int32]bool, d *Decoder) { + if v, changed := f.DecMapInt32BoolV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt32BoolV(v map[int32]bool, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt32BoolV(v map[int32]bool, canChange bool, d *Decoder) (_ map[int32]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) v = make(map[int32]bool, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int32 var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = int32(dd.DecodeInt(32)) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv + mk = int32(dd.DecodeInt(32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt64IntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]interface{}) - v, changed := fastpathTV.DecMapInt64IntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt64IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]interface{}) + if v, changed := fastpathTV.DecMapInt64IntfV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int64]interface{}) - fastpathTV.DecMapInt64IntfV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt64IntfV(rv2i(rv).(map[int64]interface{}), false, d) } -func (f fastpathT) DecMapInt64IntfX(vp *map[int64]interface{}, checkNil 
bool, d *Decoder) { - v, changed := f.DecMapInt64IntfV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt64IntfX(vp *map[int64]interface{}, d *Decoder) { + if v, changed := f.DecMapInt64IntfV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt64IntfV(v map[int64]interface{}, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt64IntfV(v map[int64]interface{}, canChange bool, d *Decoder) (_ map[int64]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) v = make(map[int64]interface{}, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } mapGet := !d.h.MapValueReset && !d.h.InterfaceReset var mk int64 var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv + v[mk] = nil } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt64StringR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]string) - v, changed := fastpathTV.DecMapInt64StringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt64StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]string) + if v, changed := fastpathTV.DecMapInt64StringV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int64]string) - fastpathTV.DecMapInt64StringV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt64StringV(rv2i(rv).(map[int64]string), false, d) } -func (f fastpathT) DecMapInt64StringX(vp *map[int64]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64StringV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt64StringX(vp *map[int64]string, d *Decoder) { + if v, changed := f.DecMapInt64StringV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt64StringV(v map[int64]string, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt64StringV(v map[int64]string, canChange bool, d *Decoder) (_ map[int64]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, 
d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 24) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) v = make(map[int64]string, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int64 var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt64UintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]uint) - v, changed := fastpathTV.DecMapInt64UintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt64UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]uint) + if v, changed := fastpathTV.DecMapInt64UintV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int64]uint) - fastpathTV.DecMapInt64UintV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt64UintV(rv2i(rv).(map[int64]uint), false, d) } -func (f fastpathT) DecMapInt64UintX(vp *map[int64]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64UintV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt64UintX(vp *map[int64]uint, d *Decoder) { + if v, changed := f.DecMapInt64UintV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt64UintV(v map[int64]uint, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt64UintV(v map[int64]uint, canChange bool, d *Decoder) (_ map[int64]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) v = make(map[int64]uint, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int64 var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - 
cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt64Uint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]uint8) - v, changed := fastpathTV.DecMapInt64Uint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt64Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]uint8) + if v, changed := fastpathTV.DecMapInt64Uint8V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int64]uint8) - fastpathTV.DecMapInt64Uint8V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt64Uint8V(rv2i(rv).(map[int64]uint8), false, d) } -func (f fastpathT) DecMapInt64Uint8X(vp *map[int64]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Uint8V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt64Uint8X(vp *map[int64]uint8, d *Decoder) { + if v, changed := f.DecMapInt64Uint8V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt64Uint8V(v map[int64]uint8, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt64Uint8V(v map[int64]uint8, canChange bool, d *Decoder) (_ map[int64]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) v = make(map[int64]uint8, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int64 var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt64Uint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]uint16) - v, changed := fastpathTV.DecMapInt64Uint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) 
fastpathDecMapInt64Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]uint16) + if v, changed := fastpathTV.DecMapInt64Uint16V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int64]uint16) - fastpathTV.DecMapInt64Uint16V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt64Uint16V(rv2i(rv).(map[int64]uint16), false, d) } -func (f fastpathT) DecMapInt64Uint16X(vp *map[int64]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Uint16V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt64Uint16X(vp *map[int64]uint16, d *Decoder) { + if v, changed := f.DecMapInt64Uint16V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt64Uint16V(v map[int64]uint16, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt64Uint16V(v map[int64]uint16, canChange bool, d *Decoder) (_ map[int64]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) v = make(map[int64]uint16, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int64 var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint16(dd.DecodeUint(16)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt64Uint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]uint32) - v, changed := fastpathTV.DecMapInt64Uint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt64Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]uint32) + if v, changed := fastpathTV.DecMapInt64Uint32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int64]uint32) - fastpathTV.DecMapInt64Uint32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt64Uint32V(rv2i(rv).(map[int64]uint32), false, d) } -func (f fastpathT) DecMapInt64Uint32X(vp *map[int64]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Uint32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt64Uint32X(vp *map[int64]uint32, d *Decoder) { + if v, changed := f.DecMapInt64Uint32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt64Uint32V(v map[int64]uint32, 
checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt64Uint32V(v map[int64]uint32, canChange bool, d *Decoder) (_ map[int64]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) v = make(map[int64]uint32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int64 var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt64Uint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]uint64) - v, changed := fastpathTV.DecMapInt64Uint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt64Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]uint64) + if v, changed := fastpathTV.DecMapInt64Uint64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int64]uint64) - fastpathTV.DecMapInt64Uint64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt64Uint64V(rv2i(rv).(map[int64]uint64), false, d) } -func (f fastpathT) DecMapInt64Uint64X(vp *map[int64]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Uint64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt64Uint64X(vp *map[int64]uint64, d *Decoder) { + if v, changed := f.DecMapInt64Uint64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt64Uint64V(v map[int64]uint64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt64Uint64V(v map[int64]uint64, canChange bool, d *Decoder) (_ map[int64]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) v = make(map[int64]uint64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int64 var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if 
cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt64UintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]uintptr) - v, changed := fastpathTV.DecMapInt64UintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt64UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]uintptr) + if v, changed := fastpathTV.DecMapInt64UintptrV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int64]uintptr) - fastpathTV.DecMapInt64UintptrV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt64UintptrV(rv2i(rv).(map[int64]uintptr), false, d) } -func (f fastpathT) DecMapInt64UintptrX(vp *map[int64]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64UintptrV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt64UintptrX(vp *map[int64]uintptr, d *Decoder) { + if v, changed := f.DecMapInt64UintptrV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt64UintptrV(v map[int64]uintptr, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt64UintptrV(v map[int64]uintptr, canChange bool, d *Decoder) (_ map[int64]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) v = make(map[int64]uintptr, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int64 var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v 
!= nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt64IntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]int) - v, changed := fastpathTV.DecMapInt64IntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt64IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]int) + if v, changed := fastpathTV.DecMapInt64IntV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int64]int) - fastpathTV.DecMapInt64IntV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt64IntV(rv2i(rv).(map[int64]int), false, d) } -func (f fastpathT) DecMapInt64IntX(vp *map[int64]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64IntV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt64IntX(vp *map[int64]int, d *Decoder) { + if v, changed := f.DecMapInt64IntV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt64IntV(v map[int64]int, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt64IntV(v map[int64]int, canChange bool, d *Decoder) (_ map[int64]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) v = make(map[int64]int, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int64 var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt64Int8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]int8) - v, changed := fastpathTV.DecMapInt64Int8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt64Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]int8) + if v, changed := fastpathTV.DecMapInt64Int8V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int64]int8) - fastpathTV.DecMapInt64Int8V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt64Int8V(rv2i(rv).(map[int64]int8), false, d) } -func (f fastpathT) DecMapInt64Int8X(vp *map[int64]int8, checkNil bool, 
d *Decoder) { - v, changed := f.DecMapInt64Int8V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt64Int8X(vp *map[int64]int8, d *Decoder) { + if v, changed := f.DecMapInt64Int8V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt64Int8V(v map[int64]int8, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt64Int8V(v map[int64]int8, canChange bool, d *Decoder) (_ map[int64]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) v = make(map[int64]int8, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int64 var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt64Int16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]int16) - v, changed := fastpathTV.DecMapInt64Int16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt64Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]int16) + if v, changed := fastpathTV.DecMapInt64Int16V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int64]int16) - fastpathTV.DecMapInt64Int16V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt64Int16V(rv2i(rv).(map[int64]int16), false, d) } -func (f fastpathT) DecMapInt64Int16X(vp *map[int64]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Int16V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt64Int16X(vp *map[int64]int16, d *Decoder) { + if v, changed := f.DecMapInt64Int16V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt64Int16V(v map[int64]int16, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt64Int16V(v map[int64]int16, canChange bool, d *Decoder) (_ map[int64]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 10) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) v = make(map[int64]int16, xlen) changed = true } + 
if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int64 var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt64Int32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]int32) - v, changed := fastpathTV.DecMapInt64Int32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt64Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]int32) + if v, changed := fastpathTV.DecMapInt64Int32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int64]int32) - fastpathTV.DecMapInt64Int32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt64Int32V(rv2i(rv).(map[int64]int32), false, d) } -func (f fastpathT) DecMapInt64Int32X(vp *map[int64]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Int32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt64Int32X(vp *map[int64]int32, d *Decoder) { + if v, changed := f.DecMapInt64Int32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt64Int32V(v map[int64]int32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt64Int32V(v map[int64]int32, canChange bool, d *Decoder) (_ map[int64]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) v = make(map[int64]int32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int64 var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv + mk = dd.DecodeInt(64) + if esep { 
+ dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt64Int64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]int64) - v, changed := fastpathTV.DecMapInt64Int64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt64Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]int64) + if v, changed := fastpathTV.DecMapInt64Int64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int64]int64) - fastpathTV.DecMapInt64Int64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt64Int64V(rv2i(rv).(map[int64]int64), false, d) } -func (f fastpathT) DecMapInt64Int64X(vp *map[int64]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Int64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt64Int64X(vp *map[int64]int64, d *Decoder) { + if v, changed := f.DecMapInt64Int64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt64Int64V(v map[int64]int64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt64Int64V(v map[int64]int64, canChange bool, d *Decoder) (_ map[int64]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) v = make(map[int64]int64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int64 var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt64Float32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]float32) - v, changed := fastpathTV.DecMapInt64Float32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt64Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]float32) + if v, changed := fastpathTV.DecMapInt64Float32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int64]float32) - 
fastpathTV.DecMapInt64Float32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt64Float32V(rv2i(rv).(map[int64]float32), false, d) } -func (f fastpathT) DecMapInt64Float32X(vp *map[int64]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Float32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt64Float32X(vp *map[int64]float32, d *Decoder) { + if v, changed := f.DecMapInt64Float32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt64Float32V(v map[int64]float32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt64Float32V(v map[int64]float32, canChange bool, d *Decoder) (_ map[int64]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 12) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) v = make(map[int64]float32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int64 var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt64Float64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]float64) - v, changed := fastpathTV.DecMapInt64Float64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt64Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]float64) + if v, changed := fastpathTV.DecMapInt64Float64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int64]float64) - fastpathTV.DecMapInt64Float64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt64Float64V(rv2i(rv).(map[int64]float64), false, d) } -func (f fastpathT) DecMapInt64Float64X(vp *map[int64]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64Float64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt64Float64X(vp *map[int64]float64, d *Decoder) { + if v, changed := f.DecMapInt64Float64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt64Float64V(v map[int64]float64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt64Float64V(v map[int64]float64, canChange bool, d *Decoder) (_ map[int64]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - 
if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 16) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) v = make(map[int64]float64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int64 var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapInt64BoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[int64]bool) - v, changed := fastpathTV.DecMapInt64BoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapInt64BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]bool) + if v, changed := fastpathTV.DecMapInt64BoolV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[int64]bool) - fastpathTV.DecMapInt64BoolV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapInt64BoolV(rv2i(rv).(map[int64]bool), false, d) } -func (f fastpathT) DecMapInt64BoolX(vp *map[int64]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapInt64BoolV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapInt64BoolX(vp *map[int64]bool, d *Decoder) { + if v, changed := f.DecMapInt64BoolV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapInt64BoolV(v map[int64]bool, checkNil bool, canChange bool, +func (_ fastpathT) DecMapInt64BoolV(v map[int64]bool, canChange bool, d *Decoder) (_ map[int64]bool, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) v = make(map[int64]bool, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk int64 var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if 
containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeInt(64) - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv + mk = dd.DecodeInt(64) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapBoolIntfR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]interface{}) - v, changed := fastpathTV.DecMapBoolIntfV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapBoolIntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]interface{}) + if v, changed := fastpathTV.DecMapBoolIntfV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[bool]interface{}) - fastpathTV.DecMapBoolIntfV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapBoolIntfV(rv2i(rv).(map[bool]interface{}), false, d) } -func (f fastpathT) DecMapBoolIntfX(vp *map[bool]interface{}, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolIntfV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapBoolIntfX(vp *map[bool]interface{}, d *Decoder) { + if v, changed := f.DecMapBoolIntfV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapBoolIntfV(v map[bool]interface{}, checkNil bool, canChange bool, +func (_ fastpathT) DecMapBoolIntfV(v map[bool]interface{}, canChange bool, d *Decoder) (_ map[bool]interface{}, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) v = make(map[bool]interface{}, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } mapGet := !d.h.MapValueReset && !d.h.InterfaceReset var mk bool var mv interface{} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] - } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - if mapGet { - mv = v[mk] + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) } else { - mv = nil - } - d.decode(&mv) - if v != nil { - v[mk] = mv + v[mk] = nil } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapBoolStringR(rv 
reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]string) - v, changed := fastpathTV.DecMapBoolStringV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapBoolStringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]string) + if v, changed := fastpathTV.DecMapBoolStringV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[bool]string) - fastpathTV.DecMapBoolStringV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapBoolStringV(rv2i(rv).(map[bool]string), false, d) } -func (f fastpathT) DecMapBoolStringX(vp *map[bool]string, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolStringV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapBoolStringX(vp *map[bool]string, d *Decoder) { + if v, changed := f.DecMapBoolStringV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapBoolStringV(v map[bool]string, checkNil bool, canChange bool, +func (_ fastpathT) DecMapBoolStringV(v map[bool]string, canChange bool, d *Decoder) (_ map[bool]string, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 17) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) v = make(map[bool]string, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk bool var mv string - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeString() - if v != nil { - v[mk] = mv + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapBoolUintR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]uint) - v, changed := fastpathTV.DecMapBoolUintV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapBoolUintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]uint) + if v, changed := fastpathTV.DecMapBoolUintV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[bool]uint) - fastpathTV.DecMapBoolUintV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapBoolUintV(rv2i(rv).(map[bool]uint), false, d) } -func (f fastpathT) DecMapBoolUintX(vp *map[bool]uint, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolUintV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapBoolUintX(vp *map[bool]uint, d *Decoder) { + if v, changed := 
f.DecMapBoolUintV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapBoolUintV(v map[bool]uint, checkNil bool, canChange bool, +func (_ fastpathT) DecMapBoolUintV(v map[bool]uint, canChange bool, d *Decoder) (_ map[bool]uint, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) v = make(map[bool]uint, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk bool var mv uint - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapBoolUint8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]uint8) - v, changed := fastpathTV.DecMapBoolUint8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapBoolUint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]uint8) + if v, changed := fastpathTV.DecMapBoolUint8V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[bool]uint8) - fastpathTV.DecMapBoolUint8V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapBoolUint8V(rv2i(rv).(map[bool]uint8), false, d) } -func (f fastpathT) DecMapBoolUint8X(vp *map[bool]uint8, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolUint8V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapBoolUint8X(vp *map[bool]uint8, d *Decoder) { + if v, changed := f.DecMapBoolUint8V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapBoolUint8V(v map[bool]uint8, checkNil bool, canChange bool, +func (_ fastpathT) DecMapBoolUint8V(v map[bool]uint8, canChange bool, d *Decoder) (_ map[bool]uint8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) v = make(map[bool]uint8, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk bool var mv uint8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - 
cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint8(dd.DecodeUint(8)) - if v != nil { - v[mk] = mv + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint8(dd.DecodeUint(8)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapBoolUint16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]uint16) - v, changed := fastpathTV.DecMapBoolUint16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapBoolUint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]uint16) + if v, changed := fastpathTV.DecMapBoolUint16V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[bool]uint16) - fastpathTV.DecMapBoolUint16V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapBoolUint16V(rv2i(rv).(map[bool]uint16), false, d) } -func (f fastpathT) DecMapBoolUint16X(vp *map[bool]uint16, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolUint16V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapBoolUint16X(vp *map[bool]uint16, d *Decoder) { + if v, changed := f.DecMapBoolUint16V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapBoolUint16V(v map[bool]uint16, checkNil bool, canChange bool, +func (_ fastpathT) DecMapBoolUint16V(v map[bool]uint16, canChange bool, d *Decoder) (_ map[bool]uint16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) v = make(map[bool]uint16, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk bool var mv uint16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint16(dd.DecodeUint(16)) - if v != nil { - v[mk] = mv + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint16(dd.DecodeUint(16)) + 
if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapBoolUint32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]uint32) - v, changed := fastpathTV.DecMapBoolUint32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapBoolUint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]uint32) + if v, changed := fastpathTV.DecMapBoolUint32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[bool]uint32) - fastpathTV.DecMapBoolUint32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapBoolUint32V(rv2i(rv).(map[bool]uint32), false, d) } -func (f fastpathT) DecMapBoolUint32X(vp *map[bool]uint32, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolUint32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapBoolUint32X(vp *map[bool]uint32, d *Decoder) { + if v, changed := f.DecMapBoolUint32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapBoolUint32V(v map[bool]uint32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapBoolUint32V(v map[bool]uint32, canChange bool, d *Decoder) (_ map[bool]uint32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) v = make(map[bool]uint32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk bool var mv uint32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uint32(dd.DecodeUint(32)) - if v != nil { - v[mk] = mv + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uint32(dd.DecodeUint(32)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapBoolUint64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]uint64) - v, changed := fastpathTV.DecMapBoolUint64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapBoolUint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]uint64) + if v, changed := fastpathTV.DecMapBoolUint64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[bool]uint64) - fastpathTV.DecMapBoolUint64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapBoolUint64V(rv2i(rv).(map[bool]uint64), false, d) } -func (f fastpathT) 
DecMapBoolUint64X(vp *map[bool]uint64, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolUint64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapBoolUint64X(vp *map[bool]uint64, d *Decoder) { + if v, changed := f.DecMapBoolUint64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapBoolUint64V(v map[bool]uint64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapBoolUint64V(v map[bool]uint64, canChange bool, d *Decoder) (_ map[bool]uint64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) v = make(map[bool]uint64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk bool var mv uint64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeUint(64) - if v != nil { - v[mk] = mv + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeUint(64) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapBoolUintptrR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]uintptr) - v, changed := fastpathTV.DecMapBoolUintptrV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapBoolUintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]uintptr) + if v, changed := fastpathTV.DecMapBoolUintptrV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[bool]uintptr) - fastpathTV.DecMapBoolUintptrV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapBoolUintptrV(rv2i(rv).(map[bool]uintptr), false, d) } -func (f fastpathT) DecMapBoolUintptrX(vp *map[bool]uintptr, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolUintptrV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapBoolUintptrX(vp *map[bool]uintptr, d *Decoder) { + if v, changed := f.DecMapBoolUintptrV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapBoolUintptrV(v map[bool]uintptr, checkNil bool, canChange bool, +func (_ fastpathT) DecMapBoolUintptrV(v map[bool]uintptr, canChange bool, d *Decoder) (_ map[bool]uintptr, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + xlen := decInferLen(containerLen, 
d.h.MaxInitLen, 9) v = make(map[bool]uintptr, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk bool var mv uintptr - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = uintptr(dd.DecodeUint(uintBitsize)) - if v != nil { - v[mk] = mv + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = uintptr(dd.DecodeUint(uintBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapBoolIntR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]int) - v, changed := fastpathTV.DecMapBoolIntV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapBoolIntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]int) + if v, changed := fastpathTV.DecMapBoolIntV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[bool]int) - fastpathTV.DecMapBoolIntV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapBoolIntV(rv2i(rv).(map[bool]int), false, d) } -func (f fastpathT) DecMapBoolIntX(vp *map[bool]int, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolIntV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapBoolIntX(vp *map[bool]int, d *Decoder) { + if v, changed := f.DecMapBoolIntV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapBoolIntV(v map[bool]int, checkNil bool, canChange bool, +func (_ fastpathT) DecMapBoolIntV(v map[bool]int, canChange bool, d *Decoder) (_ map[bool]int, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) v = make(map[bool]int, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk bool var mv int - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int(dd.DecodeInt(intBitsize)) - if v != nil { - 
v[mk] = mv + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int(dd.DecodeInt(intBitsize)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapBoolInt8R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]int8) - v, changed := fastpathTV.DecMapBoolInt8V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapBoolInt8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]int8) + if v, changed := fastpathTV.DecMapBoolInt8V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[bool]int8) - fastpathTV.DecMapBoolInt8V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapBoolInt8V(rv2i(rv).(map[bool]int8), false, d) } -func (f fastpathT) DecMapBoolInt8X(vp *map[bool]int8, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolInt8V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapBoolInt8X(vp *map[bool]int8, d *Decoder) { + if v, changed := f.DecMapBoolInt8V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapBoolInt8V(v map[bool]int8, checkNil bool, canChange bool, +func (_ fastpathT) DecMapBoolInt8V(v map[bool]int8, canChange bool, d *Decoder) (_ map[bool]int8, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) v = make(map[bool]int8, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk bool var mv int8 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int8(dd.DecodeInt(8)) - if v != nil { - v[mk] = mv + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int8(dd.DecodeInt(8)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapBoolInt16R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]int16) - v, changed := fastpathTV.DecMapBoolInt16V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapBoolInt16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]int16) + if v, changed := fastpathTV.DecMapBoolInt16V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[bool]int16) - 
fastpathTV.DecMapBoolInt16V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapBoolInt16V(rv2i(rv).(map[bool]int16), false, d) } -func (f fastpathT) DecMapBoolInt16X(vp *map[bool]int16, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolInt16V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapBoolInt16X(vp *map[bool]int16, d *Decoder) { + if v, changed := f.DecMapBoolInt16V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapBoolInt16V(v map[bool]int16, checkNil bool, canChange bool, +func (_ fastpathT) DecMapBoolInt16V(v map[bool]int16, canChange bool, d *Decoder) (_ map[bool]int16, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 3) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) v = make(map[bool]int16, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk bool var mv int16 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int16(dd.DecodeInt(16)) - if v != nil { - v[mk] = mv + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int16(dd.DecodeInt(16)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapBoolInt32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]int32) - v, changed := fastpathTV.DecMapBoolInt32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapBoolInt32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]int32) + if v, changed := fastpathTV.DecMapBoolInt32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[bool]int32) - fastpathTV.DecMapBoolInt32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapBoolInt32V(rv2i(rv).(map[bool]int32), false, d) } -func (f fastpathT) DecMapBoolInt32X(vp *map[bool]int32, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolInt32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapBoolInt32X(vp *map[bool]int32, d *Decoder) { + if v, changed := f.DecMapBoolInt32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapBoolInt32V(v map[bool]int32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapBoolInt32V(v map[bool]int32, canChange bool, d *Decoder) (_ map[bool]int32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := 
dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) v = make(map[bool]int32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk bool var mv int32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = int32(dd.DecodeInt(32)) - if v != nil { - v[mk] = mv + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = int32(dd.DecodeInt(32)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapBoolInt64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]int64) - v, changed := fastpathTV.DecMapBoolInt64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapBoolInt64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]int64) + if v, changed := fastpathTV.DecMapBoolInt64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[bool]int64) - fastpathTV.DecMapBoolInt64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapBoolInt64V(rv2i(rv).(map[bool]int64), false, d) } -func (f fastpathT) DecMapBoolInt64X(vp *map[bool]int64, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolInt64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapBoolInt64X(vp *map[bool]int64, d *Decoder) { + if v, changed := f.DecMapBoolInt64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapBoolInt64V(v map[bool]int64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapBoolInt64V(v map[bool]int64, canChange bool, d *Decoder) (_ map[bool]int64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) v = make(map[bool]int64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk bool var mv int64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = 
dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeInt(64) - if v != nil { - v[mk] = mv + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeInt(64) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapBoolFloat32R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]float32) - v, changed := fastpathTV.DecMapBoolFloat32V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapBoolFloat32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]float32) + if v, changed := fastpathTV.DecMapBoolFloat32V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[bool]float32) - fastpathTV.DecMapBoolFloat32V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapBoolFloat32V(rv2i(rv).(map[bool]float32), false, d) } -func (f fastpathT) DecMapBoolFloat32X(vp *map[bool]float32, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolFloat32V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapBoolFloat32X(vp *map[bool]float32, d *Decoder) { + if v, changed := f.DecMapBoolFloat32V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapBoolFloat32V(v map[bool]float32, checkNil bool, canChange bool, +func (_ fastpathT) DecMapBoolFloat32V(v map[bool]float32, canChange bool, d *Decoder) (_ map[bool]float32, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 5) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) v = make(map[bool]float32, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk bool var mv float32 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = float32(dd.DecodeFloat(true)) - if v != nil { - v[mk] = mv + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = float32(dd.DecodeFloat(true)) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapBoolFloat64R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]float64) - v, changed := fastpathTV.DecMapBoolFloat64V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapBoolFloat64R(f *codecFnInfo, rv reflect.Value) { + if 
rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]float64) + if v, changed := fastpathTV.DecMapBoolFloat64V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[bool]float64) - fastpathTV.DecMapBoolFloat64V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapBoolFloat64V(rv2i(rv).(map[bool]float64), false, d) } -func (f fastpathT) DecMapBoolFloat64X(vp *map[bool]float64, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolFloat64V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapBoolFloat64X(vp *map[bool]float64, d *Decoder) { + if v, changed := f.DecMapBoolFloat64V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapBoolFloat64V(v map[bool]float64, checkNil bool, canChange bool, +func (_ fastpathT) DecMapBoolFloat64V(v map[bool]float64, canChange bool, d *Decoder) (_ map[bool]float64, changed bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 9) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) v = make(map[bool]float64, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk bool var mv float64 - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeFloat(false) - if v != nil { - v[mk] = mv + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 } + continue + } + mv = dd.DecodeFloat(false) + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } -func (f *decFnInfo) fastpathDecMapBoolBoolR(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[bool]bool) - v, changed := fastpathTV.DecMapBoolBoolV(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) fastpathDecMapBoolBoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]bool) + if v, changed := fastpathTV.DecMapBoolBoolV(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[bool]bool) - fastpathTV.DecMapBoolBoolV(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.DecMapBoolBoolV(rv2i(rv).(map[bool]bool), false, d) } -func (f fastpathT) DecMapBoolBoolX(vp *map[bool]bool, checkNil bool, d *Decoder) { - v, changed := f.DecMapBoolBoolV(*vp, checkNil, true, d) - if changed { +func (f fastpathT) DecMapBoolBoolX(vp *map[bool]bool, d *Decoder) { + if v, changed := f.DecMapBoolBoolV(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) DecMapBoolBoolV(v map[bool]bool, checkNil bool, canChange bool, +func (_ fastpathT) DecMapBoolBoolV(v map[bool]bool, canChange bool, d *Decoder) (_ map[bool]bool, changed 
bool) { - dd := d.d - cr := d.cr - - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } + dd, esep := d.d, d.hh.hasElemSeparators() containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, 2) + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) v = make(map[bool]bool, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } var mk bool var mv bool - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { - cr.sendContainerState(containerMapKey) - } - mk = dd.DecodeBool() - if cr != nil { - cr.sendContainerState(containerMapValue) - } - mv = dd.DecodeBool() - if v != nil { - v[mk] = mv + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv } } - if cr != nil { - cr.sendContainerState(containerMapEnd) - } + dd.ReadMapEnd() return v, changed } diff --git a/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl b/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl index c3ffdf93d9..73ed332a80 100644 --- a/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl +++ b/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl @@ -40,9 +40,6 @@ import ( const fastpathEnabled = true -const fastpathCheckNilFalse = false // for reflect -const fastpathCheckNilTrue = true // for type switch - type fastpathT struct {} var fastpathTV fastpathT @@ -50,8 +47,8 @@ var fastpathTV fastpathT type fastpathE struct { rtid uintptr rt reflect.Type - encfn func(*encFnInfo, reflect.Value) - decfn func(*decFnInfo, reflect.Value) + encfn func(*Encoder, *codecFnInfo, reflect.Value) + decfn func(*Decoder, *codecFnInfo, reflect.Value) } type fastpathA [{{ .FastpathLen }}]fastpathE @@ -83,20 +80,29 @@ var fastpathAV fastpathA // due to possible initialization loop error, make fastpath in an init() func init() { + if useLookupRecognizedTypes && recognizedRtidsLoaded { + panic("recognizedRtidsLoaded = true - cannot happen") + } i := 0 - fn := func(v interface{}, fe func(*encFnInfo, reflect.Value), fd func(*decFnInfo, reflect.Value)) (f fastpathE) { + fn := func(v interface{}, + fe func(*Encoder, *codecFnInfo, reflect.Value), + fd func(*Decoder, *codecFnInfo, reflect.Value)) (f fastpathE) { xrt := reflect.TypeOf(v) - xptr := reflect.ValueOf(xrt).Pointer() + xptr := rt2id(xrt) + if useLookupRecognizedTypes { + recognizedRtids = append(recognizedRtids, xptr) + recognizedRtidPtrs = append(recognizedRtidPtrs, rt2id(reflect.PtrTo(xrt))) + } fastpathAV[i] = fastpathE{xptr, xrt, fe, fd} i++ return } {{range .Values}}{{if not .Primitive}}{{if not .MapKey }} - fn([]{{ .Elem }}(nil), (*encFnInfo).{{ .MethodNamePfx "fastpathEnc" false }}R, (*decFnInfo).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}} + fn([]{{ .Elem }}(nil), (*Encoder).{{ .MethodNamePfx "fastpathEnc" false }}R, (*Decoder).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}} {{range .Values}}{{if not 
.Primitive}}{{if .MapKey }} - fn(map[{{ .MapKey }}]{{ .Elem }}(nil), (*encFnInfo).{{ .MethodNamePfx "fastpathEnc" false }}R, (*decFnInfo).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}} + fn(map[{{ .MapKey }}]{{ .Elem }}(nil), (*Encoder).{{ .MethodNamePfx "fastpathEnc" false }}R, (*Decoder).{{ .MethodNamePfx "fastpathDec" false }}R){{end}}{{end}}{{end}} sort.Sort(fastpathAslice(fastpathAV[:])) } @@ -109,10 +115,10 @@ func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { {{range .Values}}{{if not .Primitive}}{{if not .MapKey }} case []{{ .Elem }}:{{else}} case map[{{ .MapKey }}]{{ .Elem }}:{{end}} - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e){{if not .MapKey }} + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e){{if not .MapKey }} case *[]{{ .Elem }}:{{else}} case *map[{{ .MapKey }}]{{ .Elem }}:{{end}} - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e) + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e) {{end}}{{end}} default: _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) @@ -121,13 +127,14 @@ func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return true } +{{/* **** removing this block, as they are never called directly **** func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { switch v := iv.(type) { {{range .Values}}{{if not .Primitive}}{{if not .MapKey }} case []{{ .Elem }}: - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e) + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e) case *[]{{ .Elem }}: - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e) + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e) {{end}}{{end}}{{end}} default: _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) @@ -140,9 +147,9 @@ func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { switch v := iv.(type) { {{range .Values}}{{if not .Primitive}}{{if .MapKey }} case map[{{ .MapKey }}]{{ .Elem }}: - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, fastpathCheckNilTrue, e) + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e) case *map[{{ .MapKey }}]{{ .Elem }}: - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, fastpathCheckNilTrue, e) + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e) {{end}}{{end}}{{end}} default: _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) @@ -150,72 +157,60 @@ func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { } return true } +*/}} // -- -- fast path functions {{range .Values}}{{if not .Primitive}}{{if not .MapKey }} -func (f *encFnInfo) {{ .MethodNamePfx "fastpathEnc" false }}R(rv reflect.Value) { +func (e *Encoder) {{ .MethodNamePfx "fastpathEnc" false }}R(f *codecFnInfo, rv reflect.Value) { if f.ti.mbs { - fastpathTV.{{ .MethodNamePfx "EncAsMap" false }}V(rv.Interface().([]{{ .Elem }}), fastpathCheckNilFalse, f.e) + fastpathTV.{{ .MethodNamePfx "EncAsMap" false }}V(rv2i(rv).([]{{ .Elem }}), e) } else { - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv.Interface().([]{{ .Elem }}), fastpathCheckNilFalse, f.e) + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv2i(rv).([]{{ .Elem }}), e) } } -func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v []{{ .Elem }}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeArrayStart(len(v)) +func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v 
[]{{ .Elem }}, e *Encoder) { + if v == nil { e.e.EncodeNil(); return } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) for _, v2 := range v { - if cr != nil { cr.sendContainerState(containerArrayElem) } + if esep { ee.WriteArrayElem() } {{ encmd .Elem "v2"}} } - if cr != nil { cr.sendContainerState(containerArrayEnd) }{{/* ee.EncodeEnd() */}} + ee.WriteArrayEnd() } -func (_ fastpathT) {{ .MethodNamePfx "EncAsMap" false }}V(v []{{ .Elem }}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } +func (_ fastpathT) {{ .MethodNamePfx "EncAsMap" false }}V(v []{{ .Elem }}, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() if len(v)%2 == 1 { e.errorf("mapBySlice requires even slice length, but got %v", len(v)) return } - ee.EncodeMapStart(len(v) / 2) + ee.WriteMapStart(len(v) / 2) for j, v2 := range v { - if cr != nil { + if esep { if j%2 == 0 { - cr.sendContainerState(containerMapKey) + ee.WriteMapElemKey() } else { - cr.sendContainerState(containerMapValue) + ee.WriteMapElemValue() } } {{ encmd .Elem "v2"}} } - if cr != nil { cr.sendContainerState(containerMapEnd) } + ee.WriteMapEnd() } {{end}}{{end}}{{end}} {{range .Values}}{{if not .Primitive}}{{if .MapKey }} -func (f *encFnInfo) {{ .MethodNamePfx "fastpathEnc" false }}R(rv reflect.Value) { - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv.Interface().(map[{{ .MapKey }}]{{ .Elem }}), fastpathCheckNilFalse, f.e) +func (e *Encoder) {{ .MethodNamePfx "fastpathEnc" false }}R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), e) } -func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, e *Encoder) { - ee := e.e - cr := e.cr - if checkNil && v == nil { - ee.EncodeNil() - return - } - ee.EncodeMapStart(len(v)) +func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, e *Encoder) { + if v == nil { e.e.EncodeNil(); return } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) {{if eq .MapKey "string"}}asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 {{end}}if e.h.Canonical { {{if eq .MapKey "interface{}"}}{{/* out of band @@ -234,9 +229,9 @@ func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Ele } sort.Sort(bytesISlice(v2)) for j := range v2 { - if cr != nil { cr.sendContainerState(containerMapKey) } + if esep { ee.WriteMapElemKey() } e.asis(v2[j].v) - if cr != nil { cr.sendContainerState(containerMapValue) } + if esep { ee.WriteMapElemValue() } e.encode(v[v2[j].i]) } {{else}}{{ $x := sorttype .MapKey true}}v2 := make([]{{ $x }}, len(v)) var i int @@ -246,28 +241,28 @@ func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Ele } sort.Sort({{ sorttype .MapKey false}}(v2)) for _, k2 := range v2 { - if cr != nil { cr.sendContainerState(containerMapKey) } + if esep { ee.WriteMapElemKey() } {{if eq .MapKey "string"}}if asSymbols { ee.EncodeSymbol(k2) } else { ee.EncodeString(c_UTF8, k2) }{{else}}{{ $y := printf "%s(k2)" .MapKey }}{{ encmd .MapKey $y }}{{end}} - if cr != nil { cr.sendContainerState(containerMapValue) } + if esep { ee.WriteMapElemValue() } {{ $y := printf "v[%s(k2)]" .MapKey }}{{ encmd .Elem $y }} } {{end}} } else { for k2, v2 := range v { - if cr != nil { cr.sendContainerState(containerMapKey) } + if esep { ee.WriteMapElemKey() } {{if eq .MapKey "string"}}if asSymbols { ee.EncodeSymbol(k2) } else { ee.EncodeString(c_UTF8, k2) 
}{{else}}{{ encmd .MapKey "k2"}}{{end}} - if cr != nil { cr.sendContainerState(containerMapValue) } + if esep { ee.WriteMapElemValue() } {{ encmd .Elem "v2"}} } } - if cr != nil { cr.sendContainerState(containerMapEnd) }{{/* ee.EncodeEnd() */}} + ee.WriteMapEnd() } {{end}}{{end}}{{end}} @@ -280,11 +275,10 @@ func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { {{range .Values}}{{if not .Primitive}}{{if not .MapKey }} case []{{ .Elem }}:{{else}} case map[{{ .MapKey }}]{{ .Elem }}:{{end}} - fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, fastpathCheckNilFalse, false, d){{if not .MapKey }} - case *[]{{ .Elem }}:{{else}} - case *map[{{ .MapKey }}]{{ .Elem }}:{{end}} - v2, changed2 := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*v, fastpathCheckNilFalse, true, d) - if changed2 { + fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, false, d){{if not .MapKey }} + case *[]{{ .Elem }}: {{else}} + case *map[{{ .MapKey }}]{{ .Elem }}: {{end}} + if v2, changed2 := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*v, true, d); changed2 { *v = v2 } {{end}}{{end}} @@ -295,6 +289,20 @@ func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return true } +func fastpathDecodeSetZeroTypeSwitch(iv interface{}, d *Decoder) bool { + switch v := iv.(type) { +{{range .Values}}{{if not .Primitive}}{{if not .MapKey }} + case *[]{{ .Elem }}: {{else}} + case *map[{{ .MapKey }}]{{ .Elem }}: {{end}} + *v = nil +{{end}}{{end}} + default: + _ = v // TODO: workaround https://github.com/golang/go/issues/12927 (remove after go 1.6 release) + return false + } + return true +} + // -- -- fast path functions {{range .Values}}{{if not .Primitive}}{{if not .MapKey }} {{/* @@ -303,40 +311,28 @@ Slices can change if they - are addressable (from a ptr) - are settable (e.g. contained in an interface{}) */}} -func (f *decFnInfo) {{ .MethodNamePfx "fastpathDec" false }}R(rv reflect.Value) { - array := f.seq == seqTypeArray - if !array && rv.CanAddr() { {{/* // CanSet => CanAddr + Exported */}} - vp := rv.Addr().Interface().(*[]{{ .Elem }}) - v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, fastpathCheckNilFalse, !array, f.d) - if changed { - *vp = v +func (d *Decoder) {{ .MethodNamePfx "fastpathDec" false }}R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + var vp = rv2i(rv).(*[]{{ .Elem }}) + if v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, !array, d); changed { + *vp = v } } else { - v := rv.Interface().([]{{ .Elem }}) - fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, fastpathCheckNilFalse, false, f.d) + fastpathTV.{{ .MethodNamePfx "Dec" false }}V(rv2i(rv).([]{{ .Elem }}), !array, d) } } - -func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *[]{{ .Elem }}, checkNil bool, d *Decoder) { - v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *[]{{ .Elem }}, d *Decoder) { + if v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, checkNil bool, canChange bool, d *Decoder) (_ []{{ .Elem }}, changed bool) { +func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, canChange bool, d *Decoder) (_ []{{ .Elem }}, changed bool) { dd := d.d {{/* // if dd.isContainerType(valueTypeNil) { dd.TryDecodeAsNil() */}} - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - slh, 
containerLenS := d.decSliceHelperStart() if containerLenS == 0 { if canChange { - if v == nil { + if v == nil { v = []{{ .Elem }}{} } else if len(v) != 0 { v = v[:0] @@ -346,98 +342,62 @@ func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, checkNil b slh.End() return v, changed } - - if containerLenS > 0 { - x2read := containerLenS - var xtrunc bool + + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { if containerLenS > cap(v) { - if canChange { {{/* - // fast-path is for "basic" immutable types, so no need to copy them over - // s := make([]{{ .Elem }}, decInferLen(containerLenS, d.h.MaxInitLen)) - // copy(s, v[:cap(v)]) - // v = s */}} - var xlen int - xlen, xtrunc = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }}) - if xtrunc { - if xlen <= cap(v) { - v = v[:xlen] - } else { - v = make([]{{ .Elem }}, xlen) - } - } else { - v = make([]{{ .Elem }}, xlen) - } - changed = true + xlen = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }}) + if xlen <= cap(v) { + v = v[:xlen] } else { - d.arrayCannotExpand(len(v), containerLenS) + v = make([]{{ .Elem }}, xlen) } - x2read = len(v) + changed = true } else if containerLenS != len(v) { - if canChange { - v = v[:containerLenS] - changed = true - } - } {{/* // all checks done. cannot go past len. */}} - j := 0 - for ; j < x2read; j++ { - slh.ElemContainerState(j) - {{ if eq .Elem "interface{}" }}d.decode(&v[j]){{ else }}v[j] = {{ decmd .Elem }}{{ end }} - } - if xtrunc { {{/* // means canChange=true, changed=true already. */}} - for ; j < containerLenS; j++ { - v = append(v, {{ zerocmd .Elem }}) - slh.ElemContainerState(j) - {{ if eq .Elem "interface{}" }}d.decode(&v[j]){{ else }}v[j] = {{ decmd .Elem }}{{ end }} - } - } else if !canChange { - for ; j < containerLenS; j++ { - slh.ElemContainerState(j) - d.swallow() - } - } - } else { - breakFound := dd.CheckBreak() {{/* check break first, so we can initialize v with a capacity of 4 if necessary */}} - if breakFound { - if canChange { - if v == nil { - v = []{{ .Elem }}{} - } else if len(v) != 0 { - v = v[:0] - } - changed = true - } - slh.End() - return v, changed - } - if cap(v) == 0 { - v = make([]{{ .Elem }}, 1, 4) - changed = true - } - j := 0 - for ; !breakFound; j++ { - if j >= len(v) { - if canChange { - v = append(v, {{ zerocmd .Elem }}) - changed = true - } else { - d.arrayCannotExpand(len(v), j+1) - } - } - slh.ElemContainerState(j) - if j < len(v) { {{/* // all checks done. cannot go past len. 
*/}} - {{ if eq .Elem "interface{}" }}d.decode(&v[j]) - {{ else }}v[j] = {{ decmd .Elem }}{{ end }} - } else { - d.swallow() - } - breakFound = dd.CheckBreak() - } - if canChange && j < len(v) { - v = v[:j] + v = v[:containerLenS] changed = true } } - slh.End() + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }}) + } else { + xlen = 8 + } + v = make([]{{ .Elem }}, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, {{ zerocmd .Elem }}) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else { + {{ if eq .Elem "interface{}" }}d.decode(&v[j]){{ else }}v[j] = {{ decmd .Elem }}{{ end }} + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]{{ .Elem }}, 0) + changed = true + } + } + slh.End() return v, changed } @@ -450,77 +410,58 @@ Maps can change if they are - addressable (from a ptr) - settable (e.g. contained in an interface{}) */}} -func (f *decFnInfo) {{ .MethodNamePfx "fastpathDec" false }}R(rv reflect.Value) { - if rv.CanAddr() { - vp := rv.Addr().Interface().(*map[{{ .MapKey }}]{{ .Elem }}) - v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, fastpathCheckNilFalse, true, f.d) - if changed { +func (d *Decoder) {{ .MethodNamePfx "fastpathDec" false }}R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[{{ .MapKey }}]{{ .Elem }}) + if v, changed := fastpathTV.{{ .MethodNamePfx "Dec" false }}V(*vp, true, d); changed { *vp = v } - } else { - v := rv.Interface().(map[{{ .MapKey }}]{{ .Elem }}) - fastpathTV.{{ .MethodNamePfx "Dec" false }}V(v, fastpathCheckNilFalse, false, f.d) + return } + fastpathTV.{{ .MethodNamePfx "Dec" false }}V(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), false, d) } -func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, d *Decoder) { - v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, checkNil, true, d) - if changed { +func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *map[{{ .MapKey }}]{{ .Elem }}, d *Decoder) { + if v, changed := f.{{ .MethodNamePfx "Dec" false }}V(*vp, true, d); changed { *vp = v } } -func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, checkNil bool, canChange bool, +func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, canChange bool, d *Decoder) (_ map[{{ .MapKey }}]{{ .Elem }}, changed bool) { - dd := d.d - cr := d.cr + dd, esep := d.d, d.hh.hasElemSeparators() {{/* // if dd.isContainerType(valueTypeNil) {dd.TryDecodeAsNil() */}} - if checkNil && dd.TryDecodeAsNil() { - if v != nil { - changed = true - } - return nil, changed - } - containerLen := dd.ReadMapStart() if canChange && v == nil { - xlen, _ := decInferLen(containerLen, d.h.MaxInitLen, {{ .Size }}) + xlen := decInferLen(containerLen, d.h.MaxInitLen, {{ .Size }}) v = make(map[{{ .MapKey }}]{{ .Elem }}, xlen) changed = true } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } {{ if eq .Elem "interface{}" }}mapGet := !d.h.MapValueReset && !d.h.InterfaceReset{{end}} var mk {{ .MapKey }} var mv {{ .Elem }} - if containerLen > 0 { - for j := 0; j < containerLen; j++ { - if cr 
!= nil { cr.sendContainerState(containerMapKey) } - {{ if eq .MapKey "interface{}" }}mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) {{/* // maps cannot have []byte as key. switch to string. */}} - }{{ else }}mk = {{ decmd .MapKey }}{{ end }} - if cr != nil { cr.sendContainerState(containerMapValue) } - {{ if eq .Elem "interface{}" }}if mapGet { mv = v[mk] } else { mv = nil } - d.decode(&mv){{ else }}mv = {{ decmd .Elem }}{{ end }} - if v != nil { - v[mk] = mv - } + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { dd.ReadMapElemKey() } + {{ if eq .MapKey "interface{}" }}mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) {{/* // maps cannot have []byte as key. switch to string. */}} + }{{ else }}mk = {{ decmd .MapKey }}{{ end }} + if esep { dd.ReadMapElemValue() } + if dd.TryDecodeAsNil() { + if d.h.DeleteOnNilMapValue { delete(v, mk) } else { v[mk] = {{ zerocmd .Elem }} } + continue } - } else if containerLen < 0 { - for j := 0; !dd.CheckBreak(); j++ { - if cr != nil { cr.sendContainerState(containerMapKey) } - {{ if eq .MapKey "interface{}" }}mk = nil - d.decode(&mk) - if bv, bok := mk.([]byte); bok { - mk = d.string(bv) {{/* // maps cannot have []byte as key. switch to string. */}} - }{{ else }}mk = {{ decmd .MapKey }}{{ end }} - if cr != nil { cr.sendContainerState(containerMapValue) } - {{ if eq .Elem "interface{}" }}if mapGet { mv = v[mk] } else { mv = nil } - d.decode(&mv){{ else }}mv = {{ decmd .Elem }}{{ end }} - if v != nil { - v[mk] = mv - } + {{ if eq .Elem "interface{}" }}if mapGet { mv = v[mk] } else { mv = nil } + d.decode(&mv){{ else }}mv = {{ decmd .Elem }}{{ end }} + if v != nil { + v[mk] = mv } } - if cr != nil { cr.sendContainerState(containerMapEnd) } + dd.ReadMapEnd() return v, changed } diff --git a/vendor/github.com/ugorji/go/codec/fast-path.not.go b/vendor/github.com/ugorji/go/codec/fast-path.not.go index 63e5911452..9573d64ab0 100644 --- a/vendor/github.com/ugorji/go/codec/fast-path.not.go +++ b/vendor/github.com/ugorji/go/codec/fast-path.not.go @@ -14,17 +14,18 @@ const fastpathEnabled = false // This tag disables fastpath during build, allowing for faster build, test execution, // short-program runs, etc. 
-func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return false } -func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false } -func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false } -func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false } +func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return false } +func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false } +func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false } +func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false } +func fastpathDecodeSetZeroTypeSwitch(iv interface{}, d *Decoder) bool { return false } type fastpathT struct{} type fastpathE struct { rtid uintptr rt reflect.Type - encfn func(*encFnInfo, reflect.Value) - decfn func(*decFnInfo, reflect.Value) + encfn func(*Encoder, *codecFnInfo, reflect.Value) + decfn func(*Decoder, *codecFnInfo, reflect.Value) } type fastpathA [0]fastpathE diff --git a/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl index 32df54144c..d9940c0ad6 100644 --- a/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl +++ b/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl @@ -13,92 +13,65 @@ if {{var "l"}} == 0 { {{var "v"}} = make({{ .CTyp }}, 0) {{var "c"}} = true } {{end}} -} else if {{var "l"}} > 0 { - {{if isChan }}if {{var "v"}} == nil { - {{var "rl"}}, _ = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) - {{var "v"}} = make({{ .CTyp }}, {{var "rl"}}) - {{var "c"}} = true - } - for {{var "r"}} := 0; {{var "r"}} < {{var "l"}}; {{var "r"}}++ { - {{var "h"}}.ElemContainerState({{var "r"}}) - var {{var "t"}} {{ .Typ }} - {{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }} - {{var "v"}} <- {{var "t"}} - } - {{ else }} var {{var "rr"}}, {{var "rl"}} int {{/* // num2read, length of slice/array/chan */}} - var {{var "rt"}} bool {{/* truncated */}} - _, _ = {{var "rl"}}, {{var "rt"}} - {{var "rr"}} = {{var "l"}} // len({{var "v"}}) +} else { + {{var "hl"}} := {{var "l"}} > 0 + var {{var "rl"}} int; _ = {{var "rl"}} + {{if isSlice }} if {{var "hl"}} { if {{var "l"}} > cap({{var "v"}}) { - {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "l"}}) - {{ else }}{{if not .Immutable }} - {{var "rg"}} := len({{var "v"}}) > 0 - {{var "v2"}} := {{var "v"}} {{end}} - {{var "rl"}}, {{var "rt"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) - if {{var "rt"}} { - if {{var "rl"}} <= cap({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "rl"}}] - } else { - {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) - } + {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) + if {{var "rl"}} <= cap({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "rl"}}] } else { {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) } {{var "c"}} = true - {{var "rr"}} = len({{var "v"}}) {{if not .Immutable }} - if {{var "rg"}} { copy({{var "v"}}, {{var "v2"}}) } {{end}} {{end}}{{/* end not Immutable, isArray */}} - } {{if isSlice }} else if {{var "l"}} != len({{var "v"}}) { + } else if {{var "l"}} != len({{var "v"}}) { {{var "v"}} = {{var "v"}}[:{{var "l"}}] {{var "c"}} = true - } {{end}} {{/* end isSlice:47 */}} - {{var "j"}} := 0 - for ; {{var "j"}} < {{var "rr"}} ; {{var "j"}}++ { - {{var "h"}}.ElemContainerState({{var "j"}}) - {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x 
}} } - {{if isArray }}for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ { + } {{end}} + var {{var "j"}} int + // var {{var "dn"}} bool + for ; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ { + {{if not isArray}} if {{var "j"}} == 0 && len({{var "v"}}) == 0 { + if {{var "hl"}} { + {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) + } else { + {{var "rl"}} = 8 + } + {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) + {{var "c"}} = true + }{{end}} {{var "h"}}.ElemContainerState({{var "j"}}) - z.DecSwallow() - } - {{ else }}if {{var "rt"}} { - for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ { - {{var "v"}} = append({{var "v"}}, {{ zero}}) - {{var "h"}}.ElemContainerState({{var "j"}}) - {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} - } - } {{end}} {{/* end isArray:56 */}} - {{end}} {{/* end isChan:16 */}} -} else { {{/* len < 0 */}} - {{var "j"}} := 0 - for ; !r.CheckBreak(); {{var "j"}}++ { - {{if isChan }} - {{var "h"}}.ElemContainerState({{var "j"}}) - var {{var "t"}} {{ .Typ }} - {{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }} - {{var "v"}} <- {{var "t"}} - {{ else }} + // {{var "dn"}} = r.TryDecodeAsNil() + {{if isChan}}{{ $x := printf "%[1]vv%[2]v" .TempVar .Rand }}var {{var $x}} {{ .Typ }} + {{ decLineVar $x }} + {{var "v"}} <- {{ $x }} + {{else}} + // if indefinite, etc, then expand the slice if necessary + var {{var "db"}} bool if {{var "j"}} >= len({{var "v"}}) { - {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "j"}}+1) - {{ else }}{{var "v"}} = append({{var "v"}}, {{zero}})// var {{var "z"}} {{ .Typ }} - {{var "c"}} = true {{end}} + {{if isSlice }} {{var "v"}} = append({{var "v"}}, {{ zero }}); {{var "c"}} = true + {{else}} z.DecArrayCannotExpand(len(v), {{var "j"}}+1); {{var "db"}} = true + {{end}} } - {{var "h"}}.ElemContainerState({{var "j"}}) - if {{var "j"}} < len({{var "v"}}) { - {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} - } else { + if {{var "db"}} { z.DecSwallow() + } else { + {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} } - {{end}} + {{end}} } - {{if isSlice }}if {{var "j"}} < len({{var "v"}}) { + {{if isSlice}} if {{var "j"}} < len({{var "v"}}) { {{var "v"}} = {{var "v"}}[:{{var "j"}}] {{var "c"}} = true } else if {{var "j"}} == 0 && {{var "v"}} == nil { - {{var "v"}} = []{{ .Typ }}{} + {{var "v"}} = make([]{{ .Typ }}, 0) {{var "c"}} = true - }{{end}} + } {{end}} } {{var "h"}}.End() {{if not isArray }}if {{var "c"}} { *{{ .Varname }} = {{var "v"}} }{{end}} + diff --git a/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl index 77400e0a11..8323b54940 100644 --- a/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl +++ b/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl @@ -2,21 +2,22 @@ {{var "l"}} := r.ReadMapStart() {{var "bh"}} := z.DecBasicHandle() if {{var "v"}} == nil { - {{var "rl"}}, _ := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }}) + {{var "rl"}} := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }}) {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}}) *{{ .Varname }} = {{var "v"}} } var {{var "mk"}} {{ .KTyp }} var {{var "mv"}} {{ .Typ }} -var {{var "mg"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool +var {{var "mg"}}, {{var "mdn"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool if {{var "bh"}}.MapValueReset { {{if 
decElemKindPtr}}{{var "mg"}} = true {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true } {{else if not decElemKindImmutable}}{{var "mg"}} = true {{end}} } -if {{var "l"}} > 0 { -for {{var "j"}} := 0; {{var "j"}} < {{var "l"}}; {{var "j"}}++ { - z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) +if {{var "l"}} != 0 { +{{var "hl"}} := {{var "l"}} > 0 + for {{var "j"}} := 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ { + r.ReadMapElemKey() {{/* z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) */}} {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} {{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { {{var "mk"}} = string({{var "bv"}}) @@ -28,31 +29,14 @@ for {{var "j"}} := 0; {{var "j"}} < {{var "l"}}; {{var "j"}}++ { {{var "ms"}} = false } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} - z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) - {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }} - if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { - {{var "v"}}[{{var "mk"}}] = {{var "mv"}} - } -} -} else if {{var "l"}} < 0 { -for {{var "j"}} := 0; !r.CheckBreak(); {{var "j"}}++ { - z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) - {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} -{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { - {{var "mk"}} = string({{var "bv"}}) - }{{ end }}{{if decElemKindPtr}} - {{var "ms"}} = true {{ end }} - if {{var "mg"}} { - {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] - if {{var "mok"}} { - {{var "ms"}} = false - } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} - } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} - z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) - {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }} - if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { + r.ReadMapElemValue() {{/* z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) */}} + {{var "mdn"}} = false + {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ $y := printf "%vmdn%v" .TempVar .Rand }}{{ decLineVar $x $y }} + if {{var "mdn"}} { + if {{ var "bh" }}.DeleteOnNilMapValue { delete({{var "v"}}, {{var "mk"}}) } else { {{var "v"}}[{{var "mk"}}] = {{decElemZero}} } + } else if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { {{var "v"}}[{{var "mk"}}] = {{var "mv"}} } } } // else len==0: TODO: Should we clear map entries? -z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) +r.ReadMapEnd() {{/* z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) */}} diff --git a/vendor/github.com/ugorji/go/codec/gen-helper.generated.go b/vendor/github.com/ugorji/go/codec/gen-helper.generated.go index eb0bdad357..c89a14d676 100644 --- a/vendor/github.com/ugorji/go/codec/gen-helper.generated.go +++ b/vendor/github.com/ugorji/go/codec/gen-helper.generated.go @@ -15,6 +15,9 @@ import ( "reflect" ) +// GenVersion is the current version of codecgen. +const GenVersion = 8 + // This file is used to generate helper code for codecgen. // The values here i.e. 
genHelper(En|De)coder are not to be used directly by // library users. They WILL change continuously and without notice. @@ -26,12 +29,14 @@ import ( // to perform encoding or decoding of primitives or known slice or map types. // GenHelperEncoder is exported so that it can be used externally by codecgen. +// // Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. func GenHelperEncoder(e *Encoder) (genHelperEncoder, encDriver) { return genHelperEncoder{e: e}, e.e } // GenHelperDecoder is exported so that it can be used externally by codecgen. +// // Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. func GenHelperDecoder(d *Decoder) (genHelperDecoder, decDriver) { return genHelperDecoder{d: d}, d.d @@ -56,13 +61,14 @@ func (f genHelperEncoder) EncBasicHandle() *BasicHandle { // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* func (f genHelperEncoder) EncBinary() bool { - return f.e.be // f.e.hh.isBinaryEncoding() + return f.e.cf.be // f.e.hh.isBinaryEncoding() } // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* func (f genHelperEncoder) EncFallback(iv interface{}) { // println(">>>>>>>>> EncFallback") - f.e.encodeI(iv, false, false) + // f.e.encodeI(iv, false, false) + f.e.encodeValue(reflect.ValueOf(iv), nil, false) } // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* @@ -85,7 +91,7 @@ func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) { // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* func (f genHelperEncoder) EncRaw(iv Raw) { - f.e.raw(iv) + f.e.rawBytes(iv) } // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* @@ -98,7 +104,7 @@ func (f genHelperEncoder) TimeRtidIfBinc() uintptr { // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* func (f genHelperEncoder) IsJSONHandle() bool { - return f.e.js + return f.e.cf.js } // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* @@ -112,7 +118,7 @@ func (f genHelperEncoder) EncExt(v interface{}) (r bool) { if rt.Kind() == reflect.Ptr { rt = rt.Elem() } - rtid := reflect.ValueOf(rt).Pointer() + rtid := rt2id(rt) if xfFn := f.e.h.getExt(rtid); xfFn != nil { f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) return true @@ -120,13 +126,6 @@ func (f genHelperEncoder) EncExt(v interface{}) (r bool) { return false } -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncSendContainerState(c containerState) { - if f.e.cr != nil { - f.e.cr.sendContainerState(c) - } -} - // ---------------- DECODER FOLLOWS ----------------- // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* @@ -152,7 +151,12 @@ func (f genHelperDecoder) DecScratchBuffer() []byte { // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) { // println(">>>>>>>>> DecFallback") - f.d.decodeI(iv, chkPtr, false, false, false) + rv := reflect.ValueOf(iv) + if chkPtr { + rv = f.d.ensureDecodeable(rv) + } + f.d.decodeValue(rv, nil, false, false) + // f.d.decodeValueFallback(rv) } // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* @@ -172,7 +176,7 @@ func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) { // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) { - fnerr := tm.UnmarshalText(f.d.d.DecodeBytes(f.d.b[:], true, true)) + fnerr := tm.UnmarshalText(f.d.d.DecodeStringAsBytes()) if fnerr != nil { panic(fnerr) } @@ -180,7 +184,7 @@ func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) { // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) { - // bs := f.dd.DecodeBytes(f.d.b[:], true, true) + // bs := f.dd.DecodeStringAsBytes() // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. fnerr := tm.UnmarshalJSON(f.d.nextValueBytes()) if fnerr != nil { @@ -190,7 +194,7 @@ func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) { // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) { - fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, false, true)) + fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, true)) if fnerr != nil { panic(fnerr) } @@ -198,7 +202,7 @@ func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) { // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* func (f genHelperDecoder) DecRaw() []byte { - return f.d.raw() + return f.d.rawBytes() } // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* @@ -222,7 +226,7 @@ func (f genHelperDecoder) HasExtensions() bool { // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* func (f genHelperDecoder) DecExt(v interface{}) (r bool) { rt := reflect.TypeOf(v).Elem() - rtid := reflect.ValueOf(rt).Pointer() + rtid := rt2id(rt) if xfFn := f.d.h.getExt(rtid); xfFn != nil { f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) return true @@ -231,13 +235,11 @@ func (f genHelperDecoder) DecExt(v interface{}) (r bool) { } // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) { +func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int) { return decInferLen(clen, maxlen, unit) } // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecSendContainerState(c containerState) { - if f.d.cr != nil { - f.d.cr.sendContainerState(c) - } +func (f genHelperDecoder) StringView(v []byte) string { + return stringView(v) } diff --git a/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl index ad99f6671b..79b6145c99 100644 --- a/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl +++ b/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl @@ -15,6 +15,9 @@ import ( "reflect" ) +// GenVersion is the current version of codecgen. +const GenVersion = {{ .Version }} + // This file is used to generate helper code for codecgen. // The values here i.e. genHelper(En|De)coder are not to be used directly by // library users. They WILL change continuously and without notice. @@ -26,12 +29,14 @@ import ( // to perform encoding or decoding of primitives or known slice or map types. // GenHelperEncoder is exported so that it can be used externally by codecgen. +// // Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. 
func GenHelperEncoder(e *Encoder) (genHelperEncoder, encDriver) { return genHelperEncoder{e:e}, e.e } // GenHelperDecoder is exported so that it can be used externally by codecgen. +// // Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. func GenHelperDecoder(d *Decoder) (genHelperDecoder, decDriver) { return genHelperDecoder{d:d}, d.d @@ -56,12 +61,13 @@ func (f genHelperEncoder) EncBasicHandle() *BasicHandle { // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* func (f genHelperEncoder) EncBinary() bool { - return f.e.be // f.e.hh.isBinaryEncoding() + return f.e.cf.be // f.e.hh.isBinaryEncoding() } // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* func (f genHelperEncoder) EncFallback(iv interface{}) { // println(">>>>>>>>> EncFallback") - f.e.encodeI(iv, false, false) + // f.e.encodeI(iv, false, false) + f.e.encodeValue(reflect.ValueOf(iv), nil, false) } // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) { @@ -80,7 +86,7 @@ func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) { } // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* func (f genHelperEncoder) EncRaw(iv Raw) { - f.e.raw(iv) + f.e.rawBytes(iv) } // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* func (f genHelperEncoder) TimeRtidIfBinc() uintptr { @@ -91,7 +97,7 @@ func (f genHelperEncoder) TimeRtidIfBinc() uintptr { } // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* func (f genHelperEncoder) IsJSONHandle() bool { - return f.e.js + return f.e.cf.js } // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* func (f genHelperEncoder) HasExtensions() bool { @@ -103,19 +109,13 @@ func (f genHelperEncoder) EncExt(v interface{}) (r bool) { if rt.Kind() == reflect.Ptr { rt = rt.Elem() } - rtid := reflect.ValueOf(rt).Pointer() + rtid := rt2id(rt) if xfFn := f.e.h.getExt(rtid); xfFn != nil { f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) return true } return false } -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncSendContainerState(c containerState) { - if f.e.cr != nil { - f.e.cr.sendContainerState(c) - } -} // ---------------- DECODER FOLLOWS ----------------- @@ -138,7 +138,12 @@ func (f genHelperDecoder) DecScratchBuffer() []byte { // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) { // println(">>>>>>>>> DecFallback") - f.d.decodeI(iv, chkPtr, false, false, false) + rv := reflect.ValueOf(iv) + if chkPtr { + rv = f.d.ensureDecodeable(rv) + } + f.d.decodeValue(rv, nil, false, false) + // f.d.decodeValueFallback(rv) } // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) { @@ -154,14 +159,14 @@ func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) { } // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) { - fnerr := tm.UnmarshalText(f.d.d.DecodeBytes(f.d.b[:], true, true)) + fnerr := tm.UnmarshalText(f.d.d.DecodeStringAsBytes()) if fnerr != nil { panic(fnerr) } } // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) { - // bs := f.dd.DecodeBytes(f.d.b[:], true, true) + // bs := f.dd.DecodeStringAsBytes() // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. fnerr := tm.UnmarshalJSON(f.d.nextValueBytes()) if fnerr != nil { @@ -170,14 +175,14 @@ func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) { } // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) { - fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, false, true)) + fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, true)) if fnerr != nil { panic(fnerr) } } // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* func (f genHelperDecoder) DecRaw() []byte { - return f.d.raw() + return f.d.rawBytes() } // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* func (f genHelperDecoder) TimeRtidIfBinc() uintptr { @@ -197,7 +202,7 @@ func (f genHelperDecoder) HasExtensions() bool { // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* func (f genHelperDecoder) DecExt(v interface{}) (r bool) { rt := reflect.TypeOf(v).Elem() - rtid := reflect.ValueOf(rt).Pointer() + rtid := rt2id(rt) if xfFn := f.d.h.getExt(rtid); xfFn != nil { f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) return true @@ -205,168 +210,11 @@ func (f genHelperDecoder) DecExt(v interface{}) (r bool) { return false } // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) { +func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int) { return decInferLen(clen, maxlen, unit) } // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecSendContainerState(c containerState) { - if f.d.cr != nil { - f.d.cr.sendContainerState(c) - } +func (f genHelperDecoder) StringView(v []byte) string { + return stringView(v) } -{{/* - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncDriver() encDriver { - return f.e.e -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecDriver() decDriver { - return f.d.d -} - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncNil() { - f.e.e.EncodeNil() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncBytes(v []byte) { - f.e.e.EncodeStringBytes(c_RAW, v) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncArrayStart(length int) { - f.e.e.EncodeArrayStart(length) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncArrayEnd() { - f.e.e.EncodeArrayEnd() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncArrayEntrySeparator() { - f.e.e.EncodeArrayEntrySeparator() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncMapStart(length int) { - f.e.e.EncodeMapStart(length) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncMapEnd() { - f.e.e.EncodeMapEnd() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* -func (f genHelperEncoder) EncMapEntrySeparator() { - f.e.e.EncodeMapEntrySeparator() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) EncMapKVSeparator() { - f.e.e.EncodeMapKVSeparator() -} - -// --------- - -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecBytes(v *[]byte) { - *v = f.d.d.DecodeBytes(*v) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecTryNil() bool { - return f.d.d.TryDecodeAsNil() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecContainerIsNil() (b bool) { - return f.d.d.IsContainerType(valueTypeNil) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecContainerIsMap() (b bool) { - return f.d.d.IsContainerType(valueTypeMap) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecContainerIsArray() (b bool) { - return f.d.d.IsContainerType(valueTypeArray) -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecCheckBreak() bool { - return f.d.d.CheckBreak() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecMapStart() int { - return f.d.d.ReadMapStart() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecArrayStart() int { - return f.d.d.ReadArrayStart() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecMapEnd() { - f.d.d.ReadMapEnd() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecArrayEnd() { - f.d.d.ReadArrayEnd() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecArrayEntrySeparator() { - f.d.d.ReadArrayEntrySeparator() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecMapEntrySeparator() { - f.d.d.ReadMapEntrySeparator() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) DecMapKVSeparator() { - f.d.d.ReadMapKVSeparator() -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) ReadStringAsBytes(bs []byte) []byte { - return f.d.d.DecodeStringAsBytes(bs) -} - - -// -- encode calls (primitives) -{{range .Values}}{{if .Primitive }}{{if ne .Primitive "interface{}" }} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) {{ .MethodNamePfx "Enc" true }}(v {{ .Primitive }}) { - ee := f.e.e - {{ encmd .Primitive "v" }} -} -{{ end }}{{ end }}{{ end }} - -// -- decode calls (primitives) -{{range .Values}}{{if .Primitive }}{{if ne .Primitive "interface{}" }} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperDecoder) {{ .MethodNamePfx "Dec" true }}(vp *{{ .Primitive }}) { - dd := f.d.d - *vp = {{ decmd .Primitive }} -} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* -func (f genHelperDecoder) {{ .MethodNamePfx "Read" true }}() (v {{ .Primitive }}) { - dd := f.d.d - v = {{ decmd .Primitive }} - return -} -{{ end }}{{ end }}{{ end }} - - -// -- encode calls (slices/maps) -{{range .Values}}{{if not .Primitive }}{{if .Slice }} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) {{ .MethodNamePfx "Enc" false }}(v []{{ .Elem }}) { {{ else }} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -func (f genHelperEncoder) {{ .MethodNamePfx "Enc" false }}(v map[{{ .MapKey }}]{{ .Elem }}) { {{end}} - f.F.{{ .MethodNamePfx "Enc" false }}V(v, false, f.e) -} -{{ end }}{{ end }} - -// -- decode calls (slices/maps) -{{range .Values}}{{if not .Primitive }} -// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* -{{if .Slice }}func (f genHelperDecoder) {{ .MethodNamePfx "Dec" false }}(vp *[]{{ .Elem }}) { -{{else}}func (f genHelperDecoder) {{ .MethodNamePfx "Dec" false }}(vp *map[{{ .MapKey }}]{{ .Elem }}) { {{end}} - v, changed := f.F.{{ .MethodNamePfx "Dec" false }}V(*vp, false, true, f.d) - if changed { - *vp = v - } -} -{{ end }}{{ end }} -*/}} diff --git a/vendor/github.com/ugorji/go/codec/gen.generated.go b/vendor/github.com/ugorji/go/codec/gen.generated.go index 2ace97b78c..b50a6024dd 100644 --- a/vendor/github.com/ugorji/go/codec/gen.generated.go +++ b/vendor/github.com/ugorji/go/codec/gen.generated.go @@ -10,21 +10,22 @@ const genDecMapTmpl = ` {{var "l"}} := r.ReadMapStart() {{var "bh"}} := z.DecBasicHandle() if {{var "v"}} == nil { - {{var "rl"}}, _ := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }}) + {{var "rl"}} := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }}) {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}}) *{{ .Varname }} = {{var "v"}} } var {{var "mk"}} {{ .KTyp }} var {{var "mv"}} {{ .Typ }} -var {{var "mg"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool +var {{var "mg"}}, {{var "mdn"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool if {{var "bh"}}.MapValueReset { {{if decElemKindPtr}}{{var "mg"}} = true {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true } {{else if not decElemKindImmutable}}{{var "mg"}} = true {{end}} } -if {{var "l"}} > 0 { -for {{var "j"}} := 0; {{var "j"}} < {{var "l"}}; {{var "j"}}++ { - z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) +if {{var "l"}} != 0 { +{{var "hl"}} := {{var "l"}} > 0 + for {{var "j"}} := 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ { + r.ReadMapElemKey() {{/* z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) */}} {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} {{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. 
*/}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { {{var "mk"}} = string({{var "bv"}}) @@ -36,34 +37,17 @@ for {{var "j"}} := 0; {{var "j"}} < {{var "l"}}; {{var "j"}}++ { {{var "ms"}} = false } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} - z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) - {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }} - if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { - {{var "v"}}[{{var "mk"}}] = {{var "mv"}} - } -} -} else if {{var "l"}} < 0 { -for {{var "j"}} := 0; !r.CheckBreak(); {{var "j"}}++ { - z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) - {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} -{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { - {{var "mk"}} = string({{var "bv"}}) - }{{ end }}{{if decElemKindPtr}} - {{var "ms"}} = true {{ end }} - if {{var "mg"}} { - {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] - if {{var "mok"}} { - {{var "ms"}} = false - } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} - } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} - z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) - {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }} - if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { + r.ReadMapElemValue() {{/* z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) */}} + {{var "mdn"}} = false + {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ $y := printf "%vmdn%v" .TempVar .Rand }}{{ decLineVar $x $y }} + if {{var "mdn"}} { + if {{ var "bh" }}.DeleteOnNilMapValue { delete({{var "v"}}, {{var "mk"}}) } else { {{var "v"}}[{{var "mk"}}] = {{decElemZero}} } + } else if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { {{var "v"}}[{{var "mk"}}] = {{var "mv"}} } } } // else len==0: TODO: Should we clear map entries? 
-z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) +r.ReadMapEnd() {{/* z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) */}} ` const genDecListTmpl = ` @@ -82,94 +66,67 @@ if {{var "l"}} == 0 { {{var "v"}} = make({{ .CTyp }}, 0) {{var "c"}} = true } {{end}} -} else if {{var "l"}} > 0 { - {{if isChan }}if {{var "v"}} == nil { - {{var "rl"}}, _ = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) - {{var "v"}} = make({{ .CTyp }}, {{var "rl"}}) - {{var "c"}} = true - } - for {{var "r"}} := 0; {{var "r"}} < {{var "l"}}; {{var "r"}}++ { - {{var "h"}}.ElemContainerState({{var "r"}}) - var {{var "t"}} {{ .Typ }} - {{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }} - {{var "v"}} <- {{var "t"}} - } - {{ else }} var {{var "rr"}}, {{var "rl"}} int {{/* // num2read, length of slice/array/chan */}} - var {{var "rt"}} bool {{/* truncated */}} - _, _ = {{var "rl"}}, {{var "rt"}} - {{var "rr"}} = {{var "l"}} // len({{var "v"}}) +} else { + {{var "hl"}} := {{var "l"}} > 0 + var {{var "rl"}} int; _ = {{var "rl"}} + {{if isSlice }} if {{var "hl"}} { if {{var "l"}} > cap({{var "v"}}) { - {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "l"}}) - {{ else }}{{if not .Immutable }} - {{var "rg"}} := len({{var "v"}}) > 0 - {{var "v2"}} := {{var "v"}} {{end}} - {{var "rl"}}, {{var "rt"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) - if {{var "rt"}} { - if {{var "rl"}} <= cap({{var "v"}}) { - {{var "v"}} = {{var "v"}}[:{{var "rl"}}] - } else { - {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) - } + {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) + if {{var "rl"}} <= cap({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "rl"}}] } else { {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) } {{var "c"}} = true - {{var "rr"}} = len({{var "v"}}) {{if not .Immutable }} - if {{var "rg"}} { copy({{var "v"}}, {{var "v2"}}) } {{end}} {{end}}{{/* end not Immutable, isArray */}} - } {{if isSlice }} else if {{var "l"}} != len({{var "v"}}) { + } else if {{var "l"}} != len({{var "v"}}) { {{var "v"}} = {{var "v"}}[:{{var "l"}}] {{var "c"}} = true - } {{end}} {{/* end isSlice:47 */}} - {{var "j"}} := 0 - for ; {{var "j"}} < {{var "rr"}} ; {{var "j"}}++ { - {{var "h"}}.ElemContainerState({{var "j"}}) - {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} } - {{if isArray }}for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ { + } {{end}} + var {{var "j"}} int + // var {{var "dn"}} bool + for ; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ { + {{if not isArray}} if {{var "j"}} == 0 && len({{var "v"}}) == 0 { + if {{var "hl"}} { + {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) + } else { + {{var "rl"}} = 8 + } + {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) + {{var "c"}} = true + }{{end}} {{var "h"}}.ElemContainerState({{var "j"}}) - z.DecSwallow() - } - {{ else }}if {{var "rt"}} { - for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ { - {{var "v"}} = append({{var "v"}}, {{ zero}}) - {{var "h"}}.ElemContainerState({{var "j"}}) - {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} - } - } {{end}} {{/* end isArray:56 */}} - {{end}} {{/* end isChan:16 */}} -} else { {{/* len < 0 */}} - {{var "j"}} := 0 - for ; !r.CheckBreak(); {{var "j"}}++ { - {{if isChan }} - {{var "h"}}.ElemContainerState({{var "j"}}) - var {{var "t"}} {{ .Typ }} - {{ $x := printf "%st%s" .TempVar .Rand 
}}{{ decLineVar $x }} - {{var "v"}} <- {{var "t"}} - {{ else }} + // {{var "dn"}} = r.TryDecodeAsNil() + {{if isChan}}{{ $x := printf "%[1]vv%[2]v" .TempVar .Rand }}var {{var $x}} {{ .Typ }} + {{ decLineVar $x }} + {{var "v"}} <- {{ $x }} + {{else}} + // if indefinite, etc, then expand the slice if necessary + var {{var "db"}} bool if {{var "j"}} >= len({{var "v"}}) { - {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "j"}}+1) - {{ else }}{{var "v"}} = append({{var "v"}}, {{zero}})// var {{var "z"}} {{ .Typ }} - {{var "c"}} = true {{end}} + {{if isSlice }} {{var "v"}} = append({{var "v"}}, {{ zero }}); {{var "c"}} = true + {{else}} z.DecArrayCannotExpand(len(v), {{var "j"}}+1); {{var "db"}} = true + {{end}} } - {{var "h"}}.ElemContainerState({{var "j"}}) - if {{var "j"}} < len({{var "v"}}) { - {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} - } else { + if {{var "db"}} { z.DecSwallow() + } else { + {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} } - {{end}} + {{end}} } - {{if isSlice }}if {{var "j"}} < len({{var "v"}}) { + {{if isSlice}} if {{var "j"}} < len({{var "v"}}) { {{var "v"}} = {{var "v"}}[:{{var "j"}}] {{var "c"}} = true } else if {{var "j"}} == 0 && {{var "v"}} == nil { - {{var "v"}} = []{{ .Typ }}{} + {{var "v"}} = make([]{{ .Typ }}, 0) {{var "c"}} = true - }{{end}} + } {{end}} } {{var "h"}}.End() {{if not isArray }}if {{var "c"}} { *{{ .Varname }} = {{var "v"}} }{{end}} + ` diff --git a/vendor/github.com/ugorji/go/codec/gen.go b/vendor/github.com/ugorji/go/codec/gen.go index da66921a66..7d430e5d41 100644 --- a/vendor/github.com/ugorji/go/codec/gen.go +++ b/vendor/github.com/ugorji/go/codec/gen.go @@ -1,3 +1,5 @@ +// +build codecgen.exec + // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. // Use of this source code is governed by a MIT license found in the LICENSE file. @@ -80,6 +82,10 @@ import ( // Note: // It was a conscious decision to have gen.go always explicitly call EncodeNil or TryDecodeAsNil. // This way, there isn't a function call overhead just to see that we should not enter a block of code. +// +// Note: +// codecgen-generated code depends on the variables defined by fast-path.generated.go. +// consequently, you cannot run with tags "codecgen notfastpath". // GenVersion is the current version of codecgen. // @@ -94,7 +100,8 @@ import ( // changes in signature of some unpublished helper methods and codecgen cmdline arguments. // v4: Removed separator support from (en|de)cDriver, and refactored codec(gen) // v5: changes to support faster json decoding. Let encoder/decoder maintain state of collections. -const GenVersion = 5 +// v6: removed unsafe from gen, and now uses codecgen.exec tag +const genVersion = 8 const ( genCodecPkg = "codec1978" @@ -126,7 +133,6 @@ var ( genExpectArrayOrMapErr = errors.New("unexpected type. Expecting array/map/slice") genBase64enc = base64.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789__") genQNameRegex = regexp.MustCompile(`[A-Za-z_.]+`) - genCheckVendor bool ) // genRunner holds some state used during a Gen run. @@ -147,8 +153,7 @@ type genRunner struct { is map[reflect.Type]struct{} // types seen during import search bp string // base PkgPath, for which we are generating for - cpfx string // codec package prefix - unsafe bool // is unsafe to be used in generated code? 
+ cpfx string // codec package prefix tm map[reflect.Type]struct{} // types for which enc/dec must be generated ts []reflect.Type // types for which enc/dec must be generated @@ -158,13 +163,16 @@ type genRunner struct { ti *TypeInfos // rr *rand.Rand // random generator for file-specific types + + nx bool // no extensions } // Gen will write a complete go file containing Selfer implementations for each // type passed. All the types must be in the same package. // -// Library users: *DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE.* -func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeInfos, typ ...reflect.Type) { +// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE. +func Gen(w io.Writer, buildTags, pkgName, uid string, noExtensions bool, + ti *TypeInfos, typ ...reflect.Type) { // All types passed to this method do not have a codec.Selfer method implemented directly. // codecgen already checks the AST and skips any types that define the codec.Selfer methods. // Consequently, there's no need to check and trim them if they implement codec.Selfer @@ -173,19 +181,19 @@ func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeIn return } x := genRunner{ - unsafe: useUnsafe, - w: w, - t: typ, - te: make(map[uintptr]bool), - td: make(map[uintptr]bool), - im: make(map[string]reflect.Type), - imn: make(map[string]string), - is: make(map[reflect.Type]struct{}), - tm: make(map[reflect.Type]struct{}), - ts: []reflect.Type{}, - bp: genImportPath(typ[0]), - xs: uid, - ti: ti, + w: w, + t: typ, + te: make(map[uintptr]bool), + td: make(map[uintptr]bool), + im: make(map[string]reflect.Type), + imn: make(map[string]string), + is: make(map[reflect.Type]struct{}), + tm: make(map[reflect.Type]struct{}), + ts: []reflect.Type{}, + bp: genImportPath(typ[0]), + xs: uid, + ti: ti, + nx: noExtensions, } if x.ti == nil { x.ti = defTypeInfos @@ -234,11 +242,8 @@ func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeIn x.linef("%s \"%s\"", x.imn[k], k) } // add required packages - for _, k := range [...]string{"reflect", "unsafe", "runtime", "fmt", "errors"} { + for _, k := range [...]string{"reflect", "runtime", "fmt", "errors"} { if _, ok := x.im[k]; !ok { - if k == "unsafe" && !x.unsafe { - continue - } x.line("\"" + k + "\"") } } @@ -265,20 +270,16 @@ func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeIn x.line(")") x.line("") - if x.unsafe { - x.line("type codecSelferUnsafeString" + x.xs + " struct { Data uintptr; Len int}") - x.line("") - } x.hn = "codecSelfer" + x.xs x.line("type " + x.hn + " struct{}") x.line("") x.varsfxreset() x.line("func init() {") - x.linef("if %sGenVersion != %v {", x.cpfx, GenVersion) + x.linef("if %sGenVersion != %v {", x.cpfx, genVersion) x.line("_, file, _, _ := runtime.Caller(0)") x.line(`err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", `) - x.linef(`%v, %sGenVersion, file)`, GenVersion, x.cpfx) + x.linef(`%v, %sGenVersion, file)`, genVersion, x.cpfx) x.line("panic(err)") x.linef("}") x.line("if false { // reference the types, but skip this branch at build/run time") @@ -289,10 +290,6 @@ func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeIn x.linef("var v%v %s.%s", n, x.imn[k], t.Name()) n++ } - if x.unsafe { - x.linef("var v%v unsafe.Pointer", n) - n++ - } if n > 0 { x.out("_") for i := 1; i < n; i++ { @@ -315,7 +312,7 @@ func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeIn } for _, t := range x.ts { - rtid := reflect.ValueOf(t).Pointer() + rtid := rt2id(t) // generate enc functions for all these slice/map types. x.varsfxreset() x.linef("func (x %s) enc%s(v %s%s, e *%sEncoder) {", x.hn, x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), x.cpfx) @@ -545,21 +542,21 @@ func (x *genRunner) selfer(encode bool) { x.out(fnSigPfx) x.line(") codecDecodeSelfFromMap(l int, d *" + x.cpfx + "Decoder) {") x.genRequiredMethodVars(false) - x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleConsolidated) + x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleConsolidated) x.line("}") x.line("") } else { x.out(fnSigPfx) x.line(") codecDecodeSelfFromMapLenPrefix(l int, d *" + x.cpfx + "Decoder) {") x.genRequiredMethodVars(false) - x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleLenPrefix) + x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleLenPrefix) x.line("}") x.line("") x.out(fnSigPfx) x.line(") codecDecodeSelfFromMapCheckBreak(l int, d *" + x.cpfx + "Decoder) {") x.genRequiredMethodVars(false) - x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleCheckBreak) + x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleCheckBreak) x.line("}") x.line("") } @@ -568,7 +565,7 @@ func (x *genRunner) selfer(encode bool) { x.out(fnSigPfx) x.line(") codecDecodeSelfFromArray(l int, d *" + x.cpfx + "Decoder) {") x.genRequiredMethodVars(false) - x.decStructArray(genTopLevelVarName, "l", "return", reflect.ValueOf(t0).Pointer(), t0) + x.decStructArray(genTopLevelVarName, "l", "return", rt2id(t0), t0) x.line("}") x.line("") @@ -645,7 +642,7 @@ func (x *genRunner) encVar(varname string, t reflect.Type) { // enc will encode a variable (varname) of type t, // except t is of kind reflect.Struct or reflect.Array, wherein varname is of type ptrTo(T) (to prevent copying) func (x *genRunner) enc(varname string, t reflect.Type) { - rtid := reflect.ValueOf(t).Pointer() + rtid := rt2id(t) // We call CodecEncodeSelf if one of the following are honored: // - the type already implements Selfer, call that // - the type has a Selfer implementation just created, use that @@ -720,7 +717,7 @@ func (x *genRunner) enc(varname string, t reflect.Type) { x.linef("r.EncodeBuiltin(%s, %s)", vrtid, varname) } // only check for extensions if the type is named, and has a packagePath. 
- if genImportPath(t) != "" && t.Name() != "" { + if !x.nx && genImportPath(t) != "" && t.Name() != "" { // first check if extensions are configued, before doing the interface conversion x.linef("} else if z.HasExtensions() && z.EncExt(%s) {", varname) } @@ -780,7 +777,7 @@ func (x *genRunner) enc(varname string, t reflect.Type) { x.line("r.EncodeStringBytes(codecSelferC_RAW" + x.xs + ", []byte(" + varname + "))") } else if fastpathAV.index(rtid) != -1 { g := x.newGenV(t) - x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", false, e)") + x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", e)") } else { x.xtraSM(varname, true, t) // x.encListFallback(varname, rtid, t) @@ -794,7 +791,7 @@ func (x *genRunner) enc(varname string, t reflect.Type) { // x.line("if " + varname + " == nil { \nr.EncodeNil()\n } else { ") if fastpathAV.index(rtid) != -1 { g := x.newGenV(t) - x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", false, e)") + x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", e)") } else { x.xtraSM(varname, true, t) // x.encMapFallback(varname, rtid, t) @@ -852,55 +849,64 @@ func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) { // number of non-empty things we write out first. // This is required as we need to pre-determine the size of the container, // to support length-prefixing. - x.linef("var %s [%v]bool", numfieldsvar, len(tisfi)) - x.linef("_, _, _ = %s, %s, %s", sepVarname, numfieldsvar, struct2arrvar) + if ti.anyOmitEmpty { + x.linef("var %s [%v]bool", numfieldsvar, len(tisfi)) + x.linef("_ = %s", numfieldsvar) + } + x.linef("_, _ = %s, %s", sepVarname, struct2arrvar) x.linef("const %s bool = %v", ti2arrayvar, ti.toArray) - nn := 0 - for j, si := range tisfi { - if !si.omitEmpty { - nn++ - continue - } - var t2 reflect.StructField - var omitline string - if si.i != -1 { - t2 = t.Field(int(si.i)) - } else { - t2typ := t - varname3 := varname - for _, ix := range si.is { - for t2typ.Kind() == reflect.Ptr { - t2typ = t2typ.Elem() - } - t2 = t2typ.Field(ix) - t2typ = t2.Type - varname3 = varname3 + "." + t2.Name - if t2typ.Kind() == reflect.Ptr { - omitline += varname3 + " != nil && " + var nn int + if ti.anyOmitEmpty { + for j, si := range tisfi { + if !si.omitEmpty { + nn++ + continue + } + var t2 reflect.StructField + var omitline string + { + t2typ := t + varname3 := varname + for ij, ix := range si.is { + if uint8(ij) == si.nis { + break + } + for t2typ.Kind() == reflect.Ptr { + t2typ = t2typ.Elem() + } + t2 = t2typ.Field(int(ix)) + t2typ = t2.Type + varname3 = varname3 + "." + t2.Name + if t2typ.Kind() == reflect.Ptr { + omitline += varname3 + " != nil && " + } } } + // never check omitEmpty on a struct type, as it may contain uncomparable map/slice/etc. + // also, for maps/slices/arrays, check if len ! 0 (not if == zero value) + switch t2.Type.Kind() { + case reflect.Struct: + omitline += " true" + case reflect.Map, reflect.Slice, reflect.Array, reflect.Chan: + omitline += "len(" + varname + "." + t2.Name + ") != 0" + default: + omitline += varname + "." + t2.Name + " != " + x.genZeroValueR(t2.Type) + } + x.linef("%s[%v] = %s", numfieldsvar, j, omitline) } - // never check omitEmpty on a struct type, as it may contain uncomparable map/slice/etc. - // also, for maps/slices/arrays, check if len ! 0 (not if == zero value) - switch t2.Type.Kind() { - case reflect.Struct: - omitline += " true" - case reflect.Map, reflect.Slice, reflect.Array, reflect.Chan: - omitline += "len(" + varname + "." 
+ t2.Name + ") != 0" - default: - omitline += varname + "." + t2.Name + " != " + x.genZeroValueR(t2.Type) - } - x.linef("%s[%v] = %s", numfieldsvar, j, omitline) } - x.linef("var %snn%s int", genTempVarPfx, i) + // x.linef("var %snn%s int", genTempVarPfx, i) x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray { - x.line("r.EncodeArrayStart(" + strconv.FormatInt(int64(len(tisfi)), 10) + ")") + x.linef("r.WriteArrayStart(%d)", len(tisfi)) x.linef("} else {") // if not ti.toArray - x.linef("%snn%s = %v", genTempVarPfx, i, nn) - x.linef("for _, b := range %s { if b { %snn%s++ } }", numfieldsvar, genTempVarPfx, i) - x.linef("r.EncodeMapStart(%snn%s)", genTempVarPfx, i) - x.linef("%snn%s = %v", genTempVarPfx, i, 0) - // x.line("r.EncodeMapStart(" + strconv.FormatInt(int64(len(tisfi)), 10) + ")") + if ti.anyOmitEmpty { + x.linef("var %snn%s = %v", genTempVarPfx, i, nn) + x.linef("for _, b := range %s { if b { %snn%s++ } }", numfieldsvar, genTempVarPfx, i) + x.linef("r.WriteMapStart(%snn%s)", genTempVarPfx, i) + x.linef("%snn%s = %v", genTempVarPfx, i, 0) + } else { + x.linef("r.WriteMapStart(%d)", len(tisfi)) + } x.line("}") // close if not StructToArray for j, si := range tisfi { @@ -908,17 +914,18 @@ func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) { isNilVarName := genTempVarPfx + "n" + i var labelUsed bool var t2 reflect.StructField - if si.i != -1 { - t2 = t.Field(int(si.i)) - } else { + { t2typ := t varname3 := varname - for _, ix := range si.is { + for ij, ix := range si.is { + if uint8(ij) == si.nis { + break + } // fmt.Printf("%%%% %v, ix: %v\n", t2typ, ix) for t2typ.Kind() == reflect.Ptr { t2typ = t2typ.Elem() } - t2 = t2typ.Field(ix) + t2 = t2typ.Field(int(ix)) t2typ = t2.Type varname3 = varname3 + "." 
+ t2.Name if t2typ.Kind() == reflect.Ptr { @@ -941,9 +948,10 @@ func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) { x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray if labelUsed { - x.line("if " + isNilVarName + " { r.EncodeNil() } else { ") + x.linef("if %s { r.WriteArrayElem(); r.EncodeNil() } else { ", isNilVarName) + // x.linef("if %s { z.EncSendContainerState(codecSelfer_containerArrayElem%s); r.EncodeNil() } else { ", isNilVarName, x.xs) } - x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) + x.line("r.WriteArrayElem()") // x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) if si.omitEmpty { x.linef("if %s[%v] {", numfieldsvar, j) } @@ -962,9 +970,9 @@ func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) { if si.omitEmpty { x.linef("if %s[%v] {", numfieldsvar, j) } - x.linef("z.EncSendContainerState(codecSelfer_containerMapKey%s)", x.xs) + x.line("r.WriteMapElemKey()") // x.linef("z.EncSendContainerState(codecSelfer_containerMapKey%s)", x.xs) x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + ", string(\"" + si.encName + "\"))") - x.linef("z.EncSendContainerState(codecSelfer_containerMapValue%s)", x.xs) + x.line("r.WriteMapElemValue()") // x.linef("z.EncSendContainerState(codecSelfer_containerMapValue%s)", x.xs) if labelUsed { x.line("if " + isNilVarName + " { r.EncodeNil() } else { ") x.encVar(varname+"."+t2.Name, t2.Type) @@ -978,9 +986,9 @@ func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) { x.linef("} ") // end if/else ti.toArray } x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray { - x.linef("z.EncSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) + x.line("r.WriteArrayEnd()") // x.linef("z.EncSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) x.line("} else {") - x.linef("z.EncSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) + x.line("r.WriteMapEnd()") // x.linef("z.EncSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) x.line("}") } @@ -996,36 +1004,36 @@ func (x *genRunner) encListFallback(varname string, t reflect.Type) { } i := x.varsfx() g := genTempVarPfx - x.line("r.EncodeArrayStart(len(" + varname + "))") + x.line("r.WriteArrayStart(len(" + varname + "))") if t.Kind() == reflect.Chan { x.linef("for %si%s, %si2%s := 0, len(%s); %si%s < %si2%s; %si%s++ {", g, i, g, i, varname, g, i, g, i, g, i) - x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) + x.line("r.WriteArrayElem()") // x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) x.linef("%sv%s := <-%s", g, i, varname) } else { // x.linef("for %si%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname) x.linef("for _, %sv%s := range %s {", genTempVarPfx, i, varname) - x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) + x.line("r.WriteArrayElem()") // x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) } x.encVar(genTempVarPfx+"v"+i, t.Elem()) x.line("}") - x.linef("z.EncSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) + x.line("r.WriteArrayEnd()") // x.linef("z.EncSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) } func (x *genRunner) encMapFallback(varname string, t reflect.Type) { // TODO: expand this to handle canonical. 
i := x.varsfx() - x.line("r.EncodeMapStart(len(" + varname + "))") + x.line("r.WriteMapStart(len(" + varname + "))") x.linef("for %sk%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname) // x.line("for " + genTempVarPfx + "k" + i + ", " + genTempVarPfx + "v" + i + " := range " + varname + " {") - x.linef("z.EncSendContainerState(codecSelfer_containerMapKey%s)", x.xs) + x.line("r.WriteMapElemKey()") // f("z.EncSendContainerState(codecSelfer_containerMapKey%s)", x.xs) x.encVar(genTempVarPfx+"k"+i, t.Key()) - x.linef("z.EncSendContainerState(codecSelfer_containerMapValue%s)", x.xs) + x.line("r.WriteMapElemValue()") // f("z.EncSendContainerState(codecSelfer_containerMapValue%s)", x.xs) x.encVar(genTempVarPfx+"v"+i, t.Elem()) x.line("}") - x.linef("z.EncSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) + x.line("r.WriteMapEnd()") // f("z.EncSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) } -func (x *genRunner) decVar(varname string, t reflect.Type, canBeNil bool) { +func (x *genRunner) decVar(varname, decodedNilVarname string, t reflect.Type, canBeNil bool) { // We only encode as nil if a nillable value. // This removes some of the wasted checks for TryDecodeAsNil. // We need to think about this more, to see what happens if omitempty, etc @@ -1038,7 +1046,9 @@ func (x *genRunner) decVar(varname string, t reflect.Type, canBeNil bool) { } if canBeNil { x.line("if r.TryDecodeAsNil() {") - if t.Kind() == reflect.Ptr { + if decodedNilVarname != "" { + x.line(decodedNilVarname + " = true") + } else if t.Kind() == reflect.Ptr { x.line("if " + varname + " != nil { ") // if varname is a field of a struct (has a dot in it), @@ -1098,7 +1108,7 @@ func (x *genRunner) dec(varname string, t reflect.Type) { // assumptions: // - the varname is to a pointer already. No need to take address of it // - t is always a baseType T (not a *T, etc). - rtid := reflect.ValueOf(t).Pointer() + rtid := rt2id(t) tptr := reflect.PtrTo(t) if x.checkForSelfer(t, varname) { if t.Implements(selferTyp) || tptr.Implements(selferTyp) { @@ -1156,7 +1166,7 @@ func (x *genRunner) dec(varname string, t reflect.Type) { x.linef("r.DecodeBuiltin(%s, %s)", vrtid, varname) } // only check for extensions if the type is named, and has a packagePath. - if genImportPath(t) != "" && t.Name() != "" { + if !x.nx && genImportPath(t) != "" && t.Name() != "" { // first check if extensions are configued, before doing the interface conversion x.linef("} else if z.HasExtensions() && z.DecExt(%s) {", varname) } @@ -1231,10 +1241,10 @@ func (x *genRunner) dec(varname string, t reflect.Type) { // - if elements are primitives or Selfers, call dedicated function on each member. // - else call Encoder.encode(XXX) on it. if rtid == uint8SliceTypId { - x.line("*" + varname + " = r.DecodeBytes(*(*[]byte)(" + varname + "), false, false)") + x.line("*" + varname + " = r.DecodeBytes(*(*[]byte)(" + varname + "), false)") } else if fastpathAV.index(rtid) != -1 { g := x.newGenV(t) - x.line("z.F." + g.MethodNamePfx("Dec", false) + "X(" + varname + ", false, d)") + x.line("z.F." + g.MethodNamePfx("Dec", false) + "X(" + varname + ", d)") } else { x.xtraSM(varname, false, t) // x.decListFallback(varname, rtid, false, t) @@ -1246,7 +1256,7 @@ func (x *genRunner) dec(varname string, t reflect.Type) { // - else call Encoder.encode(XXX) on it. if fastpathAV.index(rtid) != -1 { g := x.newGenV(t) - x.line("z.F." + g.MethodNamePfx("Dec", false) + "X(" + varname + ", false, d)") + x.line("z.F." 
+ g.MethodNamePfx("Dec", false) + "X(" + varname + ", d)") } else { x.xtraSM(varname, false, t) // x.decMapFallback(varname, rtid, t) @@ -1318,11 +1328,11 @@ func (x *genRunner) decTryAssignPrimitive(varname string, t reflect.Type) (tryAs func (x *genRunner) decListFallback(varname string, rtid uintptr, t reflect.Type) { if t.AssignableTo(uint8SliceTyp) { - x.line("*" + varname + " = r.DecodeBytes(*((*[]byte)(" + varname + ")), false, false)") + x.line("*" + varname + " = r.DecodeBytes(*((*[]byte)(" + varname + ")), false)") return } if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 { - x.linef("r.DecodeBytes( ((*[%s]byte)(%s))[:], false, true)", t.Len(), varname) + x.linef("r.DecodeBytes( ((*[%s]byte)(%s))[:], true)", t.Len(), varname) return } type tstruc struct { @@ -1340,13 +1350,13 @@ func (x *genRunner) decListFallback(varname string, rtid uintptr, t reflect.Type funcs := make(template.FuncMap) funcs["decLineVar"] = func(varname string) string { - x.decVar(varname, telem, false) - return "" - } - funcs["decLine"] = func(pfx string) string { - x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(telem), false) + x.decVar(varname, "", telem, false) return "" } + // funcs["decLine"] = func(pfx string) string { + // x.decVar(ts.TempVar+pfx+ts.Rand, "", reflect.PtrTo(telem), false) + // return "" + // } funcs["var"] = func(s string) string { return ts.TempVar + s + ts.Rand } @@ -1402,21 +1412,21 @@ func (x *genRunner) decMapFallback(varname string, rtid uintptr, t reflect.Type) return telem.Kind() == reflect.Interface } funcs["decLineVarK"] = func(varname string) string { - x.decVar(varname, tkey, false) + x.decVar(varname, "", tkey, false) return "" } - funcs["decLineVar"] = func(varname string) string { - x.decVar(varname, telem, false) - return "" - } - funcs["decLineK"] = func(pfx string) string { - x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(tkey), false) - return "" - } - funcs["decLine"] = func(pfx string) string { - x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(telem), false) + funcs["decLineVar"] = func(varname, decodedNilVarname string) string { + x.decVar(varname, decodedNilVarname, telem, false) return "" } + // funcs["decLineK"] = func(pfx string) string { + // x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(tkey), false) + // return "" + // } + // funcs["decLine"] = func(pfx string) string { + // x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(telem), false) + // return "" + // } funcs["var"] = func(s string) string { return ts.TempVar + s + ts.Rand } @@ -1437,18 +1447,19 @@ func (x *genRunner) decStructMapSwitch(kName string, varname string, rtid uintpt for _, si := range tisfi { x.line("case \"" + si.encName + "\":") var t2 reflect.StructField - if si.i != -1 { - t2 = t.Field(int(si.i)) - } else { + { //we must accommodate anonymous fields, where the embedded field is a nil pointer in the value. // t2 = t.FieldByIndex(si.is) t2typ := t varname3 := varname - for _, ix := range si.is { + for ij, ix := range si.is { + if uint8(ij) == si.nis { + break + } for t2typ.Kind() == reflect.Ptr { t2typ = t2typ.Elem() } - t2 = t2typ.Field(ix) + t2 = t2typ.Field(int(ix)) t2typ = t2.Type varname3 = varname3 + "." 
+ t2.Name if t2typ.Kind() == reflect.Ptr { @@ -1456,7 +1467,7 @@ func (x *genRunner) decStructMapSwitch(kName string, varname string, rtid uintpt } } } - x.decVar(varname+"."+t2.Name, t2.Type, false) + x.decVar(varname+"."+t2.Name, "", t2.Type, false) } x.line("default:") // pass the slice here, so that the string will not escape, and maybe save allocation @@ -1469,17 +1480,6 @@ func (x *genRunner) decStructMap(varname, lenvarname string, rtid uintptr, t ref i := x.varsfx() kName := tpfx + "s" + i - // We thought to use ReadStringAsBytes, as go compiler might optimize the copy out. - // However, using that was more expensive, as it seems that the switch expression - // is evaluated each time. - // - // We could depend on decodeString using a temporary/shared buffer internally. - // However, this model of creating a byte array, and using explicitly is faster, - // and allows optional use of unsafe []byte->string conversion without alloc. - - // Also, ensure that the slice array doesn't escape. - // That will help escape analysis prevent allocation when it gets better. - // x.line("var " + kName + "Arr = [32]byte{} // default string to decode into") // x.line("var " + kName + "Slc = " + kName + "Arr[:] // default slice to decode into") // use the scratch buffer to avoid allocation (most field names are < 32). @@ -1498,21 +1498,15 @@ func (x *genRunner) decStructMap(varname, lenvarname string, rtid uintptr, t ref x.linef("if %shl%s { if %sj%s >= %s { break }", tpfx, i, tpfx, i, lenvarname) x.line("} else { if r.CheckBreak() { break }; }") } - x.linef("z.DecSendContainerState(codecSelfer_containerMapKey%s)", x.xs) - x.line(kName + "Slc = r.DecodeBytes(" + kName + "Slc, true, true)") + x.line("r.ReadMapElemKey()") // f("z.DecSendContainerState(codecSelfer_containerMapKey%s)", x.xs) + x.line(kName + "Slc = r.DecodeStringAsBytes()") // let string be scoped to this loop alone, so it doesn't escape. - if x.unsafe { - x.line(kName + "SlcHdr := codecSelferUnsafeString" + x.xs + "{uintptr(unsafe.Pointer(&" + - kName + "Slc[0])), len(" + kName + "Slc)}") - x.line(kName + " := *(*string)(unsafe.Pointer(&" + kName + "SlcHdr))") - } else { - x.line(kName + " := string(" + kName + "Slc)") - } - x.linef("z.DecSendContainerState(codecSelfer_containerMapValue%s)", x.xs) + x.line(kName + " := string(" + kName + "Slc)") + x.line("r.ReadMapElemValue()") // f("z.DecSendContainerState(codecSelfer_containerMapValue%s)", x.xs) x.decStructMapSwitch(kName, varname, rtid, t) x.line("} // end for " + tpfx + "j" + i) - x.linef("z.DecSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) + x.line("r.ReadMapEnd()") // f("z.DecSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) } func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid uintptr, t reflect.Type) { @@ -1525,18 +1519,19 @@ func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length for _, si := range tisfi { var t2 reflect.StructField - if si.i != -1 { - t2 = t.Field(int(si.i)) - } else { + { //we must accommodate anonymous fields, where the embedded field is a nil pointer in the value. // t2 = t.FieldByIndex(si.is) t2typ := t varname3 := varname - for _, ix := range si.is { + for ij, ix := range si.is { + if uint8(ij) == si.nis { + break + } for t2typ.Kind() == reflect.Ptr { t2typ = t2typ.Elem() } - t2 = t2typ.Field(ix) + t2 = t2typ.Field(int(ix)) t2typ = t2.Type varname3 = varname3 + "." 
+ t2.Name if t2typ.Kind() == reflect.Ptr { @@ -1548,10 +1543,10 @@ func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }", tpfx, i, tpfx, i, tpfx, i, tpfx, i, lenvarname, tpfx, i) - x.linef("if %sb%s { z.DecSendContainerState(codecSelfer_containerArrayEnd%s); %s }", - tpfx, i, x.xs, breakString) - x.linef("z.DecSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) - x.decVar(varname+"."+t2.Name, t2.Type, true) + x.linef("if %sb%s { r.ReadArrayEnd(); %s }", tpfx, i, breakString) + // x.linef("if %sb%s { z.DecSendContainerState(codecSelfer_containerArrayEnd%s); %s }", tpfx, i, x.xs, breakString) + x.line("r.ReadArrayElem()") // f("z.DecSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) + x.decVar(varname+"."+t2.Name, "", t2.Type, true) } // read remaining values and throw away. x.line("for {") @@ -1559,10 +1554,10 @@ func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid tpfx, i, tpfx, i, tpfx, i, tpfx, i, lenvarname, tpfx, i) x.linef("if %sb%s { break }", tpfx, i) - x.linef("z.DecSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) + x.line("r.ReadArrayElem()") // f("z.DecSendContainerState(codecSelfer_containerArrayElem%s)", x.xs) x.linef(`z.DecStructFieldNotFound(%sj%s - 1, "")`, tpfx, i) x.line("}") - x.linef("z.DecSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) + x.line("r.ReadArrayEnd()") // f("z.DecSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) } func (x *genRunner) decStruct(varname string, rtid uintptr, t reflect.Type) { @@ -1572,7 +1567,7 @@ func (x *genRunner) decStruct(varname string, rtid uintptr, t reflect.Type) { x.linef("if %sct%s == codecSelferValueTypeMap%s {", genTempVarPfx, i, x.xs) x.line(genTempVarPfx + "l" + i + " := r.ReadMapStart()") x.linef("if %sl%s == 0 {", genTempVarPfx, i) - x.linef("z.DecSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) + x.line("r.ReadMapEnd()") // f("z.DecSendContainerState(codecSelfer_containerMapEnd%s)", x.xs) if genUseOneFunctionForDecStructMap { x.line("} else { ") x.linef("x.codecDecodeSelfFromMap(%sl%s, d)", genTempVarPfx, i) @@ -1588,7 +1583,7 @@ func (x *genRunner) decStruct(varname string, rtid uintptr, t reflect.Type) { x.linef("} else if %sct%s == codecSelferValueTypeArray%s {", genTempVarPfx, i, x.xs) x.line(genTempVarPfx + "l" + i + " := r.ReadArrayStart()") x.linef("if %sl%s == 0 {", genTempVarPfx, i) - x.linef("z.DecSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) + x.line("r.ReadArrayEnd()") // f("z.DecSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs) x.line("} else { ") x.linef("x.codecDecodeSelfFromArray(%sl%s, d)", genTempVarPfx, i) x.line("}") @@ -1776,8 +1771,8 @@ func genIsImmutable(t reflect.Type) (v bool) { } type genInternal struct { - Values []genV - Unsafe bool + Version int + Values []genV } func (x genInternal) FastpathLen() (l int) { @@ -1802,6 +1797,21 @@ func genInternalZeroValue(s string) string { } } +func genInternalNonZeroValue(s string) string { + switch s { + case "interface{}", "interface {}": + return `"string-is-an-interface"` // return string, to remove ambiguity + case "bool": + return "true" + case "string": + return `"some-string"` + case "float32", "float64", "float", "double": + return "10.1" + default: + return "10" + } +} + func genInternalEncCommandAsString(s string, vname string) string { switch s { case "uint", "uint8", "uint16", "uint32", "uint64": @@ -1891,7 +1901,7 @@ func 
stripVendor(s string) string { } // var genInternalMu sync.Mutex -var genInternalV genInternal +var genInternalV = genInternal{Version: genVersion} var genInternalTmplFuncs template.FuncMap var genInternalOnce sync.Once @@ -1954,7 +1964,7 @@ func genInternalInit() { "float64": 8, "bool": 1, } - var gt genInternal + var gt = genInternal{Version: genVersion} // For each slice or map type, there must be a (symmetrical) Encode and Decode fast-path function for _, s := range types { @@ -1975,6 +1985,7 @@ func genInternalInit() { funcs["encmd"] = genInternalEncCommandAsString funcs["decmd"] = genInternalDecCommandAsString funcs["zerocmd"] = genInternalZeroValue + funcs["nonzerocmd"] = genInternalNonZeroValue funcs["hasprefix"] = strings.HasPrefix funcs["sorttype"] = genInternalSortType @@ -1986,11 +1997,10 @@ func genInternalInit() { // It is run by the program author alone. // Unfortunately, it has to be exported so that it can be called from a command line tool. // *** DO NOT USE *** -func genInternalGoFile(r io.Reader, w io.Writer, safe bool) (err error) { +func genInternalGoFile(r io.Reader, w io.Writer) (err error) { genInternalOnce.Do(genInternalInit) gt := genInternalV - gt.Unsafe = !safe t := template.New("").Funcs(genInternalTmplFuncs) diff --git a/vendor/github.com/ugorji/go/codec/decode_go.go b/vendor/github.com/ugorji/go/codec/goversion_arrayof_gte_go15.go similarity index 59% rename from vendor/github.com/ugorji/go/codec/decode_go.go rename to vendor/github.com/ugorji/go/codec/goversion_arrayof_gte_go15.go index ba289cef61..7567e2c07b 100644 --- a/vendor/github.com/ugorji/go/codec/decode_go.go +++ b/vendor/github.com/ugorji/go/codec/goversion_arrayof_gte_go15.go @@ -9,8 +9,6 @@ import "reflect" const reflectArrayOfSupported = true -func reflectArrayOf(rvn reflect.Value) (rvn2 reflect.Value) { - rvn2 = reflect.New(reflect.ArrayOf(rvn.Len(), intfTyp)).Elem() - reflect.Copy(rvn2, rvn) - return +func reflectArrayOf(count int, elem reflect.Type) reflect.Type { + return reflect.ArrayOf(count, elem) } diff --git a/vendor/github.com/ugorji/go/codec/decode_go14.go b/vendor/github.com/ugorji/go/codec/goversion_arrayof_lt_go15.go similarity index 64% rename from vendor/github.com/ugorji/go/codec/decode_go14.go rename to vendor/github.com/ugorji/go/codec/goversion_arrayof_lt_go15.go index 50063bc8fc..ec94bd0c0a 100644 --- a/vendor/github.com/ugorji/go/codec/decode_go14.go +++ b/vendor/github.com/ugorji/go/codec/goversion_arrayof_lt_go15.go @@ -9,6 +9,6 @@ import "reflect" const reflectArrayOfSupported = false -func reflectArrayOf(rvn reflect.Value) (rvn2 reflect.Value) { - panic("reflect.ArrayOf unsupported") +func reflectArrayOf(count int, elem reflect.Type) reflect.Type { + panic("codec: reflect.ArrayOf unsupported in this go version") } diff --git a/vendor/github.com/ugorji/go/codec/goversion_makemap_gte_go19.go b/vendor/github.com/ugorji/go/codec/goversion_makemap_gte_go19.go new file mode 100644 index 0000000000..51fe40e5bf --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/goversion_makemap_gte_go19.go @@ -0,0 +1,15 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
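// [Editor's aside — illustrative sketch, not part of the vendored patch.]
// The goversion_arrayof_* files above gate reflect.ArrayOf behind a go1.5
// build tag, and the helper now returns a reflect.Type instead of copying
// into a freshly built array value. The standalone example below only
// demonstrates the standard-library call the gte_go15 variant relies on.
package main

import (
	"fmt"
	"reflect"
)

func main() {
	// Build the type [4]int at runtime, as reflectArrayOf(4, reflect.TypeOf(0)) would.
	at := reflect.ArrayOf(4, reflect.TypeOf(int(0)))
	av := reflect.New(at).Elem() // addressable [4]int value
	for i := 0; i < at.Len(); i++ {
		av.Index(i).SetInt(int64(i * i))
	}
	fmt.Println(at, av.Interface()) // [4]int [0 1 4 9]
}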
+ +// +build go1.9 + +package codec + +import "reflect" + +func makeMapReflect(t reflect.Type, size int) reflect.Value { + if size < 0 { + return reflect.MakeMapWithSize(t, 4) + } + return reflect.MakeMapWithSize(t, size) +} diff --git a/vendor/github.com/ugorji/go/codec/goversion_makemap_lt_go19.go b/vendor/github.com/ugorji/go/codec/goversion_makemap_lt_go19.go new file mode 100644 index 0000000000..d4b9c2c8d9 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/goversion_makemap_lt_go19.go @@ -0,0 +1,12 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build !go1.9 + +package codec + +import "reflect" + +func makeMapReflect(t reflect.Type, size int) reflect.Value { + return reflect.MakeMap(t) +} diff --git a/vendor/github.com/ugorji/go/codec/goversion_unsupported_lt_go14.go b/vendor/github.com/ugorji/go/codec/goversion_unsupported_lt_go14.go new file mode 100644 index 0000000000..dcd8c3d11c --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/goversion_unsupported_lt_go14.go @@ -0,0 +1,17 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build !go1.4 + +package codec + +// This codec package will only work for go1.4 and above. +// This is for the following reasons: +// - go 1.4 was released in 2014 +// - go runtime is written fully in go +// - interface only holds pointers +// - reflect.Value is stabilized as 3 words + +func init() { + panic("codec: go 1.3 and below are not supported") +} diff --git a/vendor/github.com/ugorji/go/codec/gen_15.go b/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go15.go similarity index 72% rename from vendor/github.com/ugorji/go/codec/gen_15.go rename to vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go15.go index ab76c31027..68626e1ce7 100644 --- a/vendor/github.com/ugorji/go/codec/gen_15.go +++ b/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go15.go @@ -7,6 +7,4 @@ package codec import "os" -func init() { - genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") == "1" -} +var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") == "1" diff --git a/vendor/github.com/ugorji/go/codec/gen_16.go b/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go16.go similarity index 65% rename from vendor/github.com/ugorji/go/codec/gen_16.go rename to vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go16.go index 87c04e2e18..344f5967be 100644 --- a/vendor/github.com/ugorji/go/codec/gen_16.go +++ b/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go16.go @@ -1,12 +1,10 @@ // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. // Use of this source code is governed by a MIT license found in the LICENSE file. 
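// [Editor's aside — illustrative sketch, not part of the vendored patch.]
// goversion_makemap_gte_go19.go and _lt_go19.go select between
// reflect.MakeMapWithSize (go1.9+) and plain reflect.MakeMap at build time.
// The sketch below shows the same idea in one place; makeMapReflectSketch is
// a hypothetical stand-in, and the small-default clamp for a negative size
// hint mirrors what the gte_go19 variant above does.
package main

import (
	"fmt"
	"reflect"
)

// makeMapReflectSketch: a negative size hint (e.g. an indefinite-length
// stream) falls back to a small default before pre-sizing the map.
func makeMapReflectSketch(t reflect.Type, size int) reflect.Value {
	if size < 0 {
		size = 4
	}
	return reflect.MakeMapWithSize(t, size)
}

func main() {
	mt := reflect.TypeOf(map[string]int(nil))
	mv := makeMapReflectSketch(mt, 16)
	mv.SetMapIndex(reflect.ValueOf("answer"), reflect.ValueOf(42))
	fmt.Println(mv.Interface()) // map[answer:42]
}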
-// +build go1.6 +// +build go1.6,!go1.7 package codec import "os" -func init() { - genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") != "0" -} +var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") != "0" diff --git a/vendor/github.com/ugorji/go/codec/gen_17.go b/vendor/github.com/ugorji/go/codec/goversion_vendor_gte_go17.go similarity index 82% rename from vendor/github.com/ugorji/go/codec/gen_17.go rename to vendor/github.com/ugorji/go/codec/goversion_vendor_gte_go17.go index 3881a43ce6..de91d29407 100644 --- a/vendor/github.com/ugorji/go/codec/gen_17.go +++ b/vendor/github.com/ugorji/go/codec/goversion_vendor_gte_go17.go @@ -5,6 +5,4 @@ package codec -func init() { - genCheckVendor = true -} +const genCheckVendor = true diff --git a/vendor/github.com/ugorji/go/codec/goversion_vendor_lt_go15.go b/vendor/github.com/ugorji/go/codec/goversion_vendor_lt_go15.go new file mode 100644 index 0000000000..9d007bfed4 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/goversion_vendor_lt_go15.go @@ -0,0 +1,8 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build !go1.5 + +package codec + +var genCheckVendor = false diff --git a/vendor/github.com/ugorji/go/codec/helper.go b/vendor/github.com/ugorji/go/codec/helper.go index 8b94fc1e4d..51bdb9cd9f 100644 --- a/vendor/github.com/ugorji/go/codec/helper.go +++ b/vendor/github.com/ugorji/go/codec/helper.go @@ -103,8 +103,10 @@ import ( "errors" "fmt" "math" + "os" "reflect" "sort" + "strconv" "strings" "sync" "time" @@ -112,32 +114,56 @@ import ( const ( scratchByteArrayLen = 32 - initCollectionCap = 32 // 32 is defensive. 16 is preferred. + // initCollectionCap = 16 // 32 is defensive. 16 is preferred. // Support encoding.(Binary|Text)(Unm|M)arshaler. // This constant flag will enable or disable it. supportMarshalInterfaces = true - // Each Encoder or Decoder uses a cache of functions based on conditionals, - // so that the conditionals are not run every time. - // - // Either a map or a slice is used to keep track of the functions. - // The map is more natural, but has a higher cost than a slice/array. - // This flag (useMapForCodecCache) controls which is used. - // - // From benchmarks, slices with linear search perform better with < 32 entries. - // We have typically seen a high threshold of about 24 entries. - useMapForCodecCache = false - // for debugging, set this to false, to catch panic traces. // Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic. recoverPanicToErr = true - // if resetSliceElemToZeroValue, then on decoding a slice, reset the element to a zero value first. - // Only concern is that, if the slice already contained some garbage, we will decode into that garbage. - // The chances of this are slim, so leave this "optimization". - // TODO: should this be true, to ensure that we always decode into a "zero" "empty" value? - resetSliceElemToZeroValue bool = false + // arrayCacheLen is the length of the cache used in encoder or decoder for + // allowing zero-alloc initialization. + arrayCacheLen = 8 + + // We tried an optimization, where we detect if a type is one of the known types + // we optimized for (e.g. int, []uint64, etc). + // + // However, we notice some worse performance when using this optimization. + // So we hide it behind a flag, to turn on if needed. 
+ useLookupRecognizedTypes = false + + // using recognized allows us to do d.decode(interface{}) instead of d.decodeValue(reflect.Value) + // when we can infer that the kind of the interface{} is one of the ones hard-coded in the + // type switch for known types or the ones defined by fast-path. + // + // However, it seems we get better performance when we don't recognize, and just let + // reflection handle it. + // + // Reasoning is as below: + // typeswitch is a binary search with a branch to a code-point. + // getdecfn is a binary search with a call to a function pointer. + // + // both are about the same. + // + // so: why prefer typeswitch? + // + // is recognized does the following: + // - lookup rtid + // - check if in sorted list + // - calls decode(type switch) + // - 1 or 2 binary search to a point in code + // - branch there + // + // vs getdecfn + // - lookup rtid + // - check in sorted list for a function pointer + // - calls it to decode using reflection (optimized) + + // always set xDebug = false before releasing software + xDebug = true ) var ( @@ -145,6 +171,27 @@ var ( zeroByteSlice = oneByteArr[:0:0] ) +var refBitset bitset32 + +var pool pooler + +func init() { + pool.init() + + refBitset.set(byte(reflect.Map)) + refBitset.set(byte(reflect.Ptr)) + refBitset.set(byte(reflect.Func)) + refBitset.set(byte(reflect.Chan)) +} + +// type findCodecFnMode uint8 + +// const ( +// findCodecFnModeMap findCodecFnMode = iota +// findCodecFnModeBinarySearch +// findCodecFnModeLinearSearch +// ) + type charEncoding uint8 const ( @@ -177,6 +224,36 @@ const ( // valueTypeInvalid = 0xff ) +func (x valueType) String() string { + switch x { + case valueTypeNil: + return "Nil" + case valueTypeInt: + return "Int" + case valueTypeUint: + return "Uint" + case valueTypeFloat: + return "Float" + case valueTypeBool: + return "Bool" + case valueTypeString: + return "String" + case valueTypeSymbol: + return "Symbol" + case valueTypeBytes: + return "Bytes" + case valueTypeMap: + return "Map" + case valueTypeArray: + return "Array" + case valueTypeTimestamp: + return "Timestamp" + case valueTypeExt: + return "Ext" + } + return strconv.FormatInt(int64(x), 10) +} + type seqType uint8 const ( @@ -216,30 +293,26 @@ const rgetMaxRecursion = 2 // Anecdotally, we believe most types have <= 12 fields. // Java's PMD rules set TooManyFields threshold to 15. 
-const rgetPoolTArrayLen = 12 +const typeInfoLoadArrayLen = 12 -type rgetT struct { +type typeInfoLoad struct { fNames []string encNames []string etypes []uintptr sfis []*structFieldInfo } -type rgetPoolT struct { - fNames [rgetPoolTArrayLen]string - encNames [rgetPoolTArrayLen]string - etypes [rgetPoolTArrayLen]uintptr - sfis [rgetPoolTArrayLen]*structFieldInfo - sfiidx [rgetPoolTArrayLen]sfiIdx +type typeInfoLoadArray struct { + fNames [typeInfoLoadArrayLen]string + encNames [typeInfoLoadArrayLen]string + etypes [typeInfoLoadArrayLen]uintptr + sfis [typeInfoLoadArrayLen]*structFieldInfo + sfiidx [typeInfoLoadArrayLen]sfiIdx } -var rgetPool = sync.Pool{ - New: func() interface{} { return new(rgetPoolT) }, -} - -type containerStateRecv interface { - sendContainerState(containerState) -} +// type containerStateRecv interface { +// sendContainerState(containerState) +// } // mirror json.Marshaler and json.Unmarshaler here, // so we don't import the encoding/json package @@ -250,6 +323,8 @@ type jsonUnmarshaler interface { UnmarshalJSON([]byte) error } +// type byteAccepter func(byte) bool + var ( bigen = binary.BigEndian structInfoFieldName = "_struct" @@ -278,17 +353,17 @@ var ( selferTyp = reflect.TypeOf((*Selfer)(nil)).Elem() - uint8SliceTypId = reflect.ValueOf(uint8SliceTyp).Pointer() - rawExtTypId = reflect.ValueOf(rawExtTyp).Pointer() - rawTypId = reflect.ValueOf(rawTyp).Pointer() - intfTypId = reflect.ValueOf(intfTyp).Pointer() - timeTypId = reflect.ValueOf(timeTyp).Pointer() - stringTypId = reflect.ValueOf(stringTyp).Pointer() + uint8SliceTypId = rt2id(uint8SliceTyp) + rawExtTypId = rt2id(rawExtTyp) + rawTypId = rt2id(rawTyp) + intfTypId = rt2id(intfTyp) + timeTypId = rt2id(timeTyp) + stringTypId = rt2id(stringTyp) - mapStrIntfTypId = reflect.ValueOf(mapStrIntfTyp).Pointer() - mapIntfIntfTypId = reflect.ValueOf(mapIntfIntfTyp).Pointer() - intfSliceTypId = reflect.ValueOf(intfSliceTyp).Pointer() - // mapBySliceTypId = reflect.ValueOf(mapBySliceTyp).Pointer() + mapStrIntfTypId = rt2id(mapStrIntfTyp) + mapIntfIntfTypId = rt2id(mapIntfIntfTyp) + intfSliceTypId = rt2id(intfSliceTyp) + // mapBySliceTypId = rt2id(mapBySliceTyp) intBitsize uint8 = uint8(reflect.TypeOf(int(0)).Bits()) uintBitsize uint8 = uint8(reflect.TypeOf(uint(0)).Bits()) @@ -303,6 +378,115 @@ var ( var defTypeInfos = NewTypeInfos([]string{"codec", "json"}) +var immutableKindsSet = [32]bool{ + // reflect.Invalid: , + reflect.Bool: true, + reflect.Int: true, + reflect.Int8: true, + reflect.Int16: true, + reflect.Int32: true, + reflect.Int64: true, + reflect.Uint: true, + reflect.Uint8: true, + reflect.Uint16: true, + reflect.Uint32: true, + reflect.Uint64: true, + reflect.Uintptr: true, + reflect.Float32: true, + reflect.Float64: true, + reflect.Complex64: true, + reflect.Complex128: true, + // reflect.Array + // reflect.Chan + // reflect.Func: true, + // reflect.Interface + // reflect.Map + // reflect.Ptr + // reflect.Slice + reflect.String: true, + // reflect.Struct + // reflect.UnsafePointer +} + +var ( + recognizedRtids []uintptr + recognizedRtidPtrs []uintptr + recognizedRtidOrPtrs []uintptr + recognizedRtidsLoaded bool +) + +func init() { + if !useLookupRecognizedTypes { + return + } + if recognizedRtidsLoaded { + panic("recognizedRtidsLoaded = true - cannot happen") + } + for _, v := range [...]interface{}{ + float32(0), + float64(0), + uintptr(0), + uint(0), + uint8(0), + uint16(0), + uint32(0), + uint64(0), + uintptr(0), + int(0), + int8(0), + int16(0), + int32(0), + int64(0), + bool(false), + string(""), + Raw{}, 
+ []byte(nil), + } { + rt := reflect.TypeOf(v) + recognizedRtids = append(recognizedRtids, rt2id(rt)) + recognizedRtidPtrs = append(recognizedRtidPtrs, rt2id(reflect.PtrTo(rt))) + } + + // now sort it. + sort.Sort(uintptrSlice(recognizedRtids)) + sort.Sort(uintptrSlice(recognizedRtidPtrs)) + recognizedRtidOrPtrs = make([]uintptr, len(recognizedRtids)+len(recognizedRtidPtrs)) + copy(recognizedRtidOrPtrs, recognizedRtids) + copy(recognizedRtidOrPtrs[len(recognizedRtids):], recognizedRtidPtrs) + sort.Sort(uintptrSlice(recognizedRtidOrPtrs)) + + recognizedRtidsLoaded = true +} + +func containsU(s []uintptr, v uintptr) bool { + // return false // TODO: REMOVE + h, i, j := 0, 0, len(s) + for i < j { + h = i + (j-i)/2 + if s[h] < v { + i = h + 1 + } else { + j = h + } + } + if i < len(s) && s[i] == v { + return true + } + return false +} + +func isRecognizedRtid(rtid uintptr) bool { + return containsU(recognizedRtids, rtid) +} + +func isRecognizedRtidPtr(rtid uintptr) bool { + return containsU(recognizedRtidPtrs, rtid) +} + +func isRecognizedRtidOrPtr(rtid uintptr) bool { + return containsU(recognizedRtidOrPtrs, rtid) +} + // Selfer defines methods by which a value can encode or decode itself. // // Any type which implements Selfer will be able to encode or decode itself. @@ -336,6 +520,7 @@ type BasicHandle struct { extHandle EncodeOptions DecodeOptions + noBuiltInTypeChecker } func (x *BasicHandle) getBasicHandle() *BasicHandle { @@ -343,10 +528,10 @@ func (x *BasicHandle) getBasicHandle() *BasicHandle { } func (x *BasicHandle) getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) { - if x.TypeInfos != nil { - return x.TypeInfos.get(rtid, rt) + if x.TypeInfos == nil { + return defTypeInfos.get(rtid, rt) } - return defTypeInfos.get(rtid, rt) + return x.TypeInfos.get(rtid, rt) } // Handle is the interface for a specific encoding format. @@ -359,6 +544,8 @@ type Handle interface { newEncDriver(w *Encoder) encDriver newDecDriver(r *Decoder) decDriver isBinary() bool + hasElemSeparators() bool + IsBuiltinType(rtid uintptr) bool } // Raw represents raw formatted bytes. @@ -476,9 +663,6 @@ func (x *setExtWrapper) UpdateExt(dest interface{}, v interface{}) { x.i.UpdateExt(dest, v) } -// type errorString string -// func (x errorString) Error() string { return string(x) } - type binaryEncodingType struct{} func (_ binaryEncodingType) isBinary() bool { return true } @@ -489,15 +673,23 @@ func (_ textEncodingType) isBinary() bool { return false } // noBuiltInTypes is embedded into many types which do not support builtins // e.g. msgpack, simple, cbor. -type noBuiltInTypes struct{} -func (_ noBuiltInTypes) IsBuiltinType(rt uintptr) bool { return false } +type noBuiltInTypeChecker struct{} + +func (_ noBuiltInTypeChecker) IsBuiltinType(rt uintptr) bool { return false } + +type noBuiltInTypes struct{ noBuiltInTypeChecker } + func (_ noBuiltInTypes) EncodeBuiltin(rt uintptr, v interface{}) {} func (_ noBuiltInTypes) DecodeBuiltin(rt uintptr, v interface{}) {} -type noStreamingCodec struct{} +// type noStreamingCodec struct{} +// func (_ noStreamingCodec) CheckBreak() bool { return false } +// func (_ noStreamingCodec) hasElemSeparators() bool { return false } -func (_ noStreamingCodec) CheckBreak() bool { return false } +type noElemSeparators struct{} + +func (_ noElemSeparators) hasElemSeparators() (v bool) { return } // bigenHelper. // Users must already slice the x completely, because we will not reslice. 
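// A minimal standalone sketch (not part of this patch) of the lookup technique
// containsU/isRecognizedRtid above rely on: keep type ids in a sorted []uintptr
// and answer membership with a half-open binary search instead of a map lookup.
// The name `contains` and the sample values below are illustrative only.
package main

import (
    "fmt"
    "sort"
)

// contains reports whether v is present in the sorted slice s, using the same
// binary-search shape as containsU above.
func contains(s []uintptr, v uintptr) bool {
    i, j := 0, len(s)
    for i < j {
        h := i + (j-i)/2
        if s[h] < v {
            i = h + 1
        } else {
            j = h
        }
    }
    return i < len(s) && s[i] == v
}

func main() {
    s := []uintptr{40, 10, 30, 20}
    // the slice must be sorted before it can be searched, as in init() above
    sort.Slice(s, func(i, j int) bool { return s[i] < s[j] })
    fmt.Println(contains(s, 30)) // true
    fmt.Println(contains(s, 25)) // false
}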
@@ -559,7 +751,7 @@ func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) { return } - rtid := reflect.ValueOf(rt).Pointer() + rtid := rt2id(rt) for _, v := range *o { if v.rtid == rtid { v.tag, v.ext = tag, ext @@ -596,64 +788,44 @@ func (o extHandle) getExtForTag(tag uint64) *extTypeTagFn { return nil } +const maxLevelsEmbedding = 16 + type structFieldInfo struct { encName string // encode name fieldName string // field name - // only one of 'i' or 'is' can be set. If 'i' is -1, then 'is' has been set. - - is []int // (recursive/embedded) field index in struct - i int16 // field index in struct + is [maxLevelsEmbedding]uint16 // (recursive/embedded) field index in struct + nis uint8 // num levels of embedding. if 1, then it's not embedded. omitEmpty bool toArray bool // if field is _struct, is the toArray set? } -// func (si *structFieldInfo) isZero() bool { -// return si.encName == "" && len(si.is) == 0 && si.i == 0 && !si.omitEmpty && !si.toArray -// } +func (si *structFieldInfo) setToZeroValue(v reflect.Value) { + if v, valid := si.field(v, false); valid { + v.Set(reflect.Zero(v.Type())) + } +} // rv returns the field of the struct. // If anonymous, it returns an Invalid -func (si *structFieldInfo) field(v reflect.Value, update bool) (rv2 reflect.Value) { - if si.i != -1 { - v = v.Field(int(si.i)) - return v - } +func (si *structFieldInfo) field(v reflect.Value, update bool) (rv2 reflect.Value, valid bool) { // replicate FieldByIndex - for _, x := range si.is { - for v.Kind() == reflect.Ptr { - if v.IsNil() { - if !update { - return - } - v.Set(reflect.New(v.Type().Elem())) - } - v = v.Elem() + for i, x := range si.is { + if uint8(i) == si.nis { + break } - v = v.Field(x) + if v, valid = baseStructRv(v, update); !valid { + return + } + v = v.Field(int(x)) } - return v + + return v, true } -func (si *structFieldInfo) setToZeroValue(v reflect.Value) { - if si.i != -1 { - v = v.Field(int(si.i)) - v.Set(reflect.Zero(v.Type())) - // v.Set(reflect.New(v.Type()).Elem()) - // v.Set(reflect.New(v.Type())) - } else { - // replicate FieldByIndex - for _, x := range si.is { - for v.Kind() == reflect.Ptr { - if v.IsNil() { - return - } - v = v.Elem() - } - v = v.Field(x) - } - v.Set(reflect.Zero(v.Type())) - } +func (si *structFieldInfo) fieldval(v reflect.Value, update bool) reflect.Value { + v, _ = si.field(v, update) + return v } func parseStructFieldInfo(fname string, stag string) *structFieldInfo { @@ -697,7 +869,98 @@ func (p sfiSortedByEncName) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -// typeInfo keeps information about each type referenced in the encode/decode sequence. +const structFieldNodeNumToCache = 4 + +type structFieldNodeCache struct { + rv [structFieldNodeNumToCache]reflect.Value + idx [structFieldNodeNumToCache]uint32 + num uint8 +} + +func (x *structFieldNodeCache) get(key uint32) (fv reflect.Value, valid bool) { + // defer func() { fmt.Printf(">>>> found in cache2? 
%v\n", valid) }() + for i, k := range &x.idx { + if uint8(i) == x.num { + return // break + } + if key == k { + return x.rv[i], true + } + } + return +} + +func (x *structFieldNodeCache) tryAdd(fv reflect.Value, key uint32) { + if x.num < structFieldNodeNumToCache { + x.rv[x.num] = fv + x.idx[x.num] = key + x.num++ + return + } +} + +type structFieldNode struct { + v reflect.Value + cache2 structFieldNodeCache + cache3 structFieldNodeCache + update bool +} + +func (x *structFieldNode) field(si *structFieldInfo) (fv reflect.Value) { + // return si.fieldval(x.v, x.update) + // Note: we only cache if nis=2 or nis=3 i.e. up to 2 levels of embedding + // This mostly saves us time on the repeated calls to v.Elem, v.Field, etc. + var valid bool + switch si.nis { + case 1: + fv = x.v.Field(int(si.is[0])) + case 2: + if fv, valid = x.cache2.get(uint32(si.is[0])); valid { + fv = fv.Field(int(si.is[1])) + return + } + fv = x.v.Field(int(si.is[0])) + if fv, valid = baseStructRv(fv, x.update); !valid { + return + } + x.cache2.tryAdd(fv, uint32(si.is[0])) + fv = fv.Field(int(si.is[1])) + case 3: + var key uint32 = uint32(si.is[0])<<16 | uint32(si.is[1]) + if fv, valid = x.cache3.get(key); valid { + fv = fv.Field(int(si.is[2])) + return + } + fv = x.v.Field(int(si.is[0])) + if fv, valid = baseStructRv(fv, x.update); !valid { + return + } + fv = fv.Field(int(si.is[1])) + if fv, valid = baseStructRv(fv, x.update); !valid { + return + } + x.cache3.tryAdd(fv, key) + fv = fv.Field(int(si.is[2])) + default: + fv, _ = si.field(x.v, x.update) + } + return +} + +func baseStructRv(v reflect.Value, update bool) (v2 reflect.Value, valid bool) { + for v.Kind() == reflect.Ptr { + if v.IsNil() { + if !update { + return + } + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + return v, true +} + +// typeInfo keeps information about each (non-ptr) type referenced in the encode/decode sequence. // // During an encode/decode sequence, we work as below: // - If base is a built in type, en/decode base value @@ -711,74 +974,79 @@ type typeInfo struct { rt reflect.Type rtid uintptr + // rv0 reflect.Value // saved zero value, used if immutableKind numMeth uint16 // number of methods - // baseId gives pointer to the base reflect.Type, after deferencing - // the pointers. E.g. base type of ***time.Time is time.Time. - base reflect.Type - baseId uintptr - baseIndir int8 // number of indirections to get to base + anyOmitEmpty bool mbs bool // base type (T or *T) is a MapBySlice - bm bool // base type (T or *T) is a binaryMarshaler - bunm bool // base type (T or *T) is a binaryUnmarshaler - bmIndir int8 // number of indirections to get to binaryMarshaler type - bunmIndir int8 // number of indirections to get to binaryUnmarshaler type + // format of marshal type fields below: [btj][mu]p? OR csp? 
- tm bool // base type (T or *T) is a textMarshaler - tunm bool // base type (T or *T) is a textUnmarshaler - tmIndir int8 // number of indirections to get to textMarshaler type - tunmIndir int8 // number of indirections to get to textUnmarshaler type - - jm bool // base type (T or *T) is a jsonMarshaler - junm bool // base type (T or *T) is a jsonUnmarshaler - jmIndir int8 // number of indirections to get to jsonMarshaler type - junmIndir int8 // number of indirections to get to jsonUnmarshaler type - - cs bool // base type (T or *T) is a Selfer - csIndir int8 // number of indirections to get to Selfer type + bm bool // T is a binaryMarshaler + bmp bool // *T is a binaryMarshaler + bu bool // T is a binaryUnmarshaler + bup bool // *T is a binaryUnmarshaler + tm bool // T is a textMarshaler + tmp bool // *T is a textMarshaler + tu bool // T is a textUnmarshaler + tup bool // *T is a textUnmarshaler + jm bool // T is a jsonMarshaler + jmp bool // *T is a jsonMarshaler + ju bool // T is a jsonUnmarshaler + jup bool // *T is a jsonUnmarshaler + cs bool // T is a Selfer + csp bool // *T is a Selfer toArray bool // whether this (struct) type should be encoded as an array } +// define length beyond which we do a binary search instead of a linear search. +// From our testing, linear search seems faster than binary search up to 16-field structs. +// However, we set to 8 similar to what python does for hashtables. +const indexForEncNameBinarySearchThreshold = 8 + func (ti *typeInfo) indexForEncName(name string) int { // NOTE: name may be a stringView, so don't pass it to another function. //tisfi := ti.sfi - const binarySearchThreshold = 16 - if sfilen := len(ti.sfi); sfilen < binarySearchThreshold { - // linear search. faster than binary search in my testing up to 16-field structs. + sfilen := len(ti.sfi) + if sfilen < indexForEncNameBinarySearchThreshold { for i, si := range ti.sfi { if si.encName == name { return i } } - } else { - // binary search. adapted from sort/search.go. - h, i, j := 0, 0, sfilen - for i < j { - h = i + (j-i)/2 - if ti.sfi[h].encName < name { - i = h + 1 - } else { - j = h - } - } - if i < sfilen && ti.sfi[i].encName == name { - return i + return -1 + } + // binary search. adapted from sort/search.go. + h, i, j := 0, 0, sfilen + for i < j { + h = i + (j-i)/2 + if ti.sfi[h].encName < name { + i = h + 1 + } else { + j = h } } + if i < sfilen && ti.sfi[i].encName == name { + return i + } return -1 } +type rtid2ti struct { + rtid uintptr + ti *typeInfo +} + // TypeInfos caches typeInfo for each type on first inspection. // // It is configured with a set of tag keys, which are used to get // configuration for the type. type TypeInfos struct { - infos map[uintptr]*typeInfo - mu sync.RWMutex + infos atomicTypeInfoSlice // formerly map[uintptr]*typeInfo, now *[]rtid2ti + mu sync.Mutex tags []string } @@ -787,7 +1055,7 @@ type TypeInfos struct { // This allows users customize the struct tag keys which contain configuration // of their types. func NewTypeInfos(tags []string) *TypeInfos { - return &TypeInfos{tags: tags, infos: make(map[uintptr]*typeInfo, 64)} + return &TypeInfos{tags: tags} } func (x *TypeInfos) structTag(t reflect.StructTag) (s string) { @@ -802,90 +1070,101 @@ func (x *TypeInfos) structTag(t reflect.StructTag) (s string) { return } +func (x *TypeInfos) find(sp *[]rtid2ti, rtid uintptr) (idx int, ti *typeInfo) { + // binary search. adapted from sort/search.go. 
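// A standalone sketch (not part of this patch) of the strategy indexForEncName
// above uses: linear scan for small field counts, binary search once the sorted
// slice grows past indexForEncNameBinarySearchThreshold. `indexOf`,
// `searchThreshold` and the sample names are illustrative only.
package main

import (
    "fmt"
    "sort"
)

const searchThreshold = 8 // mirrors indexForEncNameBinarySearchThreshold

// indexOf returns the position of name in names (which must be sorted),
// scanning linearly below the threshold and binary-searching above it.
func indexOf(names []string, name string) int {
    if len(names) < searchThreshold {
        for i, s := range names {
            if s == name {
                return i
            }
        }
        return -1
    }
    if i := sort.SearchStrings(names, name); i < len(names) && names[i] == name {
        return i
    }
    return -1
}

func main() {
    names := []string{"age", "email", "id", "name"} // already sorted by encName
    fmt.Println(indexOf(names, "id"))    // 2
    fmt.Println(indexOf(names, "phone")) // -1
}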
+ // if sp == nil { + // return -1, nil + // } + s := *sp + h, i, j := 0, 0, len(s) + for i < j { + h = i + (j-i)/2 + if s[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + } + if i < len(s) && s[i].rtid == rtid { + return i, s[i].ti + } + return i, nil +} + func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) { - var ok bool - x.mu.RLock() - pti, ok = x.infos[rtid] - x.mu.RUnlock() - if ok { - return + sp := x.infos.load() + var idx int + if sp != nil { + idx, pti = x.find(sp, rtid) + if pti != nil { + return + } + } + + rk := rt.Kind() + + if rk == reflect.Ptr { // || (rk == reflect.Interface && rtid != intfTypId) { + panic(fmt.Errorf("invalid kind passed to TypeInfos.get: %v - %v", rk, rt)) } // do not hold lock while computing this. // it may lead to duplication, but that's ok. ti := typeInfo{rt: rt, rtid: rtid} + // ti.rv0 = reflect.Zero(rt) + ti.numMeth = uint16(rt.NumMethod()) - var indir int8 - if ok, indir = implementsIntf(rt, binaryMarshalerTyp); ok { - ti.bm, ti.bmIndir = true, indir - } - if ok, indir = implementsIntf(rt, binaryUnmarshalerTyp); ok { - ti.bunm, ti.bunmIndir = true, indir - } - if ok, indir = implementsIntf(rt, textMarshalerTyp); ok { - ti.tm, ti.tmIndir = true, indir - } - if ok, indir = implementsIntf(rt, textUnmarshalerTyp); ok { - ti.tunm, ti.tunmIndir = true, indir - } - if ok, indir = implementsIntf(rt, jsonMarshalerTyp); ok { - ti.jm, ti.jmIndir = true, indir - } - if ok, indir = implementsIntf(rt, jsonUnmarshalerTyp); ok { - ti.junm, ti.junmIndir = true, indir - } - if ok, indir = implementsIntf(rt, selferTyp); ok { - ti.cs, ti.csIndir = true, indir - } - if ok, _ = implementsIntf(rt, mapBySliceTyp); ok { - ti.mbs = true - } + ti.bm, ti.bmp = implIntf(rt, binaryMarshalerTyp) + ti.bu, ti.bup = implIntf(rt, binaryUnmarshalerTyp) + ti.tm, ti.tmp = implIntf(rt, textMarshalerTyp) + ti.tu, ti.tup = implIntf(rt, textUnmarshalerTyp) + ti.jm, ti.jmp = implIntf(rt, jsonMarshalerTyp) + ti.ju, ti.jup = implIntf(rt, jsonUnmarshalerTyp) + ti.cs, ti.csp = implIntf(rt, selferTyp) + ti.mbs, _ = implIntf(rt, mapBySliceTyp) - pt := rt - var ptIndir int8 - // for ; pt.Kind() == reflect.Ptr; pt, ptIndir = pt.Elem(), ptIndir+1 { } - for pt.Kind() == reflect.Ptr { - pt = pt.Elem() - ptIndir++ - } - if ptIndir == 0 { - ti.base = rt - ti.baseId = rtid - } else { - ti.base = pt - ti.baseId = reflect.ValueOf(pt).Pointer() - ti.baseIndir = ptIndir - } - - if rt.Kind() == reflect.Struct { + if rk == reflect.Struct { var omitEmpty bool if f, ok := rt.FieldByName(structInfoFieldName); ok { siInfo := parseStructFieldInfo(structInfoFieldName, x.structTag(f.Tag)) ti.toArray = siInfo.toArray omitEmpty = siInfo.omitEmpty } - pi := rgetPool.Get() - pv := pi.(*rgetPoolT) - pv.etypes[0] = ti.baseId - vv := rgetT{pv.fNames[:0], pv.encNames[:0], pv.etypes[:1], pv.sfis[:0]} + pp, pi := pool.tiLoad() + pv := pi.(*typeInfoLoadArray) + pv.etypes[0] = ti.rtid + vv := typeInfoLoad{pv.fNames[:0], pv.encNames[:0], pv.etypes[:1], pv.sfis[:0]} x.rget(rt, rtid, omitEmpty, nil, &vv) - ti.sfip, ti.sfi = rgetResolveSFI(vv.sfis, pv.sfiidx[:0]) - rgetPool.Put(pi) + ti.sfip, ti.sfi, ti.anyOmitEmpty = rgetResolveSFI(vv.sfis, pv.sfiidx[:0]) + pp.Put(pi) } // sfi = sfip + var vs []rtid2ti x.mu.Lock() - if pti, ok = x.infos[rtid]; !ok { + sp = x.infos.load() + if sp == nil { pti = &ti - x.infos[rtid] = pti + vs = []rtid2ti{{rtid, pti}} + x.infos.store(&vs) + } else { + idx, pti = x.find(sp, rtid) + if pti == nil { + s := *sp + pti = &ti + vs = make([]rtid2ti, len(s)+1) + copy(vs, s[:idx]) + 
vs[idx] = rtid2ti{rtid, pti} + copy(vs[idx+1:], s[idx:]) + x.infos.store(&vs) + } } x.mu.Unlock() return } func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr, omitEmpty bool, - indexstack []int, pv *rgetT, + indexstack []uint16, pv *typeInfoLoad, ) { // Read up fields and store how to access the value. // @@ -895,10 +1174,13 @@ func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr, omitEmpty bool, // Note: we consciously use slices, not a map, to simulate a set. // Typically, types have < 16 fields, // and iteration using equals is faster than maps there - + flen := rt.NumField() + if flen > (1< maxLevelsEmbedding-1 { + panic(fmt.Errorf("codec: only supports up to %v depth of embedding - type has %v depth", maxLevelsEmbedding-1, len(indexstack))) } + si.nis = uint8(len(indexstack)) + 1 + copy(si.is[:], indexstack) + si.is[len(indexstack)] = j if omitEmpty { si.omitEmpty = true @@ -1001,16 +1280,16 @@ LOOP: // resolves the struct field info got from a call to rget. // Returns a trimmed, unsorted and sorted []*structFieldInfo. -func rgetResolveSFI(x []*structFieldInfo, pv []sfiIdx) (y, z []*structFieldInfo) { +func rgetResolveSFI(x []*structFieldInfo, pv []sfiIdx) (y, z []*structFieldInfo, anyOmitEmpty bool) { var n int for i, v := range x { - xn := v.encName //TODO: fieldName or encName? use encName for now. + xn := v.encName // TODO: fieldName or encName? use encName for now. var found bool for j, k := range pv { if k.name == xn { // one of them must be reset to nil, and the index updated appropriately to the other one - if len(v.is) == len(x[k.index].is) { - } else if len(v.is) < len(x[k.index].is) { + if v.nis == x[k.index].nis { + } else if v.nis < x[k.index].nis { pv[j].index = i if x[k.index] != nil { x[k.index] = nil @@ -1038,6 +1317,9 @@ func rgetResolveSFI(x []*structFieldInfo, pv []sfiIdx) (y, z []*structFieldInfo) if v == nil { continue } + if !anyOmitEmpty && v.omitEmpty { + anyOmitEmpty = true + } y[n] = v n++ } @@ -1048,15 +1330,37 @@ func rgetResolveSFI(x []*structFieldInfo, pv []sfiIdx) (y, z []*structFieldInfo) return } +func implIntf(rt, iTyp reflect.Type) (base bool, indir bool) { + return rt.Implements(iTyp), reflect.PtrTo(rt).Implements(iTyp) +} + +func xprintf(format string, a ...interface{}) { + if xDebug { + fmt.Fprintf(os.Stderr, format, a...) 
+ } +} + func panicToErr(err *error) { if recoverPanicToErr { if x := recover(); x != nil { - //debug.PrintStack() + // if false && xDebug { + // fmt.Printf("panic'ing with: %v\n", x) + // debug.PrintStack() + // } panicValToErr(x, err) } } } +func panicToErrs2(err1, err2 *error) { + if recoverPanicToErr { + if x := recover(); x != nil { + panicValToErr(x, err1) + panicValToErr(x, err2) + } + } +} + // func doPanic(tag string, format string, params ...interface{}) { // params2 := make([]interface{}, len(params)+1) // params2[0] = tag @@ -1065,24 +1369,280 @@ func panicToErr(err *error) { // } func isImmutableKind(k reflect.Kind) (v bool) { - return false || - k == reflect.Int || - k == reflect.Int8 || - k == reflect.Int16 || - k == reflect.Int32 || - k == reflect.Int64 || - k == reflect.Uint || - k == reflect.Uint8 || - k == reflect.Uint16 || - k == reflect.Uint32 || - k == reflect.Uint64 || - k == reflect.Uintptr || - k == reflect.Float32 || - k == reflect.Float64 || - k == reflect.Bool || - k == reflect.String + return immutableKindsSet[k] + // return false || + // k == reflect.Int || + // k == reflect.Int8 || + // k == reflect.Int16 || + // k == reflect.Int32 || + // k == reflect.Int64 || + // k == reflect.Uint || + // k == reflect.Uint8 || + // k == reflect.Uint16 || + // k == reflect.Uint32 || + // k == reflect.Uint64 || + // k == reflect.Uintptr || + // k == reflect.Float32 || + // k == reflect.Float64 || + // k == reflect.Bool || + // k == reflect.String } +// ---- + +type codecFnInfo struct { + ti *typeInfo + xfFn Ext + xfTag uint64 + seq seqType + addrD bool + addrE bool +} + +// codecFn encapsulates the captured variables and the encode function. +// This way, we only do some calculations one times, and pass to the +// code block that should be called (encapsulated in a function) +// instead of executing the checks every time. 
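// A standalone sketch (not part of this patch) of the idea the codecFn comment
// above describes and the codecFner below implements: inspect a type once,
// capture the resulting encode function, and reuse it for every value of that
// type. The real code caches by rtid in fixed-size arrays; the map keyed by
// reflect.Type and the `encFnFor` name here are simplifications for illustration.
package main

import (
    "fmt"
    "reflect"
)

// encFn is a precomputed encoder for one concrete type.
type encFn func(v reflect.Value) string

var fnCache = map[reflect.Type]encFn{}

func encFnFor(t reflect.Type) encFn {
    if fn, ok := fnCache[t]; ok {
        return fn
    }
    var fn encFn
    switch t.Kind() { // decided once per type, not once per value
    case reflect.String:
        fn = func(v reflect.Value) string { return fmt.Sprintf("%q", v.String()) }
    case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
        fn = func(v reflect.Value) string { return fmt.Sprintf("%d", v.Int()) }
    default:
        fn = func(v reflect.Value) string { return fmt.Sprintf("%v", v.Interface()) }
    }
    fnCache[t] = fn
    return fn
}

func main() {
    for _, x := range []interface{}{"hi", 42, 3.5} {
        rv := reflect.ValueOf(x)
        fmt.Println(encFnFor(rv.Type())(rv))
    }
}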
+type codecFn struct { + i codecFnInfo + fe func(*Encoder, *codecFnInfo, reflect.Value) + fd func(*Decoder, *codecFnInfo, reflect.Value) +} + +type codecRtidFn struct { + rtid uintptr + fn codecFn +} + +type codecFner struct { + hh Handle + h *BasicHandle + cs [arrayCacheLen]*[arrayCacheLen]codecRtidFn + s []*[arrayCacheLen]codecRtidFn + sn uint32 + be bool + js bool + cf [arrayCacheLen]codecRtidFn +} + +func (c *codecFner) reset(hh Handle) { + c.hh = hh + c.h = hh.getBasicHandle() + _, c.js = hh.(*JsonHandle) + c.be = hh.isBinary() +} + +func (c *codecFner) get(rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn *codecFn) { + rtid := rt2id(rt) + var j uint32 + var sn uint32 = c.sn + if sn == 0 { + c.s = c.cs[:1] + c.s[0] = &c.cf + c.cf[0].rtid = rtid + fn = &(c.cf[0].fn) + c.sn = 1 + } else { + LOOP1: + for _, x := range c.s { + for i := range x { + if j == sn { + break LOOP1 + } + if x[i].rtid == rtid { + fn = &(x[i].fn) + return + } + j++ + } + } + sx, sy := sn/arrayCacheLen, sn%arrayCacheLen + if sy == 0 { + c.s = append(c.s, &[arrayCacheLen]codecRtidFn{}) + } + c.s[sx][sy].rtid = rtid + fn = &(c.s[sx][sy].fn) + c.sn++ + } + + ti := c.h.getTypeInfo(rtid, rt) + fi := &(fn.i) + fi.ti = ti + + rk := rt.Kind() + + if checkCodecSelfer && (ti.cs || ti.csp) { + fn.fe = (*Encoder).selferMarshal + fn.fd = (*Decoder).selferUnmarshal + fi.addrD = ti.csp + fi.addrE = ti.csp + } else if rtid == rawTypId { + fn.fe = (*Encoder).raw + fn.fd = (*Decoder).raw + } else if rtid == rawExtTypId { + fn.fe = (*Encoder).rawExt + fn.fd = (*Decoder).rawExt + fi.addrD = true + fi.addrE = true + } else if c.hh.IsBuiltinType(rtid) { + fn.fe = (*Encoder).builtin + fn.fd = (*Decoder).builtin + fi.addrD = true + } else if xfFn := c.h.getExt(rtid); xfFn != nil { + fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext + fn.fe = (*Encoder).ext + fn.fd = (*Decoder).ext + fi.addrD = true + if rk == reflect.Struct || rk == reflect.Array { + fi.addrE = true + } + } else if supportMarshalInterfaces && c.be && (ti.bm || ti.bmp) && (ti.bu || ti.bup) { + fn.fe = (*Encoder).binaryMarshal + fn.fd = (*Decoder).binaryUnmarshal + fi.addrD = ti.bup + fi.addrE = ti.bmp + } else if supportMarshalInterfaces && !c.be && c.js && (ti.jm || ti.jmp) && (ti.ju || ti.jup) { + //If JSON, we should check JSONMarshal before textMarshal + fn.fe = (*Encoder).jsonMarshal + fn.fd = (*Decoder).jsonUnmarshal + fi.addrD = ti.jup + fi.addrE = ti.jmp + } else if supportMarshalInterfaces && !c.be && (ti.tm || ti.tmp) && (ti.tu || ti.tup) { + fn.fe = (*Encoder).textMarshal + fn.fd = (*Decoder).textUnmarshal + fi.addrD = ti.tup + fi.addrE = ti.tmp + } else { + if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) { + if rt.PkgPath() == "" { // un-named slice or map + if idx := fastpathAV.index(rtid); idx != -1 { + fn.fe = fastpathAV[idx].encfn + fn.fd = fastpathAV[idx].decfn + fi.addrD = true + } + } else { + // use mapping for underlying type if there + var rtu reflect.Type + if rk == reflect.Map { + rtu = reflect.MapOf(rt.Key(), rt.Elem()) + } else { + rtu = reflect.SliceOf(rt.Elem()) + } + rtuid := rt2id(rtu) + if idx := fastpathAV.index(rtuid); idx != -1 { + xfnf := fastpathAV[idx].encfn + xrt := fastpathAV[idx].rt + fn.fe = func(e *Encoder, xf *codecFnInfo, xrv reflect.Value) { + xfnf(e, xf, xrv.Convert(xrt)) + } + fi.addrD = true + xfnf2 := fastpathAV[idx].decfn + fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) { + xfnf2(d, xf, xrv.Convert(reflect.PtrTo(xrt))) + } + } + } + } + if fn.fe == nil && fn.fd == nil { + switch 
rk { + case reflect.Bool: + fn.fe = (*Encoder).kBool + fn.fd = (*Decoder).kBool + case reflect.String: + fn.fe = (*Encoder).kString + fn.fd = (*Decoder).kString + case reflect.Int: + fn.fd = (*Decoder).kInt + fn.fe = (*Encoder).kInt + case reflect.Int8: + fn.fe = (*Encoder).kInt + fn.fd = (*Decoder).kInt8 + case reflect.Int16: + fn.fe = (*Encoder).kInt + fn.fd = (*Decoder).kInt16 + case reflect.Int32: + fn.fe = (*Encoder).kInt + fn.fd = (*Decoder).kInt32 + case reflect.Int64: + fn.fe = (*Encoder).kInt + fn.fd = (*Decoder).kInt64 + case reflect.Uint: + fn.fd = (*Decoder).kUint + fn.fe = (*Encoder).kUint + case reflect.Uint8: + fn.fe = (*Encoder).kUint + fn.fd = (*Decoder).kUint8 + case reflect.Uint16: + fn.fe = (*Encoder).kUint + fn.fd = (*Decoder).kUint16 + case reflect.Uint32: + fn.fe = (*Encoder).kUint + fn.fd = (*Decoder).kUint32 + case reflect.Uint64: + fn.fe = (*Encoder).kUint + fn.fd = (*Decoder).kUint64 + // case reflect.Ptr: + // fn.fd = (*Decoder).kPtr + case reflect.Uintptr: + fn.fe = (*Encoder).kUint + fn.fd = (*Decoder).kUintptr + case reflect.Float32: + fn.fe = (*Encoder).kFloat32 + fn.fd = (*Decoder).kFloat32 + case reflect.Float64: + fn.fe = (*Encoder).kFloat64 + fn.fd = (*Decoder).kFloat64 + case reflect.Invalid: + fn.fe = (*Encoder).kInvalid + case reflect.Chan: + fi.seq = seqTypeChan + fn.fe = (*Encoder).kSlice + fn.fd = (*Decoder).kSlice + case reflect.Slice: + fi.seq = seqTypeSlice + fn.fe = (*Encoder).kSlice + fn.fd = (*Decoder).kSlice + case reflect.Array: + fi.seq = seqTypeArray + fn.fe = (*Encoder).kSlice + fi.addrD = false + rt2 := reflect.SliceOf(rt.Elem()) + fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) { + // println(">>>>>> decoding an array ... ") + d.cf.get(rt2, true, false).fd(d, xf, xrv.Slice(0, xrv.Len())) + // println(">>>>>> decoding an array ... 
DONE") + } + // fn.fd = (*Decoder).kArray + case reflect.Struct: + if ti.anyOmitEmpty { + fn.fe = (*Encoder).kStruct + } else { + fn.fe = (*Encoder).kStructNoOmitempty + } + fn.fd = (*Decoder).kStruct + // reflect.Ptr and reflect.Interface are handled already by preEncodeValue + // case reflect.Ptr: + // fn.fe = (*Encoder).kPtr + // case reflect.Interface: + // fn.fe = (*Encoder).kInterface + case reflect.Map: + fn.fe = (*Encoder).kMap + fn.fd = (*Decoder).kMap + case reflect.Interface: + // encode: reflect.Interface are handled already by preEncodeValue + fn.fd = (*Decoder).kInterface + default: + fn.fe = (*Encoder).kErr + fn.fd = (*Decoder).kErr + } + } + } + + return +} + +// ---- + // these functions must be inlinable, and not call anybody type checkOverflow struct{} @@ -1140,6 +1700,7 @@ func isNaN(f float64) bool { return f != f } type intSlice []int64 type uintSlice []uint64 +type uintptrSlice []uintptr type floatSlice []float64 type boolSlice []bool type stringSlice []string @@ -1153,6 +1714,10 @@ func (p uintSlice) Len() int { return len(p) } func (p uintSlice) Less(i, j int) bool { return p[i] < p[j] } func (p uintSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p uintptrSlice) Len() int { return len(p) } +func (p uintptrSlice) Less(i, j int) bool { return p[i] < p[j] } +func (p uintptrSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + func (p floatSlice) Len() int { return len(p) } func (p floatSlice) Less(i, j int) bool { return p[i] < p[j] || isNaN(p[i]) && !isNaN(p[j]) @@ -1249,7 +1814,6 @@ type set []uintptr func (s *set) add(v uintptr) (exists bool) { // e.ci is always nil, or len >= 1 - // defer func() { fmt.Printf("$$$$$$$$$$$ cirRef Add: %v, exists: %v\n", v, exists) }() x := *s if x == nil { x = make([]uintptr, 1, 8) @@ -1290,7 +1854,6 @@ func (s *set) add(v uintptr) (exists bool) { } func (s *set) remove(v uintptr) (exists bool) { - // defer func() { fmt.Printf("$$$$$$$$$$$ cirRef Rm: %v, exists: %v\n", v, exists) }() x := *s if len(x) == 0 { return @@ -1312,3 +1875,89 @@ func (s *set) remove(v uintptr) (exists bool) { } return } + +// ------ + +// bitset types are better than [256]bool, because they permit the whole +// bitset array being on a single cache line and use less memory. + +// given x > 0 and n > 0 and x is exactly 2^n, then pos/x === pos>>n AND pos%x === pos&(x-1). 
+// consequently, pos/32 === pos>>5, pos/16 === pos>>4, pos/8 === pos>>3, pos%8 == pos&7 + +type bitset256 [32]byte + +func (x *bitset256) set(pos byte) { + x[pos>>3] |= (1 << (pos & 7)) +} +func (x *bitset256) unset(pos byte) { + x[pos>>3] &^= (1 << (pos & 7)) +} +func (x *bitset256) isset(pos byte) bool { + return x[pos>>3]&(1<<(pos&7)) != 0 +} + +type bitset128 [16]byte + +func (x *bitset128) set(pos byte) { + x[pos>>3] |= (1 << (pos & 7)) +} +func (x *bitset128) unset(pos byte) { + x[pos>>3] &^= (1 << (pos & 7)) +} +func (x *bitset128) isset(pos byte) bool { + return x[pos>>3]&(1<<(pos&7)) != 0 +} + +type bitset32 [4]byte + +func (x *bitset32) set(pos byte) { + x[pos>>3] |= (1 << (pos & 7)) +} +func (x *bitset32) unset(pos byte) { + x[pos>>3] &^= (1 << (pos & 7)) +} +func (x *bitset32) isset(pos byte) bool { + return x[pos>>3]&(1<<(pos&7)) != 0 +} + +// ------------ + +type pooler struct { + // for stringRV + strRv8, strRv16, strRv32, strRv64, strRv128 sync.Pool + // for the decNaked + dn sync.Pool + tiload sync.Pool +} + +func (p *pooler) init() { + p.strRv8.New = func() interface{} { return new([8]stringRv) } + p.strRv16.New = func() interface{} { return new([16]stringRv) } + p.strRv32.New = func() interface{} { return new([32]stringRv) } + p.strRv64.New = func() interface{} { return new([64]stringRv) } + p.strRv128.New = func() interface{} { return new([128]stringRv) } + p.dn.New = func() interface{} { x := new(decNaked); x.init(); return x } + p.tiload.New = func() interface{} { return new(typeInfoLoadArray) } +} + +func (p *pooler) stringRv8() (sp *sync.Pool, v interface{}) { + return &p.strRv8, p.strRv8.Get() +} +func (p *pooler) stringRv16() (sp *sync.Pool, v interface{}) { + return &p.strRv16, p.strRv16.Get() +} +func (p *pooler) stringRv32() (sp *sync.Pool, v interface{}) { + return &p.strRv32, p.strRv32.Get() +} +func (p *pooler) stringRv64() (sp *sync.Pool, v interface{}) { + return &p.strRv64, p.strRv64.Get() +} +func (p *pooler) stringRv128() (sp *sync.Pool, v interface{}) { + return &p.strRv128, p.strRv128.Get() +} +func (p *pooler) decNaked() (sp *sync.Pool, v interface{}) { + return &p.dn, p.dn.Get() +} +func (p *pooler) tiLoad() (sp *sync.Pool, v interface{}) { + return &p.tiload, p.tiload.Get() +} diff --git a/vendor/github.com/ugorji/go/codec/helper_internal.go b/vendor/github.com/ugorji/go/codec/helper_internal.go index 5d0727f77f..5de92ba087 100644 --- a/vendor/github.com/ugorji/go/codec/helper_internal.go +++ b/vendor/github.com/ugorji/go/codec/helper_internal.go @@ -9,7 +9,6 @@ package codec import ( "errors" "fmt" - "math" "reflect" ) @@ -86,37 +85,6 @@ func pruneSignExt(v []byte, pos bool) (n int) { return } -func implementsIntf(typ, iTyp reflect.Type) (success bool, indir int8) { - if typ == nil { - return - } - rt := typ - // The type might be a pointer and we need to keep - // dereferencing to the base type until we find an implementation. - for { - if rt.Implements(iTyp) { - return true, indir - } - if p := rt; p.Kind() == reflect.Ptr { - indir++ - if indir >= math.MaxInt8 { // insane number of indirections - return false, 0 - } - rt = p.Elem() - continue - } - break - } - // No luck yet, but if this is a base type (non-pointer), the pointer might satisfy. - if typ.Kind() != reflect.Ptr { - // Not a pointer, but does the pointer work? - if reflect.PtrTo(typ).Implements(iTyp) { - return true, -1 - } - } - return false, 0 -} - // validate that this function is correct ... 
// culled from OGRE (Object-Oriented Graphics Rendering Engine) // function: halfToFloatI (http://stderr.org/doc/ogre-doc/api/OgreBitwise_8h-source.html) @@ -219,24 +187,3 @@ func growCap(oldCap, unit, num int) (newCap int) { } return } - -func expandSliceValue(s reflect.Value, num int) reflect.Value { - if num <= 0 { - return s - } - l0 := s.Len() - l1 := l0 + num // new slice length - if l1 < l0 { - panic("ExpandSlice: slice overflow") - } - c0 := s.Cap() - if l1 <= c0 { - return s.Slice(0, l1) - } - st := s.Type() - c1 := growCap(c0, int(st.Elem().Size()), num) - s2 := reflect.MakeSlice(st, l1, c1) - // println("expandslicevalue: cap-old: ", c0, ", cap-new: ", c1, ", len-new: ", l1) - reflect.Copy(s2, s) - return s2 -} diff --git a/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go b/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go index f254b98860..ef5b73f49d 100644 --- a/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go +++ b/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go @@ -1,10 +1,17 @@ -// +build !unsafe +// +build !go1.7 safe appengine // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. // Use of this source code is governed by a MIT license found in the LICENSE file. package codec +import ( + "reflect" + "sync/atomic" +) + +const safeMode = true + // stringView returns a view of the []byte as a string. // In unsafe mode, it doesn't incur allocation and copying caused by conversion. // In regular safe mode, it is an allocation and copy. @@ -25,12 +32,129 @@ func bytesView(v string) []byte { return []byte(v) } -// keepAlive4BytesView maintains a reference to the input parameter for bytesView. -// -// Usage: call this at point where done with the bytes view. -func keepAlive4BytesView(v string) {} +func definitelyNil(v interface{}) bool { + // this is a best-effort option. + // We just return false, so we don't unneessarily incur the cost of reflection this early. + return false + // rv := reflect.ValueOf(v) + // switch rv.Kind() { + // case reflect.Invalid: + // return true + // case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Slice, reflect.Map, reflect.Func: + // return rv.IsNil() + // default: + // return false + // } +} -// keepAlive4BytesView maintains a reference to the input parameter for stringView. -// -// Usage: call this at point where done with the string view. -func keepAlive4StringView(v []byte) {} +// // keepAlive4BytesView maintains a reference to the input parameter for bytesView. +// // +// // Usage: call this at point where done with the bytes view. +// func keepAlive4BytesView(v string) {} + +// // keepAlive4BytesView maintains a reference to the input parameter for stringView. +// // +// // Usage: call this at point where done with the string view. 
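// A standalone sketch (not part of this patch) of the problem definitelyNil
// above works around: an interface holding a typed nil pointer is not == nil,
// so a cheap nil check needs either reflection (this safe build) or a peek at
// the interface's data word (the unsafe build). `T` is an illustrative type.
package main

import (
    "fmt"
    "reflect"
)

type T struct{}

func main() {
    var p *T              // nil pointer
    var i interface{} = p // i now holds (type *T, value nil)

    fmt.Println(i == nil)                   // false: the interface itself is non-nil
    fmt.Println(reflect.ValueOf(i).IsNil()) // true: the pointer it holds is nil
}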
+// func keepAlive4StringView(v []byte) {} + +func rv2i(rv reflect.Value) interface{} { + return rv.Interface() +} + +func rt2id(rt reflect.Type) uintptr { + return reflect.ValueOf(rt).Pointer() +} + +func rv2rtid(rv reflect.Value) uintptr { + return reflect.ValueOf(rv.Type()).Pointer() +} + +// -------------------------- +// type ptrToRvMap struct{} + +// func (_ *ptrToRvMap) init() {} +// func (_ *ptrToRvMap) get(i interface{}) reflect.Value { +// return reflect.ValueOf(i).Elem() +// } + +// -------------------------- +type atomicTypeInfoSlice struct { + v atomic.Value +} + +func (x *atomicTypeInfoSlice) load() *[]rtid2ti { + i := x.v.Load() + if i == nil { + return nil + } + return i.(*[]rtid2ti) +} + +func (x *atomicTypeInfoSlice) store(p *[]rtid2ti) { + x.v.Store(p) +} + +// -------------------------- +func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) { + rv.SetBytes(d.rawBytes()) +} + +func (d *Decoder) kString(f *codecFnInfo, rv reflect.Value) { + rv.SetString(d.d.DecodeString()) +} + +func (d *Decoder) kBool(f *codecFnInfo, rv reflect.Value) { + rv.SetBool(d.d.DecodeBool()) +} + +func (d *Decoder) kFloat32(f *codecFnInfo, rv reflect.Value) { + rv.SetFloat(d.d.DecodeFloat(true)) +} + +func (d *Decoder) kFloat64(f *codecFnInfo, rv reflect.Value) { + rv.SetFloat(d.d.DecodeFloat(false)) +} + +func (d *Decoder) kInt(f *codecFnInfo, rv reflect.Value) { + rv.SetInt(d.d.DecodeInt(intBitsize)) +} + +func (d *Decoder) kInt8(f *codecFnInfo, rv reflect.Value) { + rv.SetInt(d.d.DecodeInt(8)) +} + +func (d *Decoder) kInt16(f *codecFnInfo, rv reflect.Value) { + rv.SetInt(d.d.DecodeInt(16)) +} + +func (d *Decoder) kInt32(f *codecFnInfo, rv reflect.Value) { + rv.SetInt(d.d.DecodeInt(32)) +} + +func (d *Decoder) kInt64(f *codecFnInfo, rv reflect.Value) { + rv.SetInt(d.d.DecodeInt(64)) +} + +func (d *Decoder) kUint(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(d.d.DecodeUint(uintBitsize)) +} + +func (d *Decoder) kUintptr(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(d.d.DecodeUint(uintBitsize)) +} + +func (d *Decoder) kUint8(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(d.d.DecodeUint(8)) +} + +func (d *Decoder) kUint16(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(d.d.DecodeUint(16)) +} + +func (d *Decoder) kUint32(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(d.d.DecodeUint(32)) +} + +func (d *Decoder) kUint64(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(d.d.DecodeUint(64)) +} diff --git a/vendor/github.com/ugorji/go/codec/helper_unsafe.go b/vendor/github.com/ugorji/go/codec/helper_unsafe.go index 6c146f77cd..e2c4afecee 100644 --- a/vendor/github.com/ugorji/go/codec/helper_unsafe.go +++ b/vendor/github.com/ugorji/go/codec/helper_unsafe.go @@ -1,4 +1,6 @@ -// +build unsafe +// +build !safe +// +build !appengine +// +build go1.7 // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. // Use of this source code is governed by a MIT license found in the LICENSE file. @@ -6,13 +8,19 @@ package codec import ( - "runtime" + "reflect" + "sync/atomic" "unsafe" ) // This file has unsafe variants of some helper methods. // NOTE: See helper_not_unsafe.go for the usage information. 
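// A standalone sketch (not part of this patch) of the copy-on-write cache shape
// that TypeInfos and atomicTypeInfoSlice above use: readers load a pointer to an
// immutable, sorted slice through atomic.Value and never block; the writer takes
// a mutex, copies the slice with the new entry inserted in order, and publishes
// the fresh pointer. `entry`, `cache` and the string values are illustrative only.
package main

import (
    "fmt"
    "sort"
    "sync"
    "sync/atomic"
)

type entry struct {
    key uintptr
    val string
}

type cache struct {
    v  atomic.Value // holds *[]entry; read without locking
    mu sync.Mutex   // serializes writers only
}

func (c *cache) get(key uintptr) (string, bool) {
    sp, _ := c.v.Load().(*[]entry)
    if sp == nil {
        return "", false
    }
    s := *sp
    i := sort.Search(len(s), func(i int) bool { return s[i].key >= key })
    if i < len(s) && s[i].key == key {
        return s[i].val, true
    }
    return "", false
}

func (c *cache) put(key uintptr, val string) {
    c.mu.Lock()
    defer c.mu.Unlock()
    var s []entry
    if sp, _ := c.v.Load().(*[]entry); sp != nil {
        s = *sp
    }
    i := sort.Search(len(s), func(i int) bool { return s[i].key >= key })
    if i < len(s) && s[i].key == key {
        return // already cached; keep the published slice as-is
    }
    ns := make([]entry, len(s)+1)
    copy(ns, s[:i])
    ns[i] = entry{key, val}
    copy(ns[i+1:], s[i:])
    c.v.Store(&ns) // readers holding the old slice keep a consistent snapshot
}

func main() {
    var c cache
    c.put(30, "thirty")
    c.put(10, "ten")
    fmt.Println(c.get(10)) // ten true
    fmt.Println(c.get(20)) // "" false
}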
+// var zeroRTv [4]uintptr + +const safeMode = false +const unsafeFlagIndir = 1 << 7 // keep in sync with GO_ROOT/src/reflect/value.go + type unsafeString struct { Data uintptr Len int @@ -24,6 +32,17 @@ type unsafeSlice struct { Cap int } +type unsafeIntf struct { + typ unsafe.Pointer + word unsafe.Pointer +} + +type unsafeReflectValue struct { + typ unsafe.Pointer + ptr unsafe.Pointer + flag uintptr +} + func stringView(v []byte) string { if len(v) == 0 { return "" @@ -44,10 +63,369 @@ func bytesView(v string) []byte { return *(*[]byte)(unsafe.Pointer(&bx)) } -func keepAlive4BytesView(v string) { - runtime.KeepAlive(v) +func definitelyNil(v interface{}) bool { + // There is no global way of checking if an interface is nil. + // For true references (map, ptr, func, chan), you can just look + // at the word of the interface. However, for slices, you have to dereference + // the word, and get a pointer to the 3-word interface value. + + // var ui *unsafeIntf = (*unsafeIntf)(unsafe.Pointer(&v)) + // var word unsafe.Pointer = ui.word + // // fmt.Printf(">>>> definitely nil: isnil: %v, TYPE: \t%T, word: %v, *word: %v, type: %v, nil: %v\n", v == nil, v, word, *((*unsafe.Pointer)(word)), ui.typ, nil) + // return word == nil // || *((*unsafe.Pointer)(word)) == nil + return ((*unsafeIntf)(unsafe.Pointer(&v))).word == nil } -func keepAlive4StringView(v []byte) { - runtime.KeepAlive(v) +// func keepAlive4BytesView(v string) { +// runtime.KeepAlive(v) +// } + +// func keepAlive4StringView(v []byte) { +// runtime.KeepAlive(v) +// } + +// TODO: consider a more generally-known optimization for reflect.Value ==> Interface +// +// Currently, we use this fragile method that taps into implememtation details from +// the source go stdlib reflect/value.go, +// and trims the implementation. 
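// A standalone sketch (not part of this patch) of why this unsafe build bothers
// with zero-copy stringView/bytesView: the safe conversions allocate and copy,
// so mutating the copy never touches the original.
package main

import "fmt"

func main() {
    s := "codec"
    b := []byte(s) // what the safe-build bytesView does: allocate and copy
    b[0] = 'C'
    fmt.Println(s, string(b)) // codec Codec: the copy is independent of s

    // The unsafe build instead reinterprets the existing string header, so no
    // allocation happens; that is also why those views must never be used to
    // mutate the underlying data.
}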
+func rv2i(rv reflect.Value) interface{} { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + // true references (map, func, chan, ptr - NOT slice) may be double-referenced as flagIndir + var ptr unsafe.Pointer + // kk := reflect.Kind(urv.flag & (1<<5 - 1)) + // if (kk == reflect.Map || kk == reflect.Ptr || kk == reflect.Chan || kk == reflect.Func) && urv.flag&unsafeFlagIndir != 0 { + if refBitset.isset(byte(urv.flag&(1<<5-1))) && urv.flag&unsafeFlagIndir != 0 { + ptr = *(*unsafe.Pointer)(urv.ptr) + } else { + ptr = urv.ptr + } + return *(*interface{})(unsafe.Pointer(&unsafeIntf{typ: urv.typ, word: ptr})) + // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) + // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) } + +func rt2id(rt reflect.Type) uintptr { + return uintptr(((*unsafeIntf)(unsafe.Pointer(&rt))).word) +} + +func rv2rtid(rv reflect.Value) uintptr { + return uintptr((*unsafeReflectValue)(unsafe.Pointer(&rv)).typ) +} + +// func rv0t(rt reflect.Type) reflect.Value { +// ut := (*unsafeIntf)(unsafe.Pointer(&rt)) +// // we need to determine whether ifaceIndir, and then whether to just pass 0 as the ptr +// uv := unsafeReflectValue{ut.word, &zeroRTv, flag(rt.Kind())} +// return *(*reflect.Value)(unsafe.Pointer(&uv}) +// } + +// -------------------------- +type atomicTypeInfoSlice struct { + v unsafe.Pointer +} + +func (x *atomicTypeInfoSlice) load() *[]rtid2ti { + return (*[]rtid2ti)(atomic.LoadPointer(&x.v)) +} + +func (x *atomicTypeInfoSlice) store(p *[]rtid2ti) { + atomic.StorePointer(&x.v, unsafe.Pointer(p)) +} + +// -------------------------- +func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + // if urv.flag&unsafeFlagIndir != 0 { + // urv.ptr = *(*unsafe.Pointer)(urv.ptr) + // } + *(*[]byte)(urv.ptr) = d.rawBytes() +} + +func (d *Decoder) kString(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*string)(urv.ptr) = d.d.DecodeString() +} + +func (d *Decoder) kBool(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*bool)(urv.ptr) = d.d.DecodeBool() +} + +func (d *Decoder) kFloat32(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*float32)(urv.ptr) = float32(d.d.DecodeFloat(true)) +} + +func (d *Decoder) kFloat64(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*float64)(urv.ptr) = d.d.DecodeFloat(false) +} + +func (d *Decoder) kInt(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*int)(urv.ptr) = int(d.d.DecodeInt(intBitsize)) +} + +func (d *Decoder) kInt8(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*int8)(urv.ptr) = int8(d.d.DecodeInt(8)) +} + +func (d *Decoder) kInt16(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*int16)(urv.ptr) = int16(d.d.DecodeInt(16)) +} + +func (d *Decoder) kInt32(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*int32)(urv.ptr) = int32(d.d.DecodeInt(32)) +} + +func (d *Decoder) kInt64(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*int64)(urv.ptr) = d.d.DecodeInt(64) +} + +func (d *Decoder) kUint(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*uint)(urv.ptr) = 
uint(d.d.DecodeUint(uintBitsize)) +} + +func (d *Decoder) kUintptr(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*uintptr)(urv.ptr) = uintptr(d.d.DecodeUint(uintBitsize)) +} + +func (d *Decoder) kUint8(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*uint8)(urv.ptr) = uint8(d.d.DecodeUint(8)) +} + +func (d *Decoder) kUint16(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*uint16)(urv.ptr) = uint16(d.d.DecodeUint(16)) +} + +func (d *Decoder) kUint32(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*uint32)(urv.ptr) = uint32(d.d.DecodeUint(32)) +} + +func (d *Decoder) kUint64(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*uint64)(urv.ptr) = d.d.DecodeUint(64) +} + +// ------------ + +// func rt2id(rt reflect.Type) uintptr { +// return uintptr(((*unsafeIntf)(unsafe.Pointer(&rt))).word) +// // var i interface{} = rt +// // // ui := (*unsafeIntf)(unsafe.Pointer(&i)) +// // return ((*unsafeIntf)(unsafe.Pointer(&i))).word +// } + +// func rv2i(rv reflect.Value) interface{} { +// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) +// // non-reference type: already indir +// // reference type: depend on flagIndir property ('cos maybe was double-referenced) +// // const (unsafeRvFlagKindMask = 1<<5 - 1 , unsafeRvFlagIndir = 1 << 7 ) +// // rvk := reflect.Kind(urv.flag & (1<<5 - 1)) +// // if (rvk == reflect.Chan || +// // rvk == reflect.Func || +// // rvk == reflect.Interface || +// // rvk == reflect.Map || +// // rvk == reflect.Ptr || +// // rvk == reflect.UnsafePointer) && urv.flag&(1<<8) != 0 { +// // fmt.Printf(">>>>> ---- double indirect reference: %v, %v\n", rvk, rv.Type()) +// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) +// // } +// if urv.flag&(1<<5-1) == uintptr(reflect.Map) && urv.flag&(1<<7) != 0 { +// // fmt.Printf(">>>>> ---- double indirect reference: %v, %v\n", rvk, rv.Type()) +// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) +// } +// // fmt.Printf(">>>>> ++++ direct reference: %v, %v\n", rvk, rv.Type()) +// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) +// } + +// const ( +// unsafeRvFlagKindMask = 1<<5 - 1 +// unsafeRvKindDirectIface = 1 << 5 +// unsafeRvFlagIndir = 1 << 7 +// unsafeRvFlagAddr = 1 << 8 +// unsafeRvFlagMethod = 1 << 9 + +// _USE_RV_INTERFACE bool = false +// _UNSAFE_RV_DEBUG = true +// ) + +// type unsafeRtype struct { +// _ [2]uintptr +// _ uint32 +// _ uint8 +// _ uint8 +// _ uint8 +// kind uint8 +// _ [2]uintptr +// _ int32 +// } + +// func _rv2i(rv reflect.Value) interface{} { +// // Note: From use, +// // - it's never an interface +// // - the only calls here are for ifaceIndir types. +// // (though that conditional is wrong) +// // To know for sure, we need the value of t.kind (which is not exposed). +// // +// // Need to validate the path: type is indirect ==> only value is indirect ==> default (value is direct) +// // - Type indirect, Value indirect: ==> numbers, boolean, slice, struct, array, string +// // - Type Direct, Value indirect: ==> map??? 
+// // - Type Direct, Value direct: ==> pointers, unsafe.Pointer, func, chan, map +// // +// // TRANSLATES TO: +// // if typeIndirect { } else if valueIndirect { } else { } +// // +// // Since we don't deal with funcs, then "flagNethod" is unset, and can be ignored. + +// if _USE_RV_INTERFACE { +// return rv.Interface() +// } +// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + +// // if urv.flag&unsafeRvFlagMethod != 0 || urv.flag&unsafeRvFlagKindMask == uintptr(reflect.Interface) { +// // println("***** IS flag method or interface: delegating to rv.Interface()") +// // return rv.Interface() +// // } + +// // if urv.flag&unsafeRvFlagKindMask == uintptr(reflect.Interface) { +// // println("***** IS Interface: delegate to rv.Interface") +// // return rv.Interface() +// // } +// // if urv.flag&unsafeRvFlagKindMask&unsafeRvKindDirectIface == 0 { +// // if urv.flag&unsafeRvFlagAddr == 0 { +// // println("***** IS ifaceIndir typ") +// // // ui := unsafeIntf{word: urv.ptr, typ: urv.typ} +// // // return *(*interface{})(unsafe.Pointer(&ui)) +// // // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) +// // } +// // } else if urv.flag&unsafeRvFlagIndir != 0 { +// // println("***** IS flagindir") +// // // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) +// // } else { +// // println("***** NOT flagindir") +// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) +// // } +// // println("***** default: delegate to rv.Interface") + +// urt := (*unsafeRtype)(unsafe.Pointer(urv.typ)) +// if _UNSAFE_RV_DEBUG { +// fmt.Printf(">>>> start: %v: ", rv.Type()) +// fmt.Printf("%v - %v\n", *urv, *urt) +// } +// if urt.kind&unsafeRvKindDirectIface == 0 { +// if _UNSAFE_RV_DEBUG { +// fmt.Printf("**** +ifaceIndir type: %v\n", rv.Type()) +// } +// // println("***** IS ifaceIndir typ") +// // if true || urv.flag&unsafeRvFlagAddr == 0 { +// // // println(" ***** IS NOT addr") +// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) +// // } +// } else if urv.flag&unsafeRvFlagIndir != 0 { +// if _UNSAFE_RV_DEBUG { +// fmt.Printf("**** +flagIndir type: %v\n", rv.Type()) +// } +// // println("***** IS flagindir") +// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) +// } else { +// if _UNSAFE_RV_DEBUG { +// fmt.Printf("**** -flagIndir type: %v\n", rv.Type()) +// } +// // println("***** NOT flagindir") +// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) +// } +// // println("***** default: delegating to rv.Interface()") +// // return rv.Interface() +// } + +// var staticM0 = make(map[string]uint64) +// var staticI0 = (int32)(-5) + +// func staticRv2iTest() { +// i0 := (int32)(-5) +// m0 := make(map[string]uint16) +// m0["1"] = 1 +// for _, i := range []interface{}{ +// (int)(7), +// (uint)(8), +// (int16)(-9), +// (uint16)(19), +// (uintptr)(77), +// (bool)(true), +// float32(-32.7), +// float64(64.9), +// complex(float32(19), 5), +// complex(float64(-32), 7), +// [4]uint64{1, 2, 3, 4}, +// (chan<- int)(nil), // chan, +// rv2i, // func +// io.Writer(ioutil.Discard), +// make(map[string]uint), +// (map[string]uint)(nil), +// staticM0, +// m0, +// &m0, +// i0, +// &i0, +// &staticI0, +// &staticM0, +// []uint32{6, 7, 8}, +// "abc", +// Raw{}, +// RawExt{}, +// &Raw{}, +// &RawExt{}, +// unsafe.Pointer(&i0), +// } { +// i2 := rv2i(reflect.ValueOf(i)) +// eq := reflect.DeepEqual(i, i2) +// 
fmt.Printf(">>>> %v == %v? %v\n", i, i2, eq) +// } +// // os.Exit(0) +// } + +// func init() { +// staticRv2iTest() +// } + +// func rv2i(rv reflect.Value) interface{} { +// if _USE_RV_INTERFACE || rv.Kind() == reflect.Interface || rv.CanAddr() { +// return rv.Interface() +// } +// // var i interface{} +// // ui := (*unsafeIntf)(unsafe.Pointer(&i)) +// var ui unsafeIntf +// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) +// // fmt.Printf("urv: flag: %b, typ: %b, ptr: %b\n", urv.flag, uintptr(urv.typ), uintptr(urv.ptr)) +// if (urv.flag&unsafeRvFlagKindMask)&unsafeRvKindDirectIface == 0 { +// if urv.flag&unsafeRvFlagAddr != 0 { +// println("***** indirect and addressable! Needs typed move - delegate to rv.Interface()") +// return rv.Interface() +// } +// println("****** indirect type/kind") +// ui.word = urv.ptr +// } else if urv.flag&unsafeRvFlagIndir != 0 { +// println("****** unsafe rv flag indir") +// ui.word = *(*unsafe.Pointer)(urv.ptr) +// } else { +// println("****** default: assign prt to word directly") +// ui.word = urv.ptr +// } +// // ui.word = urv.ptr +// ui.typ = urv.typ +// // fmt.Printf("(pointers) ui.typ: %p, word: %p\n", ui.typ, ui.word) +// // fmt.Printf("(binary) ui.typ: %b, word: %b\n", uintptr(ui.typ), uintptr(ui.word)) +// return *(*interface{})(unsafe.Pointer(&ui)) +// // return i +// } diff --git a/vendor/github.com/ugorji/go/codec/json.go b/vendor/github.com/ugorji/go/codec/json.go index df67d68cee..a2276070d5 100644 --- a/vendor/github.com/ugorji/go/codec/json.go +++ b/vendor/github.com/ugorji/go/codec/json.go @@ -34,42 +34,61 @@ package codec import ( "bytes" "encoding/base64" - "fmt" "reflect" "strconv" + "unicode" "unicode/utf16" "unicode/utf8" ) //-------------------------------- +var jsonLiterals = [...]byte{ + '"', + 't', 'r', 'u', 'e', + '"', + '"', + 'f', 'a', 'l', 's', 'e', + '"', + '"', + 'n', 'u', 'l', 'l', + '"', +} + +const ( + jsonLitTrueQ = 0 + jsonLitTrue = 1 + jsonLitFalseQ = 6 + jsonLitFalse = 7 + jsonLitNullQ = 13 + jsonLitNull = 14 +) + var ( - jsonLiterals = [...]byte{'t', 'r', 'u', 'e', 'f', 'a', 'l', 's', 'e', 'n', 'u', 'l', 'l'} + // jsonFloat64Pow10 = [...]float64{ + // 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, + // 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, + // 1e20, 1e21, 1e22, + // } - jsonFloat64Pow10 = [...]float64{ - 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, - 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, - 1e20, 1e21, 1e22, - } - - jsonUint64Pow10 = [...]uint64{ - 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, - 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, - } + // jsonUint64Pow10 = [...]uint64{ + // 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, + // 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, + // } // jsonTabs and jsonSpaces are used as caches for indents jsonTabs, jsonSpaces string + + jsonCharHtmlSafeSet bitset128 + jsonCharSafeSet bitset128 + jsonCharWhitespaceSet bitset256 + jsonNumSet bitset256 + // jsonIsFloatSet bitset256 + + jsonU4Set [256]byte ) const ( - // jsonUnreadAfterDecNum controls whether we unread after decoding a number. - // - // instead of unreading, just update d.tok (iff it's not a whitespace char) - // However, doing this means that we may HOLD onto some data which belongs to another stream. - // Thus, it is safest to unread the data when done. - // keep behind a constant flag for now. 
- jsonUnreadAfterDecNum = true - // If !jsonValidateSymbols, decoding will be faster, by skipping some checks: // - If we see first character of null, false or true, // do not validate subsequent characters. @@ -78,20 +97,11 @@ const ( // P.S. Do not expect a significant decoding boost from this. jsonValidateSymbols = true - // if jsonTruncateMantissa, truncate mantissa if trailing 0's. - // This is important because it could allow some floats to be decoded without - // deferring to strconv.ParseFloat. - jsonTruncateMantissa = true - - // if mantissa >= jsonNumUintCutoff before multiplying by 10, this is an overflow - jsonNumUintCutoff = (1<<64-1)/uint64(10) + 1 // cutoff64(base) - - // if mantissa >= jsonNumUintMaxVal, this is an overflow - jsonNumUintMaxVal = 1< & + var i byte + for i = 32; i < utf8.RuneSelf; i++ { + switch i { + case '"', '\\': + case '<', '>', '&': + jsonCharSafeSet.set(i) // = true + default: + jsonCharSafeSet.set(i) + jsonCharHtmlSafeSet.set(i) + } + } + for i = 0; i <= utf8.RuneSelf; i++ { + switch i { + case ' ', '\t', '\r', '\n': + jsonCharWhitespaceSet.set(i) + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'e', 'E', '.', '+', '-': + jsonNumSet.set(i) + } + } + for j := range jsonU4Set { + switch i = byte(j); i { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + jsonU4Set[i] = i - '0' + case 'a', 'b', 'c', 'd', 'e', 'f': + jsonU4Set[i] = i - 'a' + 10 + case 'A', 'B', 'C', 'D', 'E', 'F': + jsonU4Set[i] = i - 'A' + 10 + default: + jsonU4Set[i] = jsonU4SetErrVal + } + // switch i = byte(j); i { + // case 'e', 'E', '.': + // jsonIsFloatSet.set(i) + // } + } + // jsonU4Set[255] = jsonU4SetErrVal } type jsonEncDriver struct { @@ -127,46 +177,71 @@ type jsonEncDriver struct { // - newline and indent are added before each ending, // except there was no entry (so we can have {} or []) -func (e *jsonEncDriver) sendContainerState(c containerState) { - // determine whether to output separators - if c == containerMapKey { - if e.c != containerMapStart { - e.w.writen1(',') - } - if e.d { - e.writeIndent() - } - } else if c == containerMapValue { - if e.d { - e.w.writen2(':', ' ') - } else { - e.w.writen1(':') - } - } else if c == containerMapEnd { - if e.d { - e.dl-- - if e.c != containerMapStart { - e.writeIndent() - } - } - e.w.writen1('}') - } else if c == containerArrayElem { - if e.c != containerArrayStart { - e.w.writen1(',') - } - if e.d { - e.writeIndent() - } - } else if c == containerArrayEnd { - if e.d { - e.dl-- - if e.c != containerArrayStart { - e.writeIndent() - } - } - e.w.writen1(']') +func (e *jsonEncDriver) WriteArrayStart(length int) { + if e.d { + e.dl++ } - e.c = c + e.w.writen1('[') + e.c = containerArrayStart +} + +func (e *jsonEncDriver) WriteArrayElem() { + if e.c != containerArrayStart { + e.w.writen1(',') + } + if e.d { + e.writeIndent() + } + e.c = containerArrayElem +} + +func (e *jsonEncDriver) WriteArrayEnd() { + if e.d { + e.dl-- + if e.c != containerArrayStart { + e.writeIndent() + } + } + e.w.writen1(']') + e.c = containerArrayEnd +} + +func (e *jsonEncDriver) WriteMapStart(length int) { + if e.d { + e.dl++ + } + e.w.writen1('{') + e.c = containerMapStart +} + +func (e *jsonEncDriver) WriteMapElemKey() { + if e.c != containerMapStart { + e.w.writen1(',') + } + if e.d { + e.writeIndent() + } + e.c = containerMapKey +} + +func (e *jsonEncDriver) WriteMapElemValue() { + if e.d { + e.w.writen2(':', ' ') + } else { + e.w.writen1(':') + } + e.c = containerMapValue +} + +func (e *jsonEncDriver) WriteMapEnd() { + if e.d { + e.dl-- + 
if e.c != containerMapStart { + e.writeIndent() + } + } + e.w.writen1('}') + e.c = containerMapEnd } func (e *jsonEncDriver) writeIndent() { @@ -185,14 +260,31 @@ func (e *jsonEncDriver) writeIndent() { } func (e *jsonEncDriver) EncodeNil() { - e.w.writeb(jsonLiterals[9:13]) // null + // We always encode nil as just null (never in quotes) + // This allows us to easily decode if a nil in the json stream + // ie if initial token is n. + e.w.writeb(jsonLiterals[jsonLitNull : jsonLitNull+4]) + + // if e.h.MapKeyAsString && e.c == containerMapKey { + // e.w.writeb(jsonLiterals[jsonLitNullQ : jsonLitNullQ+6]) + // } else { + // e.w.writeb(jsonLiterals[jsonLitNull : jsonLitNull+4]) + // } } func (e *jsonEncDriver) EncodeBool(b bool) { - if b { - e.w.writeb(jsonLiterals[0:4]) // true + if e.h.MapKeyAsString && e.c == containerMapKey { + if b { + e.w.writeb(jsonLiterals[jsonLitTrueQ : jsonLitTrueQ+6]) + } else { + e.w.writeb(jsonLiterals[jsonLitFalseQ : jsonLitFalseQ+7]) + } } else { - e.w.writeb(jsonLiterals[4:9]) // false + if b { + e.w.writeb(jsonLiterals[jsonLitTrue : jsonLitTrue+4]) + } else { + e.w.writeb(jsonLiterals[jsonLitFalse : jsonLitFalse+5]) + } } } @@ -201,33 +293,56 @@ func (e *jsonEncDriver) EncodeFloat32(f float32) { } func (e *jsonEncDriver) EncodeFloat64(f float64) { - // e.w.writestr(strconv.FormatFloat(f, 'E', -1, 64)) e.encodeFloat(f, 64) } func (e *jsonEncDriver) encodeFloat(f float64, numbits int) { - x := strconv.AppendFloat(e.b[:0], f, 'G', -1, numbits) - e.w.writeb(x) - if bytes.IndexByte(x, 'E') == -1 && bytes.IndexByte(x, '.') == -1 { - e.w.writen2('.', '0') + var blen int + var x []byte + if e.h.MapKeyAsString && e.c == containerMapKey { + e.b[0] = '"' + x = strconv.AppendFloat(e.b[1:1], f, 'G', -1, numbits) + blen = 1 + len(x) + if jsonIsFloatBytesB2(x) { + e.b[blen] = '"' + blen += 1 + } else { + e.b[blen] = '.' + e.b[blen+1] = '0' + e.b[blen+2] = '"' + blen += 3 + } + } else { + x = strconv.AppendFloat(e.b[:0], f, 'G', -1, numbits) + blen = len(x) + if !jsonIsFloatBytesB2(x) { + e.b[blen] = '.' 
+ e.b[blen+1] = '0' + blen += 2 + } } + e.w.writeb(e.b[:blen]) } func (e *jsonEncDriver) EncodeInt(v int64) { - if x := e.h.IntegerAsString; x == 'A' || x == 'L' && (v > 1<<53 || v < -(1<<53)) { - e.w.writen1('"') - e.w.writeb(strconv.AppendInt(e.b[:0], v, 10)) - e.w.writen1('"') + x := e.h.IntegerAsString + if x == 'A' || x == 'L' && (v > 1<<53 || v < -(1<<53)) || (e.h.MapKeyAsString && e.c == containerMapKey) { + blen := 2 + len(strconv.AppendInt(e.b[1:1], v, 10)) + e.b[0] = '"' + e.b[blen-1] = '"' + e.w.writeb(e.b[:blen]) return } e.w.writeb(strconv.AppendInt(e.b[:0], v, 10)) } func (e *jsonEncDriver) EncodeUint(v uint64) { - if x := e.h.IntegerAsString; x == 'A' || x == 'L' && v > 1<<53 { - e.w.writen1('"') - e.w.writeb(strconv.AppendUint(e.b[:0], v, 10)) - e.w.writen1('"') + x := e.h.IntegerAsString + if x == 'A' || x == 'L' && v > 1<<53 || (e.h.MapKeyAsString && e.c == containerMapKey) { + blen := 2 + len(strconv.AppendUint(e.b[1:1], v, 10)) + e.b[0] = '"' + e.b[blen-1] = '"' + e.w.writeb(e.b[:blen]) return } e.w.writeb(strconv.AppendUint(e.b[:0], v, 10)) @@ -235,7 +350,7 @@ func (e *jsonEncDriver) EncodeUint(v uint64) { func (e *jsonEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) { if v := ext.ConvertExt(rv); v == nil { - e.w.writeb(jsonLiterals[9:13]) // null // e.EncodeNil() + e.EncodeNil() } else { en.encode(v) } @@ -244,35 +359,17 @@ func (e *jsonEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Enco func (e *jsonEncDriver) EncodeRawExt(re *RawExt, en *Encoder) { // only encodes re.Value (never re.Data) if re.Value == nil { - e.w.writeb(jsonLiterals[9:13]) // null // e.EncodeNil() + e.EncodeNil() } else { en.encode(re.Value) } } -func (e *jsonEncDriver) EncodeArrayStart(length int) { - if e.d { - e.dl++ - } - e.w.writen1('[') - e.c = containerArrayStart -} - -func (e *jsonEncDriver) EncodeMapStart(length int) { - if e.d { - e.dl++ - } - e.w.writen1('{') - e.c = containerMapStart -} - func (e *jsonEncDriver) EncodeString(c charEncoding, v string) { - // e.w.writestr(strconv.Quote(v)) e.quoteStr(v) } func (e *jsonEncDriver) EncodeSymbol(v string) { - // e.EncodeString(c_UTF8, v) e.quoteStr(v) } @@ -294,7 +391,6 @@ func (e *jsonEncDriver) EncodeStringBytes(c charEncoding, v []byte) { e.w.writeb(e.bs) e.w.writen1('"') } else { - // e.EncodeString(c, string(v)) e.quoteStr(stringView(v)) } } @@ -308,12 +404,13 @@ func (e *jsonEncDriver) quoteStr(s string) { const hex = "0123456789abcdef" w := e.w w.writen1('"') - start := 0 - for i := 0; i < len(s); { + var start int + for i, slen := 0, len(s); i < slen; { // encode all bytes < 0x20 (except \r, \n). // also encode < > & to prevent security holes when served to some browsers. 
if b := s[i]; b < utf8.RuneSelf { - if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + // if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + if jsonCharHtmlSafeSet.isset(b) || (e.h.HTMLCharsAsIs && jsonCharSafeSet.isset(b)) { i++ continue } @@ -333,13 +430,6 @@ func (e *jsonEncDriver) quoteStr(s string) { w.writen2('\\', 'f') case '\t': w.writen2('\\', 't') - case '<', '>', '&': - if e.h.HTMLCharsAsIs { - w.writen1(b) - } else { - w.writestr(`\u00`) - w.writen2(hex[b>>4], hex[b&0xF]) - } default: w.writestr(`\u00`) w.writen2(hex[b>>4], hex[b&0xF]) @@ -378,86 +468,14 @@ func (e *jsonEncDriver) quoteStr(s string) { w.writen1('"') } -//-------------------------------- - -type jsonNum struct { - // bytes []byte // may have [+-.eE0-9] - mantissa uint64 // where mantissa ends, and maybe dot begins. - exponent int16 // exponent value. - manOverflow bool - neg bool // started with -. No initial sign in the bytes above. - dot bool // has dot - explicitExponent bool // explicit exponent -} - -func (x *jsonNum) reset() { - x.manOverflow = false - x.neg = false - x.dot = false - x.explicitExponent = false - x.mantissa = 0 - x.exponent = 0 -} - -// uintExp is called only if exponent > 0. -func (x *jsonNum) uintExp() (n uint64, overflow bool) { - n = x.mantissa - e := x.exponent - if e >= int16(len(jsonUint64Pow10)) { - overflow = true - return +func (e *jsonEncDriver) atEndOfEncode() { + if e.h.TermWhitespace { + if e.d { + e.w.writen1('\n') + } else { + e.w.writen1(' ') + } } - n *= jsonUint64Pow10[e] - if n < x.mantissa || n > jsonNumUintMaxVal { - overflow = true - return - } - return - // for i := int16(0); i < e; i++ { - // if n >= jsonNumUintCutoff { - // overflow = true - // return - // } - // n *= 10 - // } - // return -} - -// these constants are only used withn floatVal. -// They are brought out, so that floatVal can be inlined. -const ( - jsonUint64MantissaBits = 52 - jsonMaxExponent = int16(len(jsonFloat64Pow10)) - 1 -) - -func (x *jsonNum) floatVal() (f float64, parseUsingStrConv bool) { - // We do not want to lose precision. - // Consequently, we will delegate to strconv.ParseFloat if any of the following happen: - // - There are more digits than in math.MaxUint64: 18446744073709551615 (20 digits) - // We expect up to 99.... (19 digits) - // - The mantissa cannot fit into a 52 bits of uint64 - // - The exponent is beyond our scope ie beyong 22. - parseUsingStrConv = x.manOverflow || - x.exponent > jsonMaxExponent || - (x.exponent < 0 && -(x.exponent) > jsonMaxExponent) || - x.mantissa>>jsonUint64MantissaBits != 0 - - if parseUsingStrConv { - return - } - - // all good. so handle parse here. - f = float64(x.mantissa) - // fmt.Printf(".Float: uint64 value: %v, float: %v\n", m, f) - if x.neg { - f = -f - } - if x.exponent > 0 { - f *= jsonFloat64Pow10[x.exponent] - } else if x.exponent < 0 { - f /= jsonFloat64Pow10[-x.exponent] - } - return } type jsonDecDriver struct { @@ -470,6 +488,8 @@ type jsonDecDriver struct { // tok is used to store the token read right after skipWhiteSpace. 
tok uint8 + fnull bool // found null from appendStringAsBytes + bstr [8]byte // scratch used for string \UXXX parsing b [64]byte // scratch, used for parsing strings or numbers b2 [64]byte // scratch, used only for decodeBytes (after base64) @@ -477,26 +497,14 @@ type jsonDecDriver struct { se setExtWrapper - n jsonNum + // n jsonNum } func jsonIsWS(b byte) bool { - return b == ' ' || b == '\t' || b == '\r' || b == '\n' + // return b == ' ' || b == '\t' || b == '\r' || b == '\n' + return jsonCharWhitespaceSet.isset(b) } -// // This will skip whitespace characters and return the next byte to read. -// // The next byte determines what the value will be one of. -// func (d *jsonDecDriver) skipWhitespace() { -// // fast-path: do not enter loop. Just check first (in case no whitespace). -// b := d.r.readn1() -// if jsonIsWS(b) { -// r := d.r -// for b = r.readn1(); jsonIsWS(b); b = r.readn1() { -// } -// } -// d.tok = b -// } - func (d *jsonDecDriver) uncacheRead() { if d.tok != 0 { d.r.unreadn1() @@ -504,107 +512,9 @@ func (d *jsonDecDriver) uncacheRead() { } } -func (d *jsonDecDriver) sendContainerState(c containerState) { - if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b - } - var xc uint8 // char expected - if c == containerMapKey { - if d.c != containerMapStart { - xc = ',' - } - } else if c == containerMapValue { - xc = ':' - } else if c == containerMapEnd { - xc = '}' - } else if c == containerArrayElem { - if d.c != containerArrayStart { - xc = ',' - } - } else if c == containerArrayEnd { - xc = ']' - } - if xc != 0 { - if d.tok != xc { - d.d.errorf("json: expect char '%c' but got char '%c'", xc, d.tok) - } - d.tok = 0 - } - d.c = c -} - -func (d *jsonDecDriver) CheckBreak() bool { - if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b - } - if d.tok == '}' || d.tok == ']' { - // d.tok = 0 // only checking, not consuming - return true - } - return false -} - -func (d *jsonDecDriver) readStrIdx(fromIdx, toIdx uint8) { - bs := d.r.readx(int(toIdx - fromIdx)) - d.tok = 0 - if jsonValidateSymbols { - if !bytes.Equal(bs, jsonLiterals[fromIdx:toIdx]) { - d.d.errorf("json: expecting %s: got %s", jsonLiterals[fromIdx:toIdx], bs) - return - } - } -} - -func (d *jsonDecDriver) TryDecodeAsNil() bool { - if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b - } - if d.tok == 'n' { - d.readStrIdx(10, 13) // ull - return true - } - return false -} - -func (d *jsonDecDriver) DecodeBool() bool { - if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b - } - if d.tok == 'f' { - d.readStrIdx(5, 9) // alse - return false - } - if d.tok == 't' { - d.readStrIdx(1, 4) // rue - return true - } - d.d.errorf("json: decode bool: got first char %c", d.tok) - return false // "unreachable" -} - func (d *jsonDecDriver) ReadMapStart() int { if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b + d.tok = d.r.skip(&jsonCharWhitespaceSet) } if d.tok != '{' { d.d.errorf("json: expect char '%c' but got char '%c'", '{', d.tok) @@ -616,11 +526,7 @@ func (d *jsonDecDriver) ReadMapStart() int { func (d *jsonDecDriver) ReadArrayStart() int { if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b + d.tok = d.r.skip(&jsonCharWhitespaceSet) } if d.tok != '[' { d.d.errorf("json: expect char '%c' but got char 
'%c'", '[', d.tok) @@ -630,14 +536,141 @@ func (d *jsonDecDriver) ReadArrayStart() int { return -1 } +func (d *jsonDecDriver) CheckBreak() bool { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + return d.tok == '}' || d.tok == ']' +} + +func (d *jsonDecDriver) ReadArrayElem() { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.c != containerArrayStart { + const xc uint8 = ',' + if d.tok != xc { + d.d.errorf("json: expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + } + d.c = containerArrayElem +} + +func (d *jsonDecDriver) ReadArrayEnd() { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + const xc uint8 = ']' + if d.tok != xc { + d.d.errorf("json: expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + d.c = containerArrayEnd +} + +func (d *jsonDecDriver) ReadMapElemKey() { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.c != containerMapStart { + const xc uint8 = ',' + if d.tok != xc { + d.d.errorf("json: expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + } + d.c = containerMapKey +} + +func (d *jsonDecDriver) ReadMapElemValue() { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + const xc uint8 = ':' + if d.tok != xc { + d.d.errorf("json: expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + d.c = containerMapValue +} + +func (d *jsonDecDriver) ReadMapEnd() { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + const xc uint8 = '}' + if d.tok != xc { + d.d.errorf("json: expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + d.c = containerMapEnd +} + +// func (d *jsonDecDriver) readContainerState(c containerState, xc uint8, check bool) { +// if d.tok == 0 { +// d.tok = d.r.skip(&jsonCharWhitespaceSet) +// } +// if check { +// if d.tok != xc { +// d.d.errorf("json: expect char '%c' but got char '%c'", xc, d.tok) +// } +// d.tok = 0 +// } +// d.c = c +// } + +func (d *jsonDecDriver) readLit(length, fromIdx uint8) { + bs := d.r.readx(int(length)) + d.tok = 0 + if jsonValidateSymbols && !bytes.Equal(bs, jsonLiterals[fromIdx:fromIdx+length]) { + d.d.errorf("json: expecting %s: got %s", jsonLiterals[fromIdx:fromIdx+length], bs) + return + } +} + +func (d *jsonDecDriver) TryDecodeAsNil() bool { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + // TODO: we shouldn't try to see if "null" was here, right? + // only "null" denotes a nil + if d.tok == 'n' { + d.readLit(3, jsonLitNull+1) // ull + return true + } + return false +} + +func (d *jsonDecDriver) DecodeBool() (v bool) { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + fquot := d.c == containerMapKey && d.tok == '"' + if fquot { + d.tok = d.r.readn1() + } + switch d.tok { + case 'f': + d.readLit(4, jsonLitFalse+1) // alse + // v = false + case 't': + d.readLit(3, jsonLitTrue+1) // rue + v = true + default: + d.d.errorf("json: decode bool: got first char %c", d.tok) + // v = false // "unreachable" + } + if fquot { + d.r.readn1() + } + return +} + func (d *jsonDecDriver) ContainerType() (vt valueType) { // check container type by checking the first char if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b + d.tok = d.r.skip(&jsonCharWhitespaceSet) } if b := d.tok; b == '{' { return valueTypeMap @@ -653,264 +686,51 @@ func (d *jsonDecDriver) ContainerType() (vt valueType) { // return false // "unreachable" } -func (d *jsonDecDriver) decNum(storeBytes bool) { - // If it is has a . 
or an e|E, decode as a float; else decode as an int. +func (d *jsonDecDriver) decNumBytes() (bs []byte) { + // stores num bytes in d.bs if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b + d.tok = d.r.skip(&jsonCharWhitespaceSet) } - b := d.tok - var str bool - if b == '"' { - str = true - b = d.r.readn1() - } - if !(b == '+' || b == '-' || b == '.' || (b >= '0' && b <= '9')) { - d.d.errorf("json: decNum: got first char '%c'", b) - return + if d.tok == '"' { + bs = d.r.readUntil(d.b2[:0], '"') + bs = bs[:len(bs)-1] + } else { + d.r.unreadn1() + bs = d.r.readTo(d.bs[:0], &jsonNumSet) } d.tok = 0 + return bs +} - const cutoff = (1<<64-1)/uint64(10) + 1 // cutoff64(base) - const jsonNumUintMaxVal = 1<= jsonNumUintCutoff { - n.manOverflow = true - break - } - v := uint64(b - '0') - n.mantissa *= 10 - if v != 0 { - n1 := n.mantissa + v - if n1 < n.mantissa || n1 > jsonNumUintMaxVal { - n.manOverflow = true // n+v overflows - break - } - n.mantissa = n1 - } - case 6: - state = 7 - fallthrough - case 7: - if !(b == '0' && e == 0) { - e = e*10 + int16(b-'0') - } - default: - break LOOP - } - case '"': - if str { - if storeBytes { - d.bs = append(d.bs, '"') - } - b, eof = r.readn1eof() - } - break LOOP - default: - break LOOP - } - if storeBytes { - d.bs = append(d.bs, b) - } - b, eof = r.readn1eof() - } - - if jsonTruncateMantissa && n.mantissa != 0 { - for n.mantissa%10 == 0 { - n.mantissa /= 10 - n.exponent++ - } - } - - if e != 0 { - if eNeg { - n.exponent -= e - } else { - n.exponent += e - } - } - - // d.n = n - - if !eof { - if jsonUnreadAfterDecNum { - r.unreadn1() - } else { - if !jsonIsWS(b) { - d.tok = b - } - } - } - // fmt.Printf("1: n: bytes: %s, neg: %v, dot: %v, exponent: %v, mantissaEndIndex: %v\n", - // n.bytes, n.neg, n.dot, n.exponent, n.mantissaEndIndex) return } func (d *jsonDecDriver) DecodeInt(bitsize uint8) (i int64) { - d.decNum(false) - n := &d.n - if n.manOverflow { - d.d.errorf("json: overflow integer after: %v", n.mantissa) + bs := d.decNumBytes() + i, err := strconv.ParseInt(stringView(bs), 10, int(bitsize)) + if err != nil { + d.d.errorf("json: decode int from %s: %v", bs, err) return } - var u uint64 - if n.exponent == 0 { - u = n.mantissa - } else if n.exponent < 0 { - d.d.errorf("json: fractional integer") - return - } else if n.exponent > 0 { - var overflow bool - if u, overflow = n.uintExp(); overflow { - d.d.errorf("json: overflow integer") - return - } - } - i = int64(u) - if n.neg { - i = -i - } - if chkOvf.Int(i, bitsize) { - d.d.errorf("json: overflow %v bits: %s", bitsize, d.bs) - return - } - // fmt.Printf("DecodeInt: %v\n", i) - return -} - -// floatVal MUST only be called after a decNum, as d.bs now contains the bytes of the number -func (d *jsonDecDriver) floatVal() (f float64) { - f, useStrConv := d.n.floatVal() - if useStrConv { - var err error - if f, err = strconv.ParseFloat(stringView(d.bs), 64); err != nil { - panic(fmt.Errorf("parse float: %s, %v", d.bs, err)) - } - if d.n.neg { - f = -f - } - } - return -} - -func (d *jsonDecDriver) DecodeUint(bitsize uint8) (u uint64) { - d.decNum(false) - n := &d.n - if n.neg { - d.d.errorf("json: unsigned integer cannot be negative") - return - } - if n.manOverflow { - d.d.errorf("json: overflow integer after: %v", n.mantissa) - return - } - if n.exponent == 0 { - u = n.mantissa - } else if n.exponent < 0 { - d.d.errorf("json: fractional integer") - return - } else if n.exponent > 0 { - var overflow bool - if u, overflow = n.uintExp(); overflow { - 
d.d.errorf("json: overflow integer") - return - } - } - if chkOvf.Uint(u, bitsize) { - d.d.errorf("json: overflow %v bits: %s", bitsize, d.bs) - return - } - // fmt.Printf("DecodeUint: %v\n", u) return } func (d *jsonDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) { - d.decNum(true) - f = d.floatVal() - if chkOverflow32 && chkOvf.Float32(f) { - d.d.errorf("json: overflow float32: %v, %s", f, d.bs) + bs := d.decNumBytes() + bitsize := 64 + if chkOverflow32 { + bitsize = 32 + } + f, err := strconv.ParseFloat(stringView(bs), bitsize) + if err != nil { + d.d.errorf("json: decode float from %s: %v", bs, err) return } return @@ -929,23 +749,23 @@ func (d *jsonDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxta return } -func (d *jsonDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) { +func (d *jsonDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { // if decoding into raw bytes, and the RawBytesExt is configured, use it to decode. - if !isstring && d.se.i != nil { + if d.se.i != nil { bsOut = bs d.DecodeExt(&bsOut, 0, &d.se) return } d.appendStringAsBytes() - // if isstring, then just return the bytes, even if it is using the scratch buffer. - // the bytes will be converted to a string as needed. - if isstring { - return d.bs - } - // if appendStringAsBytes returned a zero-len slice, then treat as nil. - // This should only happen for null, and "". + // base64 encodes []byte{} as "", and we encode nil []byte as null. + // Consequently, base64 should decode null as a nil []byte, and "" as an empty []byte{}. + // appendStringAsBytes returns a zero-len slice for both, so as not to reset d.bs. + // However, it sets a fnull field to true, so we can check if a null was found. if len(d.bs) == 0 { - return nil + if d.fnull { + return nil + } + return []byte{} } bs0 := d.bs slen := base64.StdEncoding.DecodedLen(len(bs0)) @@ -969,184 +789,228 @@ func (d *jsonDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut [ func (d *jsonDecDriver) DecodeString() (s string) { d.appendStringAsBytes() - // if x := d.s.sc; x != nil && x.so && x.st == '}' { // map key - if d.c == containerMapKey { - return d.d.string(d.bs) - } - return string(d.bs) + return d.bsToString() +} + +func (d *jsonDecDriver) DecodeStringAsBytes() (s []byte) { + d.appendStringAsBytes() + return d.bs } func (d *jsonDecDriver) appendStringAsBytes() { if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b + d.tok = d.r.skip(&jsonCharWhitespaceSet) } + d.fnull = false if d.tok != '"' { // d.d.errorf("json: expect char '%c' but got char '%c'", '"', d.tok) // handle non-string scalar: null, true, false or a number switch d.tok { case 'n': - d.readStrIdx(10, 13) // ull + d.readLit(3, jsonLitNull+1) // ull d.bs = d.bs[:0] + d.fnull = true case 'f': - d.readStrIdx(5, 9) // alse + d.readLit(4, jsonLitFalse+1) // alse d.bs = d.bs[:5] copy(d.bs, "false") case 't': - d.readStrIdx(1, 4) // rue + d.readLit(3, jsonLitTrue+1) // rue d.bs = d.bs[:4] copy(d.bs, "true") default: // try to parse a valid number - d.decNum(true) + bs := d.decNumBytes() + d.bs = d.bs[:len(bs)] + copy(d.bs, bs) } return } d.tok = 0 - - v := d.bs[:0] - var c uint8 r := d.r - for { - c = r.readn1() - if c == '"' { - break - } else if c == '\\' { - c = r.readn1() - switch c { - case '"', '\\', '/', '\'': - v = append(v, c) - case 'b': - v = append(v, '\b') - case 'f': - v = append(v, '\f') - case 'n': - v = append(v, '\n') - case 'r': - v = append(v, '\r') - case 
't': - v = append(v, '\t') - case 'u': - rr := d.jsonU4(false) - // fmt.Printf("$$$$$$$$$: is surrogate: %v\n", utf16.IsSurrogate(rr)) - if utf16.IsSurrogate(rr) { - rr = utf16.DecodeRune(rr, d.jsonU4(true)) - } - w2 := utf8.EncodeRune(d.bstr[:], rr) - v = append(v, d.bstr[:w2]...) - default: - d.d.errorf("json: unsupported escaped value: %c", c) - } - } else { - v = append(v, c) + var cs = r.readUntil(d.b2[:0], '"') + var cslen = len(cs) + var c uint8 + v := d.bs[:0] + // append on each byte seen can be expensive, so we just + // keep track of where we last read a contiguous set of + // non-special bytes (using cursor variable), + // and when we see a special byte + // e.g. end-of-slice, " or \, + // we will append the full range into the v slice before proceeding + for i, cursor := 0, 0; ; { + if i == cslen { + v = append(v, cs[cursor:]...) + cs = r.readUntil(d.b2[:0], '"') + cslen = len(cs) + i, cursor = 0, 0 } + c = cs[i] + if c == '"' { + v = append(v, cs[cursor:i]...) + break + } + if c != '\\' { + i++ + continue + } + v = append(v, cs[cursor:i]...) + i++ + c = cs[i] + switch c { + case '"', '\\', '/', '\'': + v = append(v, c) + case 'b': + v = append(v, '\b') + case 'f': + v = append(v, '\f') + case 'n': + v = append(v, '\n') + case 'r': + v = append(v, '\r') + case 't': + v = append(v, '\t') + case 'u': + var r rune + var rr uint32 + if len(cs) < i+4 { // may help reduce bounds-checking + d.d.errorf(`json: need at least 4 more bytes for unicode sequence`) + } + // c = cs[i+4] // may help reduce bounds-checking + for j := 1; j < 5; j++ { + c = jsonU4Set[cs[i+j]] + if c == jsonU4SetErrVal { + // d.d.errorf(`json: unquoteStr: invalid hex char in \u unicode sequence: %q`, c) + r = unicode.ReplacementChar + i += 4 + goto encode_rune + } + rr = rr*16 + uint32(c) + } + r = rune(rr) + i += 4 + if utf16.IsSurrogate(r) { + if len(cs) >= i+6 && cs[i+2] == 'u' && cs[i+1] == '\\' { + i += 2 + // c = cs[i+4] // may help reduce bounds-checking + var rr1 uint32 + for j := 1; j < 5; j++ { + c = jsonU4Set[cs[i+j]] + if c == jsonU4SetErrVal { + // d.d.errorf(`json: unquoteStr: invalid hex char in \u unicode sequence: %q`, c) + r = unicode.ReplacementChar + i += 4 + goto encode_rune + } + rr1 = rr1*16 + uint32(c) + } + r = utf16.DecodeRune(r, rune(rr1)) + i += 4 + } else { + r = unicode.ReplacementChar + goto encode_rune + } + } + encode_rune: + w2 := utf8.EncodeRune(d.bstr[:], r) + v = append(v, d.bstr[:w2]...) + default: + d.d.errorf("json: unsupported escaped value: %c", c) + } + i++ + cursor = i } d.bs = v } -func (d *jsonDecDriver) jsonU4(checkSlashU bool) rune { - r := d.r - if checkSlashU && !(r.readn1() == '\\' && r.readn1() == 'u') { - d.d.errorf(`json: unquoteStr: invalid unicode sequence. Expecting \u`) - return 0 +func (d *jsonDecDriver) nakedNum(z *decNaked, bs []byte) (err error) { + if d.h.PreferFloat || jsonIsFloatBytesB3(bs) { // bytes.IndexByte(bs, '.') != -1 ||... 
+ // } else if d.h.PreferFloat || bytes.ContainsAny(bs, ".eE") { + z.v = valueTypeFloat + z.f, err = strconv.ParseFloat(stringView(bs), 64) + } else if d.h.SignedInteger || bs[0] == '-' { + z.v = valueTypeInt + z.i, err = strconv.ParseInt(stringView(bs), 10, 64) + } else { + z.v = valueTypeUint + z.u, err = strconv.ParseUint(stringView(bs), 10, 64) } - // u, _ := strconv.ParseUint(string(d.bstr[:4]), 16, 64) - var u uint32 - for i := 0; i < 4; i++ { - v := r.readn1() - if '0' <= v && v <= '9' { - v = v - '0' - } else if 'a' <= v && v <= 'z' { - v = v - 'a' + 10 - } else if 'A' <= v && v <= 'Z' { - v = v - 'A' + 10 - } else { - d.d.errorf(`json: unquoteStr: invalid hex char in \u unicode sequence: %q`, v) - return 0 + if err != nil && z.v != valueTypeFloat { + if v, ok := err.(*strconv.NumError); ok && (v.Err == strconv.ErrRange || v.Err == strconv.ErrSyntax) { + z.v = valueTypeFloat + z.f, err = strconv.ParseFloat(stringView(bs), 64) } - u = u*16 + uint32(v) } - return rune(u) + return +} + +func (d *jsonDecDriver) bsToString() string { + // if x := d.s.sc; x != nil && x.so && x.st == '}' { // map key + if jsonAlwaysReturnInternString || d.c == containerMapKey { + return d.d.string(d.bs) + } + return string(d.bs) } func (d *jsonDecDriver) DecodeNaked() { - z := &d.d.n + z := d.d.n // var decodeFurther bool if d.tok == 0 { - var b byte - r := d.r - for b = r.readn1(); jsonIsWS(b); b = r.readn1() { - } - d.tok = b + d.tok = d.r.skip(&jsonCharWhitespaceSet) } switch d.tok { case 'n': - d.readStrIdx(10, 13) // ull + d.readLit(3, jsonLitNull+1) // ull z.v = valueTypeNil case 'f': - d.readStrIdx(5, 9) // alse + d.readLit(4, jsonLitFalse+1) // alse z.v = valueTypeBool z.b = false case 't': - d.readStrIdx(1, 4) // rue + d.readLit(3, jsonLitTrue+1) // rue z.v = valueTypeBool z.b = true case '{': - z.v = valueTypeMap - // d.tok = 0 // don't consume. kInterfaceNaked will call ReadMapStart - // decodeFurther = true + z.v = valueTypeMap // don't consume. kInterfaceNaked will call ReadMapStart case '[': - z.v = valueTypeArray - // d.tok = 0 // don't consume. kInterfaceNaked will call ReadArrayStart - // decodeFurther = true + z.v = valueTypeArray // don't consume. kInterfaceNaked will call ReadArrayStart case '"': - z.v = valueTypeString - z.s = d.DecodeString() - default: // number - d.decNum(true) - n := &d.n - // if the string had a any of [.eE], then decode as float. 
- switch { - case n.explicitExponent, n.dot, n.exponent < 0, n.manOverflow: - z.v = valueTypeFloat - z.f = d.floatVal() - case n.exponent == 0: - u := n.mantissa - switch { - case n.neg: - z.v = valueTypeInt - z.i = -int64(u) - case d.h.SignedInteger: - z.v = valueTypeInt - z.i = int64(u) + // if a string, and MapKeyAsString, then try to decode it as a nil, bool or number first + d.appendStringAsBytes() + if len(d.bs) > 0 && d.c == containerMapKey && d.h.MapKeyAsString { + switch stringView(d.bs) { + case "null": + z.v = valueTypeNil + case "true": + z.v = valueTypeBool + z.b = true + case "false": + z.v = valueTypeBool + z.b = false default: - z.v = valueTypeUint - z.u = u - } - default: - u, overflow := n.uintExp() - switch { - case overflow: - z.v = valueTypeFloat - z.f = d.floatVal() - case n.neg: - z.v = valueTypeInt - z.i = -int64(u) - case d.h.SignedInteger: - z.v = valueTypeInt - z.i = int64(u) - default: - z.v = valueTypeUint - z.u = u + // check if a number: float, int or uint + if err := d.nakedNum(z, d.bs); err != nil { + z.v = valueTypeString + z.s = d.bsToString() + } } + } else { + z.v = valueTypeString + z.s = d.bsToString() + } + default: // number + bs := d.decNumBytes() + if len(bs) == 0 { + d.d.errorf("json: decode number from empty string") + return + } + if err := d.nakedNum(z, bs); err != nil { + d.d.errorf("json: decode number from %s: %v", bs, err) + return } - // fmt.Printf("DecodeNaked: Number: %T, %v\n", v, v) } // if decodeFurther { // d.s.sc.retryRead() @@ -1172,6 +1036,10 @@ func (d *jsonDecDriver) DecodeNaked() { // reading multiple values from a stream containing json and non-json content. // For example, a user can read a json value, then a cbor value, then a msgpack value, // all from the same stream in sequence. +// +// Note that, when decoding quoted strings, invalid UTF-8 or invalid UTF-16 surrogate pairs +// are not treated as an error. +// Instead, they are replaced by the Unicode replacement character U+FFFD. type JsonHandle struct { textEncodingType BasicHandle @@ -1203,8 +1071,28 @@ type JsonHandle struct { // By default, we encode them as \uXXX // to prevent security holes when served from some browsers. HTMLCharsAsIs bool + + // PreferFloat says that we will default to decoding a number as a float. + // If not set, we will examine the characters of the number and decode as an + // integer type if it doesn't have any of the characters [.eE]. + PreferFloat bool + + // TermWhitespace says that we add a whitespace character + // at the end of an encoding. + // + // The whitespace is important, especially if using numbers in a context + // where multiple items are written to a stream. + TermWhitespace bool + + // MapKeyAsString says to encode all map keys as strings. + // + // Use this to enforce strict json output. + // The only caveat is that nil value is ALWAYS written as null (never as "null") + MapKeyAsString bool } +func (h *JsonHandle) hasElemSeparators() bool { return true } + func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { return h.SetExt(rt, tag, &setExtWrapper{i: ext}) } @@ -1251,13 +1139,28 @@ func (d *jsonDecDriver) reset() { d.bs = d.bs[:0] } d.c, d.tok = 0, 0 - d.n.reset() + // d.n.reset() } -var jsonEncodeTerminate = []byte{' '} +// func jsonIsFloatBytes(bs []byte) bool { +// for _, v := range bs { +// // if v == '.' 
|| v == 'e' || v == 'E' { +// if jsonIsFloatSet.isset(v) { +// return true +// } +// } +// return false +// } -func (h *JsonHandle) rpcEncodeTerminate() []byte { - return jsonEncodeTerminate +func jsonIsFloatBytesB2(bs []byte) bool { + return bytes.IndexByte(bs, '.') != -1 || + bytes.IndexByte(bs, 'E') != -1 +} + +func jsonIsFloatBytesB3(bs []byte) bool { + return bytes.IndexByte(bs, '.') != -1 || + bytes.IndexByte(bs, 'E') != -1 || + bytes.IndexByte(bs, 'e') != -1 } var _ decDriver = (*jsonDecDriver)(nil) diff --git a/vendor/github.com/ugorji/go/codec/mammoth-test.go.tmpl b/vendor/github.com/ugorji/go/codec/mammoth-test.go.tmpl new file mode 100644 index 0000000000..9a46f13814 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/mammoth-test.go.tmpl @@ -0,0 +1,100 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED from mammoth-test.go.tmpl +// ************************************************************ + +package codec + +import "testing" +import "fmt" + +// TestMammoth has all the different paths optimized in fast-path +// It has all the primitives, slices and maps. +// +// For each of those types, it has a pointer and a non-pointer field. + +func init() { _ = fmt.Printf } // so we can include fmt as needed + +type TestMammoth struct { + +{{range .Values }}{{if .Primitive }}{{/* +*/}}{{ .MethodNamePfx "F" true }} {{ .Primitive }} +{{ .MethodNamePfx "Fptr" true }} *{{ .Primitive }} +{{end}}{{end}} + +{{range .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/* +*/}}{{ .MethodNamePfx "F" false }} []{{ .Elem }} +{{ .MethodNamePfx "Fptr" false }} *[]{{ .Elem }} +{{end}}{{end}}{{end}} + +{{range .Values }}{{if not .Primitive }}{{if .MapKey }}{{/* +*/}}{{ .MethodNamePfx "F" false }} map[{{ .MapKey }}]{{ .Elem }} +{{ .MethodNamePfx "Fptr" false }} *map[{{ .MapKey }}]{{ .Elem }} +{{end}}{{end}}{{end}} + +} + +{{range .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/* +*/}} type {{ .MethodNamePfx "type" false }} []{{ .Elem }} +func (_ {{ .MethodNamePfx "type" false }}) MapBySlice() { } +{{end}}{{end}}{{end}} + +func doTestMammothSlices(t *testing.T, h Handle) { +{{range $i, $e := .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/* +*/}} + for _, v := range [][]{{ .Elem }}{ nil, []{{ .Elem }}{}, []{{ .Elem }}{ {{ nonzerocmd .Elem }}, {{ nonzerocmd .Elem }} } } { + // fmt.Printf(">>>> running mammoth slice v{{$i}}: %v\n", v) + var v{{$i}}v1, v{{$i}}v2, v{{$i}}v3, v{{$i}}v4 []{{ .Elem }} + v{{$i}}v1 = v + bs{{$i}}, _ := testMarshalErr(v{{$i}}v1, h, t, "enc-slice-v{{$i}}") + if v != nil { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) } + testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}") + bs{{$i}}, _ = testMarshalErr(&v{{$i}}v1, h, t, "enc-slice-v{{$i}}-p") + v{{$i}}v2 = nil + testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p") + // ... 
+ v{{$i}}v2 = nil + if v != nil { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) } + v{{$i}}v3 = {{ .MethodNamePfx "type" false }}(v{{$i}}v1) + bs{{$i}}, _ = testMarshalErr(v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom") + v{{$i}}v4 = {{ .MethodNamePfx "type" false }}(v{{$i}}v2) + testUnmarshalErr(v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom") + testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-slice-v{{$i}}-custom") + v{{$i}}v2 = nil + bs{{$i}}, _ = testMarshalErr(&v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom-p") + v{{$i}}v4 = {{ .MethodNamePfx "type" false }}(v{{$i}}v2) + testUnmarshalErr(&v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom-p") + testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-slice-v{{$i}}-custom-p") + } +{{end}}{{end}}{{end}} +} + +func doTestMammothMaps(t *testing.T, h Handle) { +{{range $i, $e := .Values }}{{if not .Primitive }}{{if .MapKey }}{{/* +*/}} + for _, v := range []map[{{ .MapKey }}]{{ .Elem }}{ nil, map[{{ .MapKey }}]{{ .Elem }}{}, map[{{ .MapKey }}]{{ .Elem }}{ {{ nonzerocmd .MapKey }}:{{ nonzerocmd .Elem }} } } { + // fmt.Printf(">>>> running mammoth map v{{$i}}: %v\n", v) + var v{{$i}}v1, v{{$i}}v2 map[{{ .MapKey }}]{{ .Elem }} + v{{$i}}v1 = v + bs{{$i}}, _ := testMarshalErr(v{{$i}}v1, h, t, "enc-map-v{{$i}}") + if v != nil { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } + testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}") + bs{{$i}}, _ = testMarshalErr(&v{{$i}}v1, h, t, "enc-map-v{{$i}}-p") + v{{$i}}v2 = nil + testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p") + testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-p") + } +{{end}}{{end}}{{end}} + +} + +func doTestMammothMapsAndSlices(t *testing.T, h Handle) { + doTestMammothSlices(t, h) + doTestMammothMaps(t, h) +} diff --git a/vendor/github.com/ugorji/go/codec/msgpack.go b/vendor/github.com/ugorji/go/codec/msgpack.go index 7309769739..fcc3177aaa 100644 --- a/vendor/github.com/ugorji/go/codec/msgpack.go +++ b/vendor/github.com/ugorji/go/codec/msgpack.go @@ -104,7 +104,8 @@ var ( type msgpackEncDriver struct { noBuiltInTypes - encNoSeparator + encDriverNoopContainerWriter + // encNoSeparator e *Encoder w encWriter h *MsgpackHandle @@ -213,21 +214,22 @@ func (e *msgpackEncDriver) encodeExtPreamble(xtag byte, l int) { } } -func (e *msgpackEncDriver) EncodeArrayStart(length int) { +func (e *msgpackEncDriver) WriteArrayStart(length int) { e.writeContainerLen(msgpackContainerList, length) } -func (e *msgpackEncDriver) EncodeMapStart(length int) { +func (e *msgpackEncDriver) WriteMapStart(length int) { e.writeContainerLen(msgpackContainerMap, length) } func (e *msgpackEncDriver) EncodeString(c charEncoding, s string) { + slen := len(s) if c == c_RAW && e.h.WriteExt { - e.writeContainerLen(msgpackContainerBin, len(s)) + e.writeContainerLen(msgpackContainerBin, slen) } else { - e.writeContainerLen(msgpackContainerStr, len(s)) + e.writeContainerLen(msgpackContainerStr, slen) } - if len(s) > 0 { + if slen > 0 { e.w.writestr(s) } } @@ -237,12 +239,13 @@ func (e *msgpackEncDriver) EncodeSymbol(v string) { } func (e *msgpackEncDriver) EncodeStringBytes(c charEncoding, bs []byte) { + slen := len(bs) if c == c_RAW && e.h.WriteExt { - e.writeContainerLen(msgpackContainerBin, len(bs)) + e.writeContainerLen(msgpackContainerBin, slen) } else { - e.writeContainerLen(msgpackContainerStr, len(bs)) + e.writeContainerLen(msgpackContainerStr, slen) } - if len(bs) > 0 { + if slen > 0 { e.w.writeb(bs) } } @@ -272,8 +275,9 
@@ type msgpackDecDriver struct { bdRead bool br bool // bytes reader noBuiltInTypes - noStreamingCodec - decNoSeparator + // noStreamingCodec + // decNoSeparator + decDriverNoopContainerReader } // Note: This returns either a primitive (int, bool, etc) for non-containers, @@ -286,7 +290,7 @@ func (d *msgpackDecDriver) DecodeNaked() { d.readNextBd() } bd := d.bd - n := &d.d.n + n := d.d.n var decodeFurther bool switch bd { @@ -349,11 +353,11 @@ func (d *msgpackDecDriver) DecodeNaked() { n.s = d.DecodeString() } else { n.v = valueTypeBytes - n.l = d.DecodeBytes(nil, false, false) + n.l = d.DecodeBytes(nil, false) } case bd == mpBin8, bd == mpBin16, bd == mpBin32: n.v = valueTypeBytes - n.l = d.DecodeBytes(nil, false, false) + n.l = d.DecodeBytes(nil, false) case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: n.v = valueTypeArray decodeFurther = true @@ -525,17 +529,46 @@ func (d *msgpackDecDriver) DecodeBool() (b bool) { return } -func (d *msgpackDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) { +func (d *msgpackDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { if !d.bdRead { d.readNextBd() } + + // DecodeBytes could be from: bin str fixstr fixarray array ... var clen int - // ignore isstring. Expect that the bytes may be found from msgpackContainerStr or msgpackContainerBin - if bd := d.bd; bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { - clen = d.readContainerLen(msgpackContainerBin) - } else { + vt := d.ContainerType() + switch vt { + case valueTypeBytes: + // valueTypeBytes may be a mpBin or an mpStr container + if bd := d.bd; bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { + clen = d.readContainerLen(msgpackContainerBin) + } else { + clen = d.readContainerLen(msgpackContainerStr) + } + case valueTypeString: clen = d.readContainerLen(msgpackContainerStr) + case valueTypeArray: + clen = d.readContainerLen(msgpackContainerList) + // ensure everything after is one byte each + for i := 0; i < clen; i++ { + d.readNextBd() + if d.bd == mpNil { + bs = append(bs, 0) + } else if d.bd == mpUint8 { + bs = append(bs, d.r.readn1()) + } else { + d.d.errorf("cannot read non-byte into a byte array") + return + } + } + d.bdRead = false + return bs + default: + d.d.errorf("invalid container type: expecting bin|str|array") + return } + + // these are (bin|str)(8|16|32) // println("DecodeBytes: clen: ", clen) d.bdRead = false // bytes may be nil, so handle it. if nil, clen=-1. @@ -553,7 +586,11 @@ func (d *msgpackDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOu } func (d *msgpackDecDriver) DecodeString() (s string) { - return string(d.DecodeBytes(d.b[:], true, true)) + return string(d.DecodeBytes(d.b[:], true)) +} + +func (d *msgpackDecDriver) DecodeStringAsBytes() (s []byte) { + return d.DecodeBytes(d.b[:], true) } func (d *msgpackDecDriver) readNextBd() { @@ -687,10 +724,10 @@ func (d *msgpackDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs } xbd := d.bd if xbd == mpBin8 || xbd == mpBin16 || xbd == mpBin32 { - xbs = d.DecodeBytes(nil, false, true) + xbs = d.DecodeBytes(nil, true) } else if xbd == mpStr8 || xbd == mpStr16 || xbd == mpStr32 || (xbd >= mpFixStrMin && xbd <= mpFixStrMax) { - xbs = d.DecodeBytes(nil, true, true) + xbs = d.DecodeStringAsBytes() } else { clen := d.readExtLen() xtag = d.r.readn1() @@ -725,6 +762,7 @@ type MsgpackHandle struct { // a []byte or string based on the setting of RawToString. 
WriteExt bool binaryEncodingType + noElemSeparators } func (h *MsgpackHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { diff --git a/vendor/github.com/ugorji/go/codec/noop.go b/vendor/github.com/ugorji/go/codec/noop.go deleted file mode 100644 index cfee3d084d..0000000000 --- a/vendor/github.com/ugorji/go/codec/noop.go +++ /dev/null @@ -1,213 +0,0 @@ -// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -package codec - -import ( - "math/rand" - "time" -) - -// NoopHandle returns a no-op handle. It basically does nothing. -// It is only useful for benchmarking, as it gives an idea of the -// overhead from the codec framework. -// -// LIBRARY USERS: *** DO NOT USE *** -func NoopHandle(slen int) *noopHandle { - h := noopHandle{} - h.rand = rand.New(rand.NewSource(time.Now().UnixNano())) - h.B = make([][]byte, slen) - h.S = make([]string, slen) - for i := 0; i < len(h.S); i++ { - b := make([]byte, i+1) - for j := 0; j < len(b); j++ { - b[j] = 'a' + byte(i) - } - h.B[i] = b - h.S[i] = string(b) - } - return &h -} - -// noopHandle does nothing. -// It is used to simulate the overhead of the codec framework. -type noopHandle struct { - BasicHandle - binaryEncodingType - noopDrv // noopDrv is unexported here, so we can get a copy of it when needed. -} - -type noopDrv struct { - d *Decoder - e *Encoder - i int - S []string - B [][]byte - mks []bool // stack. if map (true), else if array (false) - mk bool // top of stack. what container are we on? map or array? - ct valueType // last response for IsContainerType. - cb int // counter for ContainerType - rand *rand.Rand -} - -func (h *noopDrv) r(v int) int { return h.rand.Intn(v) } -func (h *noopDrv) m(v int) int { h.i++; return h.i % v } - -func (h *noopDrv) newEncDriver(e *Encoder) encDriver { h.e = e; return h } -func (h *noopDrv) newDecDriver(d *Decoder) decDriver { h.d = d; return h } - -func (h *noopDrv) reset() {} -func (h *noopDrv) uncacheRead() {} - -// --- encDriver - -// stack functions (for map and array) -func (h *noopDrv) start(b bool) { - // println("start", len(h.mks)+1) - h.mks = append(h.mks, b) - h.mk = b -} -func (h *noopDrv) end() { - // println("end: ", len(h.mks)-1) - h.mks = h.mks[:len(h.mks)-1] - if len(h.mks) > 0 { - h.mk = h.mks[len(h.mks)-1] - } else { - h.mk = false - } -} - -func (h *noopDrv) EncodeBuiltin(rt uintptr, v interface{}) {} -func (h *noopDrv) EncodeNil() {} -func (h *noopDrv) EncodeInt(i int64) {} -func (h *noopDrv) EncodeUint(i uint64) {} -func (h *noopDrv) EncodeBool(b bool) {} -func (h *noopDrv) EncodeFloat32(f float32) {} -func (h *noopDrv) EncodeFloat64(f float64) {} -func (h *noopDrv) EncodeRawExt(re *RawExt, e *Encoder) {} -func (h *noopDrv) EncodeArrayStart(length int) { h.start(true) } -func (h *noopDrv) EncodeMapStart(length int) { h.start(false) } -func (h *noopDrv) EncodeEnd() { h.end() } - -func (h *noopDrv) EncodeString(c charEncoding, v string) {} -func (h *noopDrv) EncodeSymbol(v string) {} -func (h *noopDrv) EncodeStringBytes(c charEncoding, v []byte) {} - -func (h *noopDrv) EncodeExt(rv interface{}, xtag uint64, ext Ext, e *Encoder) {} - -// ---- decDriver -func (h *noopDrv) initReadNext() {} -func (h *noopDrv) CheckBreak() bool { return false } -func (h *noopDrv) IsBuiltinType(rt uintptr) bool { return false } -func (h *noopDrv) DecodeBuiltin(rt uintptr, v interface{}) {} -func (h *noopDrv) DecodeInt(bitsize uint8) (i int64) { return int64(h.m(15)) } -func (h *noopDrv) 
DecodeUint(bitsize uint8) (ui uint64) { return uint64(h.m(35)) } -func (h *noopDrv) DecodeFloat(chkOverflow32 bool) (f float64) { return float64(h.m(95)) } -func (h *noopDrv) DecodeBool() (b bool) { return h.m(2) == 0 } -func (h *noopDrv) DecodeString() (s string) { return h.S[h.m(8)] } - -// func (h *noopDrv) DecodeStringAsBytes(bs []byte) []byte { return h.DecodeBytes(bs) } - -func (h *noopDrv) DecodeBytes(bs []byte, isstring, zerocopy bool) []byte { return h.B[h.m(len(h.B))] } - -func (h *noopDrv) ReadEnd() { h.end() } - -// toggle map/slice -func (h *noopDrv) ReadMapStart() int { h.start(true); return h.m(10) } -func (h *noopDrv) ReadArrayStart() int { h.start(false); return h.m(10) } - -func (h *noopDrv) ContainerType() (vt valueType) { - // return h.m(2) == 0 - // handle kStruct, which will bomb is it calls this and doesn't get back a map or array. - // consequently, if the return value is not map or array, reset it to one of them based on h.m(7) % 2 - // for kstruct: at least one out of every 2 times, return one of valueTypeMap or Array (else kstruct bombs) - // however, every 10th time it is called, we just return something else. - var vals = [...]valueType{valueTypeArray, valueTypeMap} - // ------------ TAKE ------------ - // if h.cb%2 == 0 { - // if h.ct == valueTypeMap || h.ct == valueTypeArray { - // } else { - // h.ct = vals[h.m(2)] - // } - // } else if h.cb%5 == 0 { - // h.ct = valueType(h.m(8)) - // } else { - // h.ct = vals[h.m(2)] - // } - // ------------ TAKE ------------ - // if h.cb%16 == 0 { - // h.ct = valueType(h.cb % 8) - // } else { - // h.ct = vals[h.cb%2] - // } - h.ct = vals[h.cb%2] - h.cb++ - return h.ct - - // if h.ct == valueTypeNil || h.ct == valueTypeString || h.ct == valueTypeBytes { - // return h.ct - // } - // return valueTypeUnset - // TODO: may need to tweak this so it works. - // if h.ct == valueTypeMap && vt == valueTypeArray || h.ct == valueTypeArray && vt == valueTypeMap { - // h.cb = !h.cb - // h.ct = vt - // return h.cb - // } - // // go in a loop and check it. - // h.ct = vt - // h.cb = h.m(7) == 0 - // return h.cb -} -func (h *noopDrv) TryDecodeAsNil() bool { - if h.mk { - return false - } else { - return h.m(8) == 0 - } -} -func (h *noopDrv) DecodeExt(rv interface{}, xtag uint64, ext Ext) uint64 { - return 0 -} - -func (h *noopDrv) DecodeNaked() { - // use h.r (random) not h.m() because h.m() could cause the same value to be given. 
- var sk int - if h.mk { - // if mapkey, do not support values of nil OR bytes, array, map or rawext - sk = h.r(7) + 1 - } else { - sk = h.r(12) - } - n := &h.d.n - switch sk { - case 0: - n.v = valueTypeNil - case 1: - n.v, n.b = valueTypeBool, false - case 2: - n.v, n.b = valueTypeBool, true - case 3: - n.v, n.i = valueTypeInt, h.DecodeInt(64) - case 4: - n.v, n.u = valueTypeUint, h.DecodeUint(64) - case 5: - n.v, n.f = valueTypeFloat, h.DecodeFloat(true) - case 6: - n.v, n.f = valueTypeFloat, h.DecodeFloat(false) - case 7: - n.v, n.s = valueTypeString, h.DecodeString() - case 8: - n.v, n.l = valueTypeBytes, h.B[h.m(len(h.B))] - case 9: - n.v = valueTypeArray - case 10: - n.v = valueTypeMap - default: - n.v = valueTypeExt - n.u = h.DecodeUint(64) - n.l = h.B[h.m(len(h.B))] - } - h.ct = n.v - return -} diff --git a/vendor/github.com/ugorji/go/codec/prebuild.go b/vendor/github.com/ugorji/go/codec/prebuild.go deleted file mode 100644 index 2353263e88..0000000000 --- a/vendor/github.com/ugorji/go/codec/prebuild.go +++ /dev/null @@ -1,3 +0,0 @@ -package codec - -//go:generate bash prebuild.sh diff --git a/vendor/github.com/ugorji/go/codec/prebuild.sh b/vendor/github.com/ugorji/go/codec/prebuild.sh deleted file mode 100755 index 04c61e48f2..0000000000 --- a/vendor/github.com/ugorji/go/codec/prebuild.sh +++ /dev/null @@ -1,205 +0,0 @@ -#!/bin/bash - -# _needgen is a helper function to tell if we need to generate files for msgp, codecgen. -_needgen() { - local a="$1" - zneedgen=0 - if [[ ! -e "$a" ]] - then - zneedgen=1 - echo 1 - return 0 - fi - for i in `ls -1 *.go.tmpl gen.go values_test.go` - do - if [[ "$a" -ot "$i" ]] - then - zneedgen=1 - echo 1 - return 0 - fi - done - echo 0 -} - -# _build generates fast-path.go and gen-helper.go. -# -# It is needed because there is some dependency between the generated code -# and the other classes. Consequently, we have to totally remove the -# generated files and put stubs in place, before calling "go run" again -# to recreate them. -_build() { - if ! 
[[ "${zforce}" == "1" || - "1" == $( _needgen "fast-path.generated.go" ) || - "1" == $( _needgen "gen-helper.generated.go" ) || - "1" == $( _needgen "gen.generated.go" ) || - 1 == 0 ]] - then - return 0 - fi - - # echo "Running prebuild" - if [ "${zbak}" == "1" ] - then - # echo "Backing up old generated files" - _zts=`date '+%m%d%Y_%H%M%S'` - _gg=".generated.go" - [ -e "gen-helper${_gg}" ] && mv gen-helper${_gg} gen-helper${_gg}__${_zts}.bak - [ -e "fast-path${_gg}" ] && mv fast-path${_gg} fast-path${_gg}__${_zts}.bak - [ -e "gen${_gg}" ] && mv gen${_gg} gen${_gg}__${_zts}.bak - # [ -e "safe${_gg}" ] && mv safe${_gg} safe${_gg}__${_zts}.bak - # [ -e "unsafe${_gg}" ] && mv unsafe${_gg} unsafe${_gg}__${_zts}.bak - fi - rm -f gen-helper.generated.go fast-path.generated.go \ - gen.generated.go \ - *safe.generated.go *_generated_test.go *.generated_ffjson_expose.go - - cat > gen.generated.go <> gen.generated.go < gen-dec-map.go.tmpl - - cat >> gen.generated.go <> gen.generated.go < gen-dec-array.go.tmpl - - cat >> gen.generated.go < gen-from-tmpl.codec.generated.go < gen-from-tmpl.generated.go <>>>>>> TAGS: $ztags" - - OPTIND=1 - while getopts "_cdefgilmnrtsuvwxz" flag - do - case "x$flag" in - 'xt') printf ">>>>>>> REGULAR : "; go test "-tags=$ztags" $zargs ; sleep 2 ;; - 'xc') printf ">>>>>>> CANONICAL : "; go test "-tags=$ztags" $zargs -tc; sleep 2 ;; - 'xi') printf ">>>>>>> I/O : "; go test "-tags=$ztags" $zargs -ti; sleep 2 ;; - 'xn') printf ">>>>>>> NO_SYMBOLS : "; go test "-tags=$ztags" -run=Binc $zargs -tn; sleep 2 ;; - 'xs') printf ">>>>>>> TO_ARRAY : "; go test "-tags=$ztags" $zargs -ts; sleep 2 ;; - 'xe') printf ">>>>>>> INTERN : "; go test "-tags=$ztags" $zargs -te; sleep 2 ;; - 'xd') printf ">>>>>>> INDENT : "; - go test "-tags=$ztags" -run=JsonCodecsTable -td=-1 $zargs; - go test "-tags=$ztags" -run=JsonCodecsTable -td=8 $zargs; - sleep 2 ;; - *) ;; - esac - done - shift $((OPTIND-1)) - - OPTIND=1 -} - -# echo ">>>>>>> RUNNING VARIATIONS OF TESTS" -if [[ "x$@" = "x" || "x$@" = "x-A" ]]; then - # All: r, x, g, gu - _run "-_tcinsed_ml" # regular - _run "-_tcinsed_ml_z" # regular with reset - _run "-w_tcinsed_ml" # regular with max init len - _run "-_tcinsed_ml_f" # regular with no fastpath (notfastpath) - _run "-x_tcinsed_ml" # external - _run "-gx_tcinsed_ml" # codecgen: requires external - _run "-gxu_tcinsed_ml" # codecgen + unsafe -elif [[ "x$@" = "x-Z" ]]; then - # Regular - _run "-_tcinsed_ml" # regular - _run "-_tcinsed_ml_z" # regular with reset -elif [[ "x$@" = "x-F" ]]; then - # regular with notfastpath - _run "-_tcinsed_ml_f" # regular - _run "-_tcinsed_ml_zf" # regular with reset -elif [[ "x$@" = "x-C" ]]; then - # codecgen - _run "-gx_tcinsed_ml" # codecgen: requires external - _run "-gxu_tcinsed_ml" # codecgen + unsafe - _run "-gxuw_tcinsed_ml" # codecgen + unsafe + maxinitlen -elif [[ "x$@" = "x-X" ]]; then - # external - _run "-x_tcinsed_ml" # external -elif [[ "x$@" = "x-h" || "x$@" = "x-?" ]]; then - cat < 0 { + if cs.req.Method == "HEAD" && len(data) > 0 { + cc.logf("protocol error: received DATA on a HEAD request") + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeProtocol, + }) + return nil + } // Check connection-level flow control. 
cc.mu.Lock() if cs.inflow.available() >= int32(f.Length) { diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go index 6b0dfae319..54ab4a88e7 100644 --- a/vendor/golang.org/x/net/http2/write.go +++ b/vendor/golang.org/x/net/http2/write.go @@ -10,7 +10,6 @@ import ( "log" "net/http" "net/url" - "time" "golang.org/x/net/http2/hpack" "golang.org/x/net/lex/httplex" @@ -90,11 +89,7 @@ type writeGoAway struct { func (p *writeGoAway) writeFrame(ctx writeContext) error { err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil) - if p.code != 0 { - ctx.Flush() // ignore error: we're hanging up on them anyway - time.Sleep(50 * time.Millisecond) - ctx.CloseConn() - } + ctx.Flush() // ignore error: we're hanging up on them anyway return err } diff --git a/vendor/golang.org/x/net/idna/idna.go b/vendor/golang.org/x/net/idna/idna.go index ec8232b2e6..7d328fcb0f 100644 --- a/vendor/golang.org/x/net/idna/idna.go +++ b/vendor/golang.org/x/net/idna/idna.go @@ -21,6 +21,7 @@ import ( "unicode/utf8" "golang.org/x/text/secure/bidirule" + "golang.org/x/text/unicode/bidi" "golang.org/x/text/unicode/norm" ) @@ -68,7 +69,7 @@ func VerifyDNSLength(verify bool) Option { } // RemoveLeadingDots removes leading label separators. Leading runes that map to -// dots, such as U+3002, are removed as well. +// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well. // // This is the behavior suggested by the UTS #46 and is adopted by some // browsers. @@ -92,7 +93,7 @@ func ValidateLabels(enable bool) Option { } } -// StrictDomainName limits the set of permissable ASCII characters to those +// StrictDomainName limits the set of permissible ASCII characters to those // allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the // hyphen). This is set by default for MapForLookup and ValidateForRegistration. // @@ -142,7 +143,6 @@ func MapForLookup() Option { o.mapping = validateAndMap StrictDomainName(true)(o) ValidateLabels(true)(o) - RemoveLeadingDots(true)(o) } } @@ -160,7 +160,7 @@ type options struct { // mapping implements a validation and mapping step as defined in RFC 5895 // or UTS 46, tailored to, for example, domain registration or lookup. - mapping func(p *Profile, s string) (string, error) + mapping func(p *Profile, s string) (mapped string, isBidi bool, err error) // bidirule, if specified, checks whether s conforms to the Bidi Rule // defined in RFC 5893. @@ -251,23 +251,21 @@ var ( punycode = &Profile{} lookup = &Profile{options{ - transitional: true, - useSTD3Rules: true, - validateLabels: true, - removeLeadingDots: true, - trie: trie, - fromPuny: validateFromPunycode, - mapping: validateAndMap, - bidirule: bidirule.ValidString, + transitional: true, + useSTD3Rules: true, + validateLabels: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, }} display = &Profile{options{ - useSTD3Rules: true, - validateLabels: true, - removeLeadingDots: true, - trie: trie, - fromPuny: validateFromPunycode, - mapping: validateAndMap, - bidirule: bidirule.ValidString, + useSTD3Rules: true, + validateLabels: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, }} registration = &Profile{options{ useSTD3Rules: true, @@ -302,14 +300,16 @@ func (e runeError) Error() string { // see http://www.unicode.org/reports/tr46. 
func (p *Profile) process(s string, toASCII bool) (string, error) { var err error + var isBidi bool if p.mapping != nil { - s, err = p.mapping(p, s) + s, isBidi, err = p.mapping(p, s) } // Remove leading empty labels. if p.removeLeadingDots { for ; len(s) > 0 && s[0] == '.'; s = s[1:] { } } + // TODO: allow for a quick check the tables data. // It seems like we should only create this error on ToASCII, but the // UTS 46 conformance tests suggests we should always check this. if err == nil && p.verifyDNSLength && s == "" { @@ -335,6 +335,7 @@ func (p *Profile) process(s string, toASCII bool) (string, error) { // Spec says keep the old label. continue } + isBidi = isBidi || bidirule.DirectionString(u) != bidi.LeftToRight labels.set(u) if err == nil && p.validateLabels { err = p.fromPuny(p, u) @@ -349,6 +350,14 @@ func (p *Profile) process(s string, toASCII bool) (string, error) { err = p.validateLabel(label) } } + if isBidi && p.bidirule != nil && err == nil { + for labels.reset(); !labels.done(); labels.next() { + if !p.bidirule(labels.label()) { + err = &labelError{s, "B"} + break + } + } + } if toASCII { for labels.reset(); !labels.done(); labels.next() { label := labels.label() @@ -380,16 +389,23 @@ func (p *Profile) process(s string, toASCII bool) (string, error) { return s, err } -func normalize(p *Profile, s string) (string, error) { - return norm.NFC.String(s), nil +func normalize(p *Profile, s string) (mapped string, isBidi bool, err error) { + // TODO: consider first doing a quick check to see if any of these checks + // need to be done. This will make it slower in the general case, but + // faster in the common case. + mapped = norm.NFC.String(s) + isBidi = bidirule.DirectionString(mapped) == bidi.RightToLeft + return mapped, isBidi, nil } -func validateRegistration(p *Profile, s string) (string, error) { +func validateRegistration(p *Profile, s string) (idem string, bidi bool, err error) { + // TODO: filter need for normalization in loop below. if !norm.NFC.IsNormalString(s) { - return s, &labelError{s, "V1"} + return s, false, &labelError{s, "V1"} } for i := 0; i < len(s); { v, sz := trie.lookupString(s[i:]) + bidi = bidi || info(v).isBidi(s[i:]) // Copy bytes not copied so far. switch p.simplify(info(v).category()) { // TODO: handle the NV8 defined in the Unicode idna data set to allow @@ -397,21 +413,41 @@ func validateRegistration(p *Profile, s string) (string, error) { case valid, deviation: case disallowed, mapped, unknown, ignored: r, _ := utf8.DecodeRuneInString(s[i:]) - return s, runeError(r) + return s, bidi, runeError(r) } i += sz } - return s, nil + return s, bidi, nil } -func validateAndMap(p *Profile, s string) (string, error) { +func (c info) isBidi(s string) bool { + if !c.isMapped() { + return c&attributesMask == rtl + } + // TODO: also store bidi info for mapped data. This is possible, but a bit + // cumbersome and not for the common case. + p, _ := bidi.LookupString(s) + switch p.Class() { + case bidi.R, bidi.AL, bidi.AN: + return true + } + return false +} + +func validateAndMap(p *Profile, s string) (vm string, bidi bool, err error) { var ( - err error - b []byte - k int + b []byte + k int ) + // combinedInfoBits contains the or-ed bits of all runes. We use this + // to derive the mayNeedNorm bit later. This may trigger normalization + // overeagerly, but it will not do so in the common case. The end result + // is another 10% saving on BenchmarkProfile for the common case. 
+ var combinedInfoBits info for i := 0; i < len(s); { v, sz := trie.lookupString(s[i:]) + combinedInfoBits |= info(v) + bidi = bidi || info(v).isBidi(s[i:]) start := i i += sz // Copy bytes not copied so far. @@ -438,7 +474,9 @@ func validateAndMap(p *Profile, s string) (string, error) { } if k == 0 { // No changes so far. - s = norm.NFC.String(s) + if combinedInfoBits&mayNeedNorm != 0 { + s = norm.NFC.String(s) + } } else { b = append(b, s[k:]...) if norm.NFC.QuickSpan(b) != len(b) { @@ -447,7 +485,7 @@ func validateAndMap(p *Profile, s string) (string, error) { // TODO: the punycode converters require strings as input. s = string(b) } - return s, err + return s, bidi, err } // A labelIter allows iterating over domain name labels. @@ -542,6 +580,8 @@ func validateFromPunycode(p *Profile, s string) error { if !norm.NFC.IsNormalString(s) { return &labelError{s, "V1"} } + // TODO: detect whether string may have to be normalized in the following + // loop. for i := 0; i < len(s); { v, sz := trie.lookupString(s[i:]) if c := p.simplify(info(v).category()); c != valid && c != deviation { @@ -616,16 +656,13 @@ var joinStates = [][numJoinTypes]joinState{ // validateLabel validates the criteria from Section 4.1. Item 1, 4, and 6 are // already implicitly satisfied by the overall implementation. -func (p *Profile) validateLabel(s string) error { +func (p *Profile) validateLabel(s string) (err error) { if s == "" { if p.verifyDNSLength { return &labelError{s, "A4"} } return nil } - if p.bidirule != nil && !p.bidirule(s) { - return &labelError{s, "B"} - } if !p.validateLabels { return nil } diff --git a/vendor/golang.org/x/net/idna/tables.go b/vendor/golang.org/x/net/idna/tables.go index d2819345fc..f910b26914 100644 --- a/vendor/golang.org/x/net/idna/tables.go +++ b/vendor/golang.org/x/net/idna/tables.go @@ -3,7 +3,7 @@ package idna // UnicodeVersion is the Unicode version from which the tables in this package are derived. -const UnicodeVersion = "9.0.0" +const UnicodeVersion = "10.0.0" var mappings string = "" + // Size: 8176 bytes "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" + @@ -544,7 +544,7 @@ func (t *idnaTrie) lookupStringUnsafe(s string) uint16 { return 0 } -// idnaTrie. Total size: 28496 bytes (27.83 KiB). Checksum: 43288b883596640e. +// idnaTrie. Total size: 29052 bytes (28.37 KiB). Checksum: ef06e7ecc26f36dd. type idnaTrie struct{} func newIdnaTrie(i int) *idnaTrie { @@ -554,17 +554,17 @@ func newIdnaTrie(i int) *idnaTrie { // lookupValue determines the type of block n and looks up the value for b. func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 { switch { - case n < 123: + case n < 125: return uint16(idnaValues[n<<6+uint32(b)]) default: - n -= 123 + n -= 125 return uint16(idnaSparse.lookup(n, b)) } } -// idnaValues: 125 blocks, 8000 entries, 16000 bytes +// idnaValues: 127 blocks, 8128 entries, 16256 bytes // The third block is the zero block. 
-var idnaValues = [8000]uint16{ +var idnaValues = [8128]uint16{ // Block 0x0, offset 0x0 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080, 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080, @@ -675,14 +675,14 @@ var idnaValues = [8000]uint16{ 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018, 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018, // Block 0xa, offset 0x280 - 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x1308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d, - 0x286: 0x1308, 0x287: 0x1308, 0x288: 0x1308, 0x289: 0x1308, 0x28a: 0x1308, 0x28b: 0x1308, - 0x28c: 0x1308, 0x28d: 0x1308, 0x28e: 0x1308, 0x28f: 0x13c0, 0x290: 0x1308, 0x291: 0x1308, - 0x292: 0x1308, 0x293: 0x1308, 0x294: 0x1308, 0x295: 0x1308, 0x296: 0x1308, 0x297: 0x1308, - 0x298: 0x1308, 0x299: 0x1308, 0x29a: 0x1308, 0x29b: 0x1308, 0x29c: 0x1308, 0x29d: 0x1308, - 0x29e: 0x1308, 0x29f: 0x1308, 0x2a0: 0x1308, 0x2a1: 0x1308, 0x2a2: 0x1308, 0x2a3: 0x1308, - 0x2a4: 0x1308, 0x2a5: 0x1308, 0x2a6: 0x1308, 0x2a7: 0x1308, 0x2a8: 0x1308, 0x2a9: 0x1308, - 0x2aa: 0x1308, 0x2ab: 0x1308, 0x2ac: 0x1308, 0x2ad: 0x1308, 0x2ae: 0x1308, 0x2af: 0x1308, + 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d, + 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308, + 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308, + 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308, + 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308, + 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308, + 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308, + 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308, 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008, 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008, 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d, @@ -723,8 +723,8 @@ var idnaValues = [8000]uint16{ 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008, 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008, // Block 0xe, offset 0x380 - 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x1308, 0x384: 0x1308, 0x385: 0x1308, - 0x386: 0x1308, 0x387: 0x1308, 0x388: 0x1318, 0x389: 0x1318, 0x38a: 0xe00d, 0x38b: 0x0008, + 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308, + 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008, 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008, 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008, 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008, @@ -759,129 +759,129 @@ var idnaValues = [8000]uint16{ 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5, 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5, // Block 0x11, offset 0x440 - 0x440: 0x0040, 0x441: 0x0040, 0x442: 0x0040, 0x443: 0x0040, 0x444: 0x0040, 0x445: 0x0040, - 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0018, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0018, - 0x44c: 0x0018, 0x44d: 0x0018, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 
0x1308, 0x451: 0x1308, - 0x452: 0x1308, 0x453: 0x1308, 0x454: 0x1308, 0x455: 0x1308, 0x456: 0x1308, 0x457: 0x1308, - 0x458: 0x1308, 0x459: 0x1308, 0x45a: 0x1308, 0x45b: 0x0018, 0x45c: 0x0340, 0x45d: 0x0040, - 0x45e: 0x0018, 0x45f: 0x0018, 0x460: 0x0208, 0x461: 0x0008, 0x462: 0x0408, 0x463: 0x0408, - 0x464: 0x0408, 0x465: 0x0408, 0x466: 0x0208, 0x467: 0x0408, 0x468: 0x0208, 0x469: 0x0408, - 0x46a: 0x0208, 0x46b: 0x0208, 0x46c: 0x0208, 0x46d: 0x0208, 0x46e: 0x0208, 0x46f: 0x0408, - 0x470: 0x0408, 0x471: 0x0408, 0x472: 0x0408, 0x473: 0x0208, 0x474: 0x0208, 0x475: 0x0208, - 0x476: 0x0208, 0x477: 0x0208, 0x478: 0x0208, 0x479: 0x0208, 0x47a: 0x0208, 0x47b: 0x0208, - 0x47c: 0x0208, 0x47d: 0x0208, 0x47e: 0x0208, 0x47f: 0x0208, + 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840, + 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818, + 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308, + 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308, + 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040, + 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08, + 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08, + 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08, + 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08, + 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08, + 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08, // Block 0x12, offset 0x480 - 0x480: 0x0408, 0x481: 0x0208, 0x482: 0x0208, 0x483: 0x0408, 0x484: 0x0408, 0x485: 0x0408, - 0x486: 0x0408, 0x487: 0x0408, 0x488: 0x0408, 0x489: 0x0408, 0x48a: 0x0408, 0x48b: 0x0408, - 0x48c: 0x0208, 0x48d: 0x0408, 0x48e: 0x0208, 0x48f: 0x0408, 0x490: 0x0208, 0x491: 0x0208, - 0x492: 0x0408, 0x493: 0x0408, 0x494: 0x0018, 0x495: 0x0408, 0x496: 0x1308, 0x497: 0x1308, - 0x498: 0x1308, 0x499: 0x1308, 0x49a: 0x1308, 0x49b: 0x1308, 0x49c: 0x1308, 0x49d: 0x0040, - 0x49e: 0x0018, 0x49f: 0x1308, 0x4a0: 0x1308, 0x4a1: 0x1308, 0x4a2: 0x1308, 0x4a3: 0x1308, - 0x4a4: 0x1308, 0x4a5: 0x0008, 0x4a6: 0x0008, 0x4a7: 0x1308, 0x4a8: 0x1308, 0x4a9: 0x0018, - 0x4aa: 0x1308, 0x4ab: 0x1308, 0x4ac: 0x1308, 0x4ad: 0x1308, 0x4ae: 0x0408, 0x4af: 0x0408, - 0x4b0: 0x0008, 0x4b1: 0x0008, 0x4b2: 0x0008, 0x4b3: 0x0008, 0x4b4: 0x0008, 0x4b5: 0x0008, - 0x4b6: 0x0008, 0x4b7: 0x0008, 0x4b8: 0x0008, 0x4b9: 0x0008, 0x4ba: 0x0208, 0x4bb: 0x0208, - 0x4bc: 0x0208, 0x4bd: 0x0008, 0x4be: 0x0008, 0x4bf: 0x0208, + 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08, + 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308, + 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308, + 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308, + 0x498: 0x3308, 0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308, + 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808, + 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808, + 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08, + 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429, + 
0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08, + 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08, // Block 0x13, offset 0x4c0 - 0x4c0: 0x0018, 0x4c1: 0x0018, 0x4c2: 0x0018, 0x4c3: 0x0018, 0x4c4: 0x0018, 0x4c5: 0x0018, - 0x4c6: 0x0018, 0x4c7: 0x0018, 0x4c8: 0x0018, 0x4c9: 0x0018, 0x4ca: 0x0018, 0x4cb: 0x0018, - 0x4cc: 0x0018, 0x4cd: 0x0018, 0x4ce: 0x0040, 0x4cf: 0x0340, 0x4d0: 0x0408, 0x4d1: 0x1308, - 0x4d2: 0x0208, 0x4d3: 0x0208, 0x4d4: 0x0208, 0x4d5: 0x0408, 0x4d6: 0x0408, 0x4d7: 0x0408, - 0x4d8: 0x0408, 0x4d9: 0x0408, 0x4da: 0x0208, 0x4db: 0x0208, 0x4dc: 0x0208, 0x4dd: 0x0208, - 0x4de: 0x0408, 0x4df: 0x0208, 0x4e0: 0x0208, 0x4e1: 0x0208, 0x4e2: 0x0208, 0x4e3: 0x0208, - 0x4e4: 0x0208, 0x4e5: 0x0208, 0x4e6: 0x0208, 0x4e7: 0x0208, 0x4e8: 0x0408, 0x4e9: 0x0208, - 0x4ea: 0x0408, 0x4eb: 0x0208, 0x4ec: 0x0408, 0x4ed: 0x0208, 0x4ee: 0x0208, 0x4ef: 0x0408, - 0x4f0: 0x1308, 0x4f1: 0x1308, 0x4f2: 0x1308, 0x4f3: 0x1308, 0x4f4: 0x1308, 0x4f5: 0x1308, - 0x4f6: 0x1308, 0x4f7: 0x1308, 0x4f8: 0x1308, 0x4f9: 0x1308, 0x4fa: 0x1308, 0x4fb: 0x1308, - 0x4fc: 0x1308, 0x4fd: 0x1308, 0x4fe: 0x1308, 0x4ff: 0x1308, + 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08, + 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08, + 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08, + 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308, + 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840, + 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308, + 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018, + 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08, + 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008, + 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08, + 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08, // Block 0x14, offset 0x500 - 0x500: 0x1008, 0x501: 0x1308, 0x502: 0x1308, 0x503: 0x1308, 0x504: 0x1308, 0x505: 0x1308, - 0x506: 0x1308, 0x507: 0x1308, 0x508: 0x1308, 0x509: 0x1008, 0x50a: 0x1008, 0x50b: 0x1008, - 0x50c: 0x1008, 0x50d: 0x1b08, 0x50e: 0x1008, 0x50f: 0x1008, 0x510: 0x0008, 0x511: 0x1308, - 0x512: 0x1308, 0x513: 0x1308, 0x514: 0x1308, 0x515: 0x1308, 0x516: 0x1308, 0x517: 0x1308, - 0x518: 0x04c9, 0x519: 0x0501, 0x51a: 0x0539, 0x51b: 0x0571, 0x51c: 0x05a9, 0x51d: 0x05e1, - 0x51e: 0x0619, 0x51f: 0x0651, 0x520: 0x0008, 0x521: 0x0008, 0x522: 0x1308, 0x523: 0x1308, - 0x524: 0x0018, 0x525: 0x0018, 0x526: 0x0008, 0x527: 0x0008, 0x528: 0x0008, 0x529: 0x0008, - 0x52a: 0x0008, 0x52b: 0x0008, 0x52c: 0x0008, 0x52d: 0x0008, 0x52e: 0x0008, 0x52f: 0x0008, - 0x530: 0x0018, 0x531: 0x0008, 0x532: 0x0008, 0x533: 0x0008, 0x534: 0x0008, 0x535: 0x0008, - 0x536: 0x0008, 0x537: 0x0008, 0x538: 0x0008, 0x539: 0x0008, 0x53a: 0x0008, 0x53b: 0x0008, - 0x53c: 0x0008, 0x53d: 0x0008, 0x53e: 0x0008, 0x53f: 0x0008, + 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818, + 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818, + 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308, + 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08, + 0x518: 0x0c08, 0x519: 0x0c08, 
0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08, + 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08, + 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08, + 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08, + 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308, + 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308, + 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308, // Block 0x15, offset 0x540 - 0x540: 0x0008, 0x541: 0x1308, 0x542: 0x1008, 0x543: 0x1008, 0x544: 0x0040, 0x545: 0x0008, - 0x546: 0x0008, 0x547: 0x0008, 0x548: 0x0008, 0x549: 0x0008, 0x54a: 0x0008, 0x54b: 0x0008, - 0x54c: 0x0008, 0x54d: 0x0040, 0x54e: 0x0040, 0x54f: 0x0008, 0x550: 0x0008, 0x551: 0x0040, - 0x552: 0x0040, 0x553: 0x0008, 0x554: 0x0008, 0x555: 0x0008, 0x556: 0x0008, 0x557: 0x0008, - 0x558: 0x0008, 0x559: 0x0008, 0x55a: 0x0008, 0x55b: 0x0008, 0x55c: 0x0008, 0x55d: 0x0008, - 0x55e: 0x0008, 0x55f: 0x0008, 0x560: 0x0008, 0x561: 0x0008, 0x562: 0x0008, 0x563: 0x0008, - 0x564: 0x0008, 0x565: 0x0008, 0x566: 0x0008, 0x567: 0x0008, 0x568: 0x0008, 0x569: 0x0040, - 0x56a: 0x0008, 0x56b: 0x0008, 0x56c: 0x0008, 0x56d: 0x0008, 0x56e: 0x0008, 0x56f: 0x0008, - 0x570: 0x0008, 0x571: 0x0040, 0x572: 0x0008, 0x573: 0x0040, 0x574: 0x0040, 0x575: 0x0040, - 0x576: 0x0008, 0x577: 0x0008, 0x578: 0x0008, 0x579: 0x0008, 0x57a: 0x0040, 0x57b: 0x0040, - 0x57c: 0x1308, 0x57d: 0x0008, 0x57e: 0x1008, 0x57f: 0x1008, + 0x540: 0x0c08, 0x541: 0x0a08, 0x542: 0x0a08, 0x543: 0x0a08, 0x544: 0x0a08, 0x545: 0x0a08, + 0x546: 0x0c08, 0x547: 0x0c08, 0x548: 0x0a08, 0x549: 0x0c08, 0x54a: 0x0a08, 0x54b: 0x0a08, + 0x54c: 0x0a08, 0x54d: 0x0a08, 0x54e: 0x0a08, 0x54f: 0x0a08, 0x550: 0x0a08, 0x551: 0x0a08, + 0x552: 0x0a08, 0x553: 0x0a08, 0x554: 0x0c08, 0x555: 0x0a08, 0x556: 0x0808, 0x557: 0x0808, + 0x558: 0x0808, 0x559: 0x3308, 0x55a: 0x3308, 0x55b: 0x3308, 0x55c: 0x0040, 0x55d: 0x0040, + 0x55e: 0x0818, 0x55f: 0x0040, 0x560: 0x0a08, 0x561: 0x0808, 0x562: 0x0a08, 0x563: 0x0a08, + 0x564: 0x0a08, 0x565: 0x0a08, 0x566: 0x0808, 0x567: 0x0c08, 0x568: 0x0a08, 0x569: 0x0c08, + 0x56a: 0x0c08, 0x56b: 0x0040, 0x56c: 0x0040, 0x56d: 0x0040, 0x56e: 0x0040, 0x56f: 0x0040, + 0x570: 0x0040, 0x571: 0x0040, 0x572: 0x0040, 0x573: 0x0040, 0x574: 0x0040, 0x575: 0x0040, + 0x576: 0x0040, 0x577: 0x0040, 0x578: 0x0040, 0x579: 0x0040, 0x57a: 0x0040, 0x57b: 0x0040, + 0x57c: 0x0040, 0x57d: 0x0040, 0x57e: 0x0040, 0x57f: 0x0040, // Block 0x16, offset 0x580 - 0x580: 0x1008, 0x581: 0x1308, 0x582: 0x1308, 0x583: 0x1308, 0x584: 0x1308, 0x585: 0x0040, - 0x586: 0x0040, 0x587: 0x1008, 0x588: 0x1008, 0x589: 0x0040, 0x58a: 0x0040, 0x58b: 0x1008, - 0x58c: 0x1008, 0x58d: 0x1b08, 0x58e: 0x0008, 0x58f: 0x0040, 0x590: 0x0040, 0x591: 0x0040, - 0x592: 0x0040, 0x593: 0x0040, 0x594: 0x0040, 0x595: 0x0040, 0x596: 0x0040, 0x597: 0x1008, - 0x598: 0x0040, 0x599: 0x0040, 0x59a: 0x0040, 0x59b: 0x0040, 0x59c: 0x0689, 0x59d: 0x06c1, - 0x59e: 0x0040, 0x59f: 0x06f9, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x1308, 0x5a3: 0x1308, - 0x5a4: 0x0040, 0x5a5: 0x0040, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008, + 0x580: 0x3008, 0x581: 0x3308, 0x582: 0x3308, 0x583: 0x3308, 0x584: 0x3308, 0x585: 0x3308, + 0x586: 0x3308, 0x587: 0x3308, 0x588: 0x3308, 0x589: 0x3008, 0x58a: 0x3008, 0x58b: 0x3008, + 0x58c: 0x3008, 0x58d: 0x3b08, 0x58e: 0x3008, 0x58f: 0x3008, 0x590: 0x0008, 0x591: 0x3308, + 0x592: 0x3308, 0x593: 0x3308, 
0x594: 0x3308, 0x595: 0x3308, 0x596: 0x3308, 0x597: 0x3308, + 0x598: 0x04c9, 0x599: 0x0501, 0x59a: 0x0539, 0x59b: 0x0571, 0x59c: 0x05a9, 0x59d: 0x05e1, + 0x59e: 0x0619, 0x59f: 0x0651, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x3308, 0x5a3: 0x3308, + 0x5a4: 0x0018, 0x5a5: 0x0018, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008, 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008, - 0x5b0: 0x0008, 0x5b1: 0x0008, 0x5b2: 0x0018, 0x5b3: 0x0018, 0x5b4: 0x0018, 0x5b5: 0x0018, - 0x5b6: 0x0018, 0x5b7: 0x0018, 0x5b8: 0x0018, 0x5b9: 0x0018, 0x5ba: 0x0018, 0x5bb: 0x0018, - 0x5bc: 0x0040, 0x5bd: 0x0040, 0x5be: 0x0040, 0x5bf: 0x0040, + 0x5b0: 0x0018, 0x5b1: 0x0008, 0x5b2: 0x0008, 0x5b3: 0x0008, 0x5b4: 0x0008, 0x5b5: 0x0008, + 0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0008, 0x5bb: 0x0008, + 0x5bc: 0x0008, 0x5bd: 0x0008, 0x5be: 0x0008, 0x5bf: 0x0008, // Block 0x17, offset 0x5c0 - 0x5c0: 0x0040, 0x5c1: 0x1308, 0x5c2: 0x1308, 0x5c3: 0x1008, 0x5c4: 0x0040, 0x5c5: 0x0008, - 0x5c6: 0x0008, 0x5c7: 0x0008, 0x5c8: 0x0008, 0x5c9: 0x0008, 0x5ca: 0x0008, 0x5cb: 0x0040, - 0x5cc: 0x0040, 0x5cd: 0x0040, 0x5ce: 0x0040, 0x5cf: 0x0008, 0x5d0: 0x0008, 0x5d1: 0x0040, + 0x5c0: 0x0008, 0x5c1: 0x3308, 0x5c2: 0x3008, 0x5c3: 0x3008, 0x5c4: 0x0040, 0x5c5: 0x0008, + 0x5c6: 0x0008, 0x5c7: 0x0008, 0x5c8: 0x0008, 0x5c9: 0x0008, 0x5ca: 0x0008, 0x5cb: 0x0008, + 0x5cc: 0x0008, 0x5cd: 0x0040, 0x5ce: 0x0040, 0x5cf: 0x0008, 0x5d0: 0x0008, 0x5d1: 0x0040, 0x5d2: 0x0040, 0x5d3: 0x0008, 0x5d4: 0x0008, 0x5d5: 0x0008, 0x5d6: 0x0008, 0x5d7: 0x0008, 0x5d8: 0x0008, 0x5d9: 0x0008, 0x5da: 0x0008, 0x5db: 0x0008, 0x5dc: 0x0008, 0x5dd: 0x0008, 0x5de: 0x0008, 0x5df: 0x0008, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x0008, 0x5e3: 0x0008, 0x5e4: 0x0008, 0x5e5: 0x0008, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0040, 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008, - 0x5f0: 0x0008, 0x5f1: 0x0040, 0x5f2: 0x0008, 0x5f3: 0x0731, 0x5f4: 0x0040, 0x5f5: 0x0008, - 0x5f6: 0x0769, 0x5f7: 0x0040, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0040, 0x5fb: 0x0040, - 0x5fc: 0x1308, 0x5fd: 0x0040, 0x5fe: 0x1008, 0x5ff: 0x1008, + 0x5f0: 0x0008, 0x5f1: 0x0040, 0x5f2: 0x0008, 0x5f3: 0x0040, 0x5f4: 0x0040, 0x5f5: 0x0040, + 0x5f6: 0x0008, 0x5f7: 0x0008, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0040, 0x5fb: 0x0040, + 0x5fc: 0x3308, 0x5fd: 0x0008, 0x5fe: 0x3008, 0x5ff: 0x3008, // Block 0x18, offset 0x600 - 0x600: 0x1008, 0x601: 0x1308, 0x602: 0x1308, 0x603: 0x0040, 0x604: 0x0040, 0x605: 0x0040, - 0x606: 0x0040, 0x607: 0x1308, 0x608: 0x1308, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x1308, - 0x60c: 0x1308, 0x60d: 0x1b08, 0x60e: 0x0040, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x1308, - 0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x0040, - 0x618: 0x0040, 0x619: 0x07a1, 0x61a: 0x07d9, 0x61b: 0x0811, 0x61c: 0x0008, 0x61d: 0x0040, - 0x61e: 0x0849, 0x61f: 0x0040, 0x620: 0x0040, 0x621: 0x0040, 0x622: 0x0040, 0x623: 0x0040, + 0x600: 0x3008, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3308, 0x604: 0x3308, 0x605: 0x0040, + 0x606: 0x0040, 0x607: 0x3008, 0x608: 0x3008, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x3008, + 0x60c: 0x3008, 0x60d: 0x3b08, 0x60e: 0x0008, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x0040, + 0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x3008, + 0x618: 0x0040, 0x619: 0x0040, 0x61a: 0x0040, 0x61b: 0x0040, 0x61c: 0x0689, 0x61d: 0x06c1, + 0x61e: 0x0040, 0x61f: 0x06f9, 0x620: 0x0008, 0x621: 0x0008, 0x622: 
0x3308, 0x623: 0x3308, 0x624: 0x0040, 0x625: 0x0040, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0008, 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008, - 0x630: 0x1308, 0x631: 0x1308, 0x632: 0x0008, 0x633: 0x0008, 0x634: 0x0008, 0x635: 0x1308, - 0x636: 0x0040, 0x637: 0x0040, 0x638: 0x0040, 0x639: 0x0040, 0x63a: 0x0040, 0x63b: 0x0040, - 0x63c: 0x0040, 0x63d: 0x0040, 0x63e: 0x0040, 0x63f: 0x0040, + 0x630: 0x0008, 0x631: 0x0008, 0x632: 0x0018, 0x633: 0x0018, 0x634: 0x0018, 0x635: 0x0018, + 0x636: 0x0018, 0x637: 0x0018, 0x638: 0x0018, 0x639: 0x0018, 0x63a: 0x0018, 0x63b: 0x0018, + 0x63c: 0x0008, 0x63d: 0x0018, 0x63e: 0x0040, 0x63f: 0x0040, // Block 0x19, offset 0x640 - 0x640: 0x0040, 0x641: 0x1308, 0x642: 0x1308, 0x643: 0x1008, 0x644: 0x0040, 0x645: 0x0008, - 0x646: 0x0008, 0x647: 0x0008, 0x648: 0x0008, 0x649: 0x0008, 0x64a: 0x0008, 0x64b: 0x0008, - 0x64c: 0x0008, 0x64d: 0x0008, 0x64e: 0x0040, 0x64f: 0x0008, 0x650: 0x0008, 0x651: 0x0008, + 0x640: 0x0040, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x3008, 0x644: 0x0040, 0x645: 0x0008, + 0x646: 0x0008, 0x647: 0x0008, 0x648: 0x0008, 0x649: 0x0008, 0x64a: 0x0008, 0x64b: 0x0040, + 0x64c: 0x0040, 0x64d: 0x0040, 0x64e: 0x0040, 0x64f: 0x0008, 0x650: 0x0008, 0x651: 0x0040, 0x652: 0x0040, 0x653: 0x0008, 0x654: 0x0008, 0x655: 0x0008, 0x656: 0x0008, 0x657: 0x0008, 0x658: 0x0008, 0x659: 0x0008, 0x65a: 0x0008, 0x65b: 0x0008, 0x65c: 0x0008, 0x65d: 0x0008, 0x65e: 0x0008, 0x65f: 0x0008, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x0008, 0x663: 0x0008, 0x664: 0x0008, 0x665: 0x0008, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0040, 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 0x0008, 0x66e: 0x0008, 0x66f: 0x0008, - 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x0008, 0x674: 0x0040, 0x675: 0x0008, - 0x676: 0x0008, 0x677: 0x0008, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040, - 0x67c: 0x1308, 0x67d: 0x0008, 0x67e: 0x1008, 0x67f: 0x1008, + 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x0731, 0x674: 0x0040, 0x675: 0x0008, + 0x676: 0x0769, 0x677: 0x0040, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040, + 0x67c: 0x3308, 0x67d: 0x0040, 0x67e: 0x3008, 0x67f: 0x3008, // Block 0x1a, offset 0x680 - 0x680: 0x1008, 0x681: 0x1308, 0x682: 0x1308, 0x683: 0x1308, 0x684: 0x1308, 0x685: 0x1308, - 0x686: 0x0040, 0x687: 0x1308, 0x688: 0x1308, 0x689: 0x1008, 0x68a: 0x0040, 0x68b: 0x1008, - 0x68c: 0x1008, 0x68d: 0x1b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0008, 0x691: 0x0040, + 0x680: 0x3008, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x0040, 0x684: 0x0040, 0x685: 0x0040, + 0x686: 0x0040, 0x687: 0x3308, 0x688: 0x3308, 0x689: 0x0040, 0x68a: 0x0040, 0x68b: 0x3308, + 0x68c: 0x3308, 0x68d: 0x3b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0040, 0x691: 0x3308, 0x692: 0x0040, 0x693: 0x0040, 0x694: 0x0040, 0x695: 0x0040, 0x696: 0x0040, 0x697: 0x0040, - 0x698: 0x0040, 0x699: 0x0040, 0x69a: 0x0040, 0x69b: 0x0040, 0x69c: 0x0040, 0x69d: 0x0040, - 0x69e: 0x0040, 0x69f: 0x0040, 0x6a0: 0x0008, 0x6a1: 0x0008, 0x6a2: 0x1308, 0x6a3: 0x1308, + 0x698: 0x0040, 0x699: 0x07a1, 0x69a: 0x07d9, 0x69b: 0x0811, 0x69c: 0x0008, 0x69d: 0x0040, + 0x69e: 0x0849, 0x69f: 0x0040, 0x6a0: 0x0040, 0x6a1: 0x0040, 0x6a2: 0x0040, 0x6a3: 0x0040, 0x6a4: 0x0040, 0x6a5: 0x0040, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0008, 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008, - 0x6b0: 0x0018, 0x6b1: 0x0018, 0x6b2: 0x0040, 0x6b3: 0x0040, 0x6b4: 0x0040, 0x6b5: 0x0040, - 0x6b6: 0x0040, 0x6b7: 
0x0040, 0x6b8: 0x0040, 0x6b9: 0x0008, 0x6ba: 0x0040, 0x6bb: 0x0040, + 0x6b0: 0x3308, 0x6b1: 0x3308, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0008, 0x6b5: 0x3308, + 0x6b6: 0x0040, 0x6b7: 0x0040, 0x6b8: 0x0040, 0x6b9: 0x0040, 0x6ba: 0x0040, 0x6bb: 0x0040, 0x6bc: 0x0040, 0x6bd: 0x0040, 0x6be: 0x0040, 0x6bf: 0x0040, // Block 0x1b, offset 0x6c0 - 0x6c0: 0x0040, 0x6c1: 0x1308, 0x6c2: 0x1008, 0x6c3: 0x1008, 0x6c4: 0x0040, 0x6c5: 0x0008, + 0x6c0: 0x0040, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3008, 0x6c4: 0x0040, 0x6c5: 0x0008, 0x6c6: 0x0008, 0x6c7: 0x0008, 0x6c8: 0x0008, 0x6c9: 0x0008, 0x6ca: 0x0008, 0x6cb: 0x0008, - 0x6cc: 0x0008, 0x6cd: 0x0040, 0x6ce: 0x0040, 0x6cf: 0x0008, 0x6d0: 0x0008, 0x6d1: 0x0040, + 0x6cc: 0x0008, 0x6cd: 0x0008, 0x6ce: 0x0040, 0x6cf: 0x0008, 0x6d0: 0x0008, 0x6d1: 0x0008, 0x6d2: 0x0040, 0x6d3: 0x0008, 0x6d4: 0x0008, 0x6d5: 0x0008, 0x6d6: 0x0008, 0x6d7: 0x0008, 0x6d8: 0x0008, 0x6d9: 0x0008, 0x6da: 0x0008, 0x6db: 0x0008, 0x6dc: 0x0008, 0x6dd: 0x0008, 0x6de: 0x0008, 0x6df: 0x0008, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x0008, 0x6e3: 0x0008, @@ -889,1457 +889,1490 @@ var idnaValues = [8000]uint16{ 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008, 0x6f0: 0x0008, 0x6f1: 0x0040, 0x6f2: 0x0008, 0x6f3: 0x0008, 0x6f4: 0x0040, 0x6f5: 0x0008, 0x6f6: 0x0008, 0x6f7: 0x0008, 0x6f8: 0x0008, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040, - 0x6fc: 0x1308, 0x6fd: 0x0008, 0x6fe: 0x1008, 0x6ff: 0x1308, + 0x6fc: 0x3308, 0x6fd: 0x0008, 0x6fe: 0x3008, 0x6ff: 0x3008, // Block 0x1c, offset 0x700 - 0x700: 0x1008, 0x701: 0x1308, 0x702: 0x1308, 0x703: 0x1308, 0x704: 0x1308, 0x705: 0x0040, - 0x706: 0x0040, 0x707: 0x1008, 0x708: 0x1008, 0x709: 0x0040, 0x70a: 0x0040, 0x70b: 0x1008, - 0x70c: 0x1008, 0x70d: 0x1b08, 0x70e: 0x0040, 0x70f: 0x0040, 0x710: 0x0040, 0x711: 0x0040, - 0x712: 0x0040, 0x713: 0x0040, 0x714: 0x0040, 0x715: 0x0040, 0x716: 0x1308, 0x717: 0x1008, - 0x718: 0x0040, 0x719: 0x0040, 0x71a: 0x0040, 0x71b: 0x0040, 0x71c: 0x0881, 0x71d: 0x08b9, - 0x71e: 0x0040, 0x71f: 0x0008, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x1308, 0x723: 0x1308, + 0x700: 0x3008, 0x701: 0x3308, 0x702: 0x3308, 0x703: 0x3308, 0x704: 0x3308, 0x705: 0x3308, + 0x706: 0x0040, 0x707: 0x3308, 0x708: 0x3308, 0x709: 0x3008, 0x70a: 0x0040, 0x70b: 0x3008, + 0x70c: 0x3008, 0x70d: 0x3b08, 0x70e: 0x0040, 0x70f: 0x0040, 0x710: 0x0008, 0x711: 0x0040, + 0x712: 0x0040, 0x713: 0x0040, 0x714: 0x0040, 0x715: 0x0040, 0x716: 0x0040, 0x717: 0x0040, + 0x718: 0x0040, 0x719: 0x0040, 0x71a: 0x0040, 0x71b: 0x0040, 0x71c: 0x0040, 0x71d: 0x0040, + 0x71e: 0x0040, 0x71f: 0x0040, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x3308, 0x723: 0x3308, 0x724: 0x0040, 0x725: 0x0040, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0008, 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008, - 0x730: 0x0018, 0x731: 0x0008, 0x732: 0x0018, 0x733: 0x0018, 0x734: 0x0018, 0x735: 0x0018, - 0x736: 0x0018, 0x737: 0x0018, 0x738: 0x0040, 0x739: 0x0040, 0x73a: 0x0040, 0x73b: 0x0040, - 0x73c: 0x0040, 0x73d: 0x0040, 0x73e: 0x0040, 0x73f: 0x0040, + 0x730: 0x0018, 0x731: 0x0018, 0x732: 0x0040, 0x733: 0x0040, 0x734: 0x0040, 0x735: 0x0040, + 0x736: 0x0040, 0x737: 0x0040, 0x738: 0x0040, 0x739: 0x0008, 0x73a: 0x3308, 0x73b: 0x3308, + 0x73c: 0x3308, 0x73d: 0x3308, 0x73e: 0x3308, 0x73f: 0x3308, // Block 0x1d, offset 0x740 - 0x740: 0x0040, 0x741: 0x0040, 0x742: 0x1308, 0x743: 0x0008, 0x744: 0x0040, 0x745: 0x0008, - 0x746: 0x0008, 0x747: 0x0008, 0x748: 0x0008, 0x749: 0x0008, 0x74a: 0x0008, 0x74b: 0x0040, - 0x74c: 0x0040, 
0x74d: 0x0040, 0x74e: 0x0008, 0x74f: 0x0008, 0x750: 0x0008, 0x751: 0x0040, - 0x752: 0x0008, 0x753: 0x0008, 0x754: 0x0008, 0x755: 0x0008, 0x756: 0x0040, 0x757: 0x0040, - 0x758: 0x0040, 0x759: 0x0008, 0x75a: 0x0008, 0x75b: 0x0040, 0x75c: 0x0008, 0x75d: 0x0040, - 0x75e: 0x0008, 0x75f: 0x0008, 0x760: 0x0040, 0x761: 0x0040, 0x762: 0x0040, 0x763: 0x0008, - 0x764: 0x0008, 0x765: 0x0040, 0x766: 0x0040, 0x767: 0x0040, 0x768: 0x0008, 0x769: 0x0008, - 0x76a: 0x0008, 0x76b: 0x0040, 0x76c: 0x0040, 0x76d: 0x0040, 0x76e: 0x0008, 0x76f: 0x0008, - 0x770: 0x0008, 0x771: 0x0008, 0x772: 0x0008, 0x773: 0x0008, 0x774: 0x0008, 0x775: 0x0008, + 0x740: 0x0040, 0x741: 0x3308, 0x742: 0x3008, 0x743: 0x3008, 0x744: 0x0040, 0x745: 0x0008, + 0x746: 0x0008, 0x747: 0x0008, 0x748: 0x0008, 0x749: 0x0008, 0x74a: 0x0008, 0x74b: 0x0008, + 0x74c: 0x0008, 0x74d: 0x0040, 0x74e: 0x0040, 0x74f: 0x0008, 0x750: 0x0008, 0x751: 0x0040, + 0x752: 0x0040, 0x753: 0x0008, 0x754: 0x0008, 0x755: 0x0008, 0x756: 0x0008, 0x757: 0x0008, + 0x758: 0x0008, 0x759: 0x0008, 0x75a: 0x0008, 0x75b: 0x0008, 0x75c: 0x0008, 0x75d: 0x0008, + 0x75e: 0x0008, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x0008, 0x763: 0x0008, + 0x764: 0x0008, 0x765: 0x0008, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0040, + 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008, + 0x770: 0x0008, 0x771: 0x0040, 0x772: 0x0008, 0x773: 0x0008, 0x774: 0x0040, 0x775: 0x0008, 0x776: 0x0008, 0x777: 0x0008, 0x778: 0x0008, 0x779: 0x0008, 0x77a: 0x0040, 0x77b: 0x0040, - 0x77c: 0x0040, 0x77d: 0x0040, 0x77e: 0x1008, 0x77f: 0x1008, + 0x77c: 0x3308, 0x77d: 0x0008, 0x77e: 0x3008, 0x77f: 0x3308, // Block 0x1e, offset 0x780 - 0x780: 0x1308, 0x781: 0x1008, 0x782: 0x1008, 0x783: 0x1008, 0x784: 0x1008, 0x785: 0x0040, - 0x786: 0x1308, 0x787: 0x1308, 0x788: 0x1308, 0x789: 0x0040, 0x78a: 0x1308, 0x78b: 0x1308, - 0x78c: 0x1308, 0x78d: 0x1b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040, - 0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x1308, 0x796: 0x1308, 0x797: 0x0040, - 0x798: 0x0008, 0x799: 0x0008, 0x79a: 0x0008, 0x79b: 0x0040, 0x79c: 0x0040, 0x79d: 0x0040, - 0x79e: 0x0040, 0x79f: 0x0040, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x1308, 0x7a3: 0x1308, + 0x780: 0x3008, 0x781: 0x3308, 0x782: 0x3308, 0x783: 0x3308, 0x784: 0x3308, 0x785: 0x0040, + 0x786: 0x0040, 0x787: 0x3008, 0x788: 0x3008, 0x789: 0x0040, 0x78a: 0x0040, 0x78b: 0x3008, + 0x78c: 0x3008, 0x78d: 0x3b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040, + 0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x0040, 0x796: 0x3308, 0x797: 0x3008, + 0x798: 0x0040, 0x799: 0x0040, 0x79a: 0x0040, 0x79b: 0x0040, 0x79c: 0x0881, 0x79d: 0x08b9, + 0x79e: 0x0040, 0x79f: 0x0008, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x3308, 0x7a3: 0x3308, 0x7a4: 0x0040, 0x7a5: 0x0040, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0008, 0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008, - 0x7b0: 0x0040, 0x7b1: 0x0040, 0x7b2: 0x0040, 0x7b3: 0x0040, 0x7b4: 0x0040, 0x7b5: 0x0040, - 0x7b6: 0x0040, 0x7b7: 0x0040, 0x7b8: 0x0018, 0x7b9: 0x0018, 0x7ba: 0x0018, 0x7bb: 0x0018, - 0x7bc: 0x0018, 0x7bd: 0x0018, 0x7be: 0x0018, 0x7bf: 0x0018, + 0x7b0: 0x0018, 0x7b1: 0x0008, 0x7b2: 0x0018, 0x7b3: 0x0018, 0x7b4: 0x0018, 0x7b5: 0x0018, + 0x7b6: 0x0018, 0x7b7: 0x0018, 0x7b8: 0x0040, 0x7b9: 0x0040, 0x7ba: 0x0040, 0x7bb: 0x0040, + 0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x0040, 0x7bf: 0x0040, // Block 0x1f, offset 0x7c0 - 0x7c0: 0x0008, 0x7c1: 0x1308, 0x7c2: 0x1008, 0x7c3: 
0x1008, 0x7c4: 0x0040, 0x7c5: 0x0008, - 0x7c6: 0x0008, 0x7c7: 0x0008, 0x7c8: 0x0008, 0x7c9: 0x0008, 0x7ca: 0x0008, 0x7cb: 0x0008, - 0x7cc: 0x0008, 0x7cd: 0x0040, 0x7ce: 0x0008, 0x7cf: 0x0008, 0x7d0: 0x0008, 0x7d1: 0x0040, - 0x7d2: 0x0008, 0x7d3: 0x0008, 0x7d4: 0x0008, 0x7d5: 0x0008, 0x7d6: 0x0008, 0x7d7: 0x0008, - 0x7d8: 0x0008, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0008, 0x7dc: 0x0008, 0x7dd: 0x0008, - 0x7de: 0x0008, 0x7df: 0x0008, 0x7e0: 0x0008, 0x7e1: 0x0008, 0x7e2: 0x0008, 0x7e3: 0x0008, - 0x7e4: 0x0008, 0x7e5: 0x0008, 0x7e6: 0x0008, 0x7e7: 0x0008, 0x7e8: 0x0008, 0x7e9: 0x0040, - 0x7ea: 0x0008, 0x7eb: 0x0008, 0x7ec: 0x0008, 0x7ed: 0x0008, 0x7ee: 0x0008, 0x7ef: 0x0008, - 0x7f0: 0x0008, 0x7f1: 0x0008, 0x7f2: 0x0008, 0x7f3: 0x0008, 0x7f4: 0x0040, 0x7f5: 0x0008, + 0x7c0: 0x0040, 0x7c1: 0x0040, 0x7c2: 0x3308, 0x7c3: 0x0008, 0x7c4: 0x0040, 0x7c5: 0x0008, + 0x7c6: 0x0008, 0x7c7: 0x0008, 0x7c8: 0x0008, 0x7c9: 0x0008, 0x7ca: 0x0008, 0x7cb: 0x0040, + 0x7cc: 0x0040, 0x7cd: 0x0040, 0x7ce: 0x0008, 0x7cf: 0x0008, 0x7d0: 0x0008, 0x7d1: 0x0040, + 0x7d2: 0x0008, 0x7d3: 0x0008, 0x7d4: 0x0008, 0x7d5: 0x0008, 0x7d6: 0x0040, 0x7d7: 0x0040, + 0x7d8: 0x0040, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0008, 0x7dd: 0x0040, + 0x7de: 0x0008, 0x7df: 0x0008, 0x7e0: 0x0040, 0x7e1: 0x0040, 0x7e2: 0x0040, 0x7e3: 0x0008, + 0x7e4: 0x0008, 0x7e5: 0x0040, 0x7e6: 0x0040, 0x7e7: 0x0040, 0x7e8: 0x0008, 0x7e9: 0x0008, + 0x7ea: 0x0008, 0x7eb: 0x0040, 0x7ec: 0x0040, 0x7ed: 0x0040, 0x7ee: 0x0008, 0x7ef: 0x0008, + 0x7f0: 0x0008, 0x7f1: 0x0008, 0x7f2: 0x0008, 0x7f3: 0x0008, 0x7f4: 0x0008, 0x7f5: 0x0008, 0x7f6: 0x0008, 0x7f7: 0x0008, 0x7f8: 0x0008, 0x7f9: 0x0008, 0x7fa: 0x0040, 0x7fb: 0x0040, - 0x7fc: 0x1308, 0x7fd: 0x0008, 0x7fe: 0x1008, 0x7ff: 0x1308, + 0x7fc: 0x0040, 0x7fd: 0x0040, 0x7fe: 0x3008, 0x7ff: 0x3008, // Block 0x20, offset 0x800 - 0x800: 0x1008, 0x801: 0x1008, 0x802: 0x1008, 0x803: 0x1008, 0x804: 0x1008, 0x805: 0x0040, - 0x806: 0x1308, 0x807: 0x1008, 0x808: 0x1008, 0x809: 0x0040, 0x80a: 0x1008, 0x80b: 0x1008, - 0x80c: 0x1308, 0x80d: 0x1b08, 0x80e: 0x0040, 0x80f: 0x0040, 0x810: 0x0040, 0x811: 0x0040, - 0x812: 0x0040, 0x813: 0x0040, 0x814: 0x0040, 0x815: 0x1008, 0x816: 0x1008, 0x817: 0x0040, - 0x818: 0x0040, 0x819: 0x0040, 0x81a: 0x0040, 0x81b: 0x0040, 0x81c: 0x0040, 0x81d: 0x0040, - 0x81e: 0x0008, 0x81f: 0x0040, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x1308, 0x823: 0x1308, + 0x800: 0x3308, 0x801: 0x3008, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x3008, 0x805: 0x0040, + 0x806: 0x3308, 0x807: 0x3308, 0x808: 0x3308, 0x809: 0x0040, 0x80a: 0x3308, 0x80b: 0x3308, + 0x80c: 0x3308, 0x80d: 0x3b08, 0x80e: 0x0040, 0x80f: 0x0040, 0x810: 0x0040, 0x811: 0x0040, + 0x812: 0x0040, 0x813: 0x0040, 0x814: 0x0040, 0x815: 0x3308, 0x816: 0x3308, 0x817: 0x0040, + 0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 0x81b: 0x0040, 0x81c: 0x0040, 0x81d: 0x0040, + 0x81e: 0x0040, 0x81f: 0x0040, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x3308, 0x823: 0x3308, 0x824: 0x0040, 0x825: 0x0040, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0008, 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 0x0008, 0x82f: 0x0008, - 0x830: 0x0040, 0x831: 0x0008, 0x832: 0x0008, 0x833: 0x0040, 0x834: 0x0040, 0x835: 0x0040, - 0x836: 0x0040, 0x837: 0x0040, 0x838: 0x0040, 0x839: 0x0040, 0x83a: 0x0040, 0x83b: 0x0040, - 0x83c: 0x0040, 0x83d: 0x0040, 0x83e: 0x0040, 0x83f: 0x0040, + 0x830: 0x0040, 0x831: 0x0040, 0x832: 0x0040, 0x833: 0x0040, 0x834: 0x0040, 0x835: 0x0040, + 0x836: 0x0040, 0x837: 0x0040, 0x838: 0x0018, 0x839: 0x0018, 0x83a: 0x0018, 0x83b: 
0x0018, + 0x83c: 0x0018, 0x83d: 0x0018, 0x83e: 0x0018, 0x83f: 0x0018, // Block 0x21, offset 0x840 - 0x840: 0x1008, 0x841: 0x1308, 0x842: 0x1308, 0x843: 0x1308, 0x844: 0x1308, 0x845: 0x0040, - 0x846: 0x1008, 0x847: 0x1008, 0x848: 0x1008, 0x849: 0x0040, 0x84a: 0x1008, 0x84b: 0x1008, - 0x84c: 0x1008, 0x84d: 0x1b08, 0x84e: 0x0008, 0x84f: 0x0018, 0x850: 0x0040, 0x851: 0x0040, - 0x852: 0x0040, 0x853: 0x0040, 0x854: 0x0008, 0x855: 0x0008, 0x856: 0x0008, 0x857: 0x1008, - 0x858: 0x0018, 0x859: 0x0018, 0x85a: 0x0018, 0x85b: 0x0018, 0x85c: 0x0018, 0x85d: 0x0018, - 0x85e: 0x0018, 0x85f: 0x0008, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x1308, 0x863: 0x1308, - 0x864: 0x0040, 0x865: 0x0040, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0008, + 0x840: 0x0008, 0x841: 0x3308, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x0040, 0x845: 0x0008, + 0x846: 0x0008, 0x847: 0x0008, 0x848: 0x0008, 0x849: 0x0008, 0x84a: 0x0008, 0x84b: 0x0008, + 0x84c: 0x0008, 0x84d: 0x0040, 0x84e: 0x0008, 0x84f: 0x0008, 0x850: 0x0008, 0x851: 0x0040, + 0x852: 0x0008, 0x853: 0x0008, 0x854: 0x0008, 0x855: 0x0008, 0x856: 0x0008, 0x857: 0x0008, + 0x858: 0x0008, 0x859: 0x0008, 0x85a: 0x0008, 0x85b: 0x0008, 0x85c: 0x0008, 0x85d: 0x0008, + 0x85e: 0x0008, 0x85f: 0x0008, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x0008, 0x863: 0x0008, + 0x864: 0x0008, 0x865: 0x0008, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0040, 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008, - 0x870: 0x0018, 0x871: 0x0018, 0x872: 0x0018, 0x873: 0x0018, 0x874: 0x0018, 0x875: 0x0018, - 0x876: 0x0018, 0x877: 0x0018, 0x878: 0x0018, 0x879: 0x0018, 0x87a: 0x0008, 0x87b: 0x0008, - 0x87c: 0x0008, 0x87d: 0x0008, 0x87e: 0x0008, 0x87f: 0x0008, + 0x870: 0x0008, 0x871: 0x0008, 0x872: 0x0008, 0x873: 0x0008, 0x874: 0x0040, 0x875: 0x0008, + 0x876: 0x0008, 0x877: 0x0008, 0x878: 0x0008, 0x879: 0x0008, 0x87a: 0x0040, 0x87b: 0x0040, + 0x87c: 0x3308, 0x87d: 0x0008, 0x87e: 0x3008, 0x87f: 0x3308, // Block 0x22, offset 0x880 - 0x880: 0x0040, 0x881: 0x0008, 0x882: 0x0008, 0x883: 0x0040, 0x884: 0x0008, 0x885: 0x0040, - 0x886: 0x0040, 0x887: 0x0008, 0x888: 0x0008, 0x889: 0x0040, 0x88a: 0x0008, 0x88b: 0x0040, - 0x88c: 0x0040, 0x88d: 0x0008, 0x88e: 0x0040, 0x88f: 0x0040, 0x890: 0x0040, 0x891: 0x0040, - 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0008, 0x895: 0x0008, 0x896: 0x0008, 0x897: 0x0008, - 0x898: 0x0040, 0x899: 0x0008, 0x89a: 0x0008, 0x89b: 0x0008, 0x89c: 0x0008, 0x89d: 0x0008, - 0x89e: 0x0008, 0x89f: 0x0008, 0x8a0: 0x0040, 0x8a1: 0x0008, 0x8a2: 0x0008, 0x8a3: 0x0008, - 0x8a4: 0x0040, 0x8a5: 0x0008, 0x8a6: 0x0040, 0x8a7: 0x0008, 0x8a8: 0x0040, 0x8a9: 0x0040, - 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0040, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008, - 0x8b0: 0x0008, 0x8b1: 0x1308, 0x8b2: 0x0008, 0x8b3: 0x0929, 0x8b4: 0x1308, 0x8b5: 0x1308, - 0x8b6: 0x1308, 0x8b7: 0x1308, 0x8b8: 0x1308, 0x8b9: 0x1308, 0x8ba: 0x0040, 0x8bb: 0x1308, - 0x8bc: 0x1308, 0x8bd: 0x0008, 0x8be: 0x0040, 0x8bf: 0x0040, + 0x880: 0x3008, 0x881: 0x3008, 0x882: 0x3008, 0x883: 0x3008, 0x884: 0x3008, 0x885: 0x0040, + 0x886: 0x3308, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008, + 0x88c: 0x3308, 0x88d: 0x3b08, 0x88e: 0x0040, 0x88f: 0x0040, 0x890: 0x0040, 0x891: 0x0040, + 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0040, 0x895: 0x3008, 0x896: 0x3008, 0x897: 0x0040, + 0x898: 0x0040, 0x899: 0x0040, 0x89a: 0x0040, 0x89b: 0x0040, 0x89c: 0x0040, 0x89d: 0x0040, + 0x89e: 0x0008, 0x89f: 0x0040, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308, + 0x8a4: 0x0040, 0x8a5: 
0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008, + 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008, + 0x8b0: 0x0040, 0x8b1: 0x0008, 0x8b2: 0x0008, 0x8b3: 0x0040, 0x8b4: 0x0040, 0x8b5: 0x0040, + 0x8b6: 0x0040, 0x8b7: 0x0040, 0x8b8: 0x0040, 0x8b9: 0x0040, 0x8ba: 0x0040, 0x8bb: 0x0040, + 0x8bc: 0x0040, 0x8bd: 0x0040, 0x8be: 0x0040, 0x8bf: 0x0040, // Block 0x23, offset 0x8c0 - 0x8c0: 0x0008, 0x8c1: 0x0008, 0x8c2: 0x0008, 0x8c3: 0x09d1, 0x8c4: 0x0008, 0x8c5: 0x0008, - 0x8c6: 0x0008, 0x8c7: 0x0008, 0x8c8: 0x0040, 0x8c9: 0x0008, 0x8ca: 0x0008, 0x8cb: 0x0008, - 0x8cc: 0x0008, 0x8cd: 0x0a09, 0x8ce: 0x0008, 0x8cf: 0x0008, 0x8d0: 0x0008, 0x8d1: 0x0008, - 0x8d2: 0x0a41, 0x8d3: 0x0008, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x0a79, - 0x8d8: 0x0008, 0x8d9: 0x0008, 0x8da: 0x0008, 0x8db: 0x0008, 0x8dc: 0x0ab1, 0x8dd: 0x0008, - 0x8de: 0x0008, 0x8df: 0x0008, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x0008, 0x8e3: 0x0008, - 0x8e4: 0x0008, 0x8e5: 0x0008, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0ae9, - 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0040, 0x8ee: 0x0040, 0x8ef: 0x0040, - 0x8f0: 0x0040, 0x8f1: 0x1308, 0x8f2: 0x1308, 0x8f3: 0x0b21, 0x8f4: 0x1308, 0x8f5: 0x0b59, - 0x8f6: 0x0b91, 0x8f7: 0x0bc9, 0x8f8: 0x0c19, 0x8f9: 0x0c51, 0x8fa: 0x1308, 0x8fb: 0x1308, - 0x8fc: 0x1308, 0x8fd: 0x1308, 0x8fe: 0x1308, 0x8ff: 0x1008, + 0x8c0: 0x3008, 0x8c1: 0x3308, 0x8c2: 0x3308, 0x8c3: 0x3308, 0x8c4: 0x3308, 0x8c5: 0x0040, + 0x8c6: 0x3008, 0x8c7: 0x3008, 0x8c8: 0x3008, 0x8c9: 0x0040, 0x8ca: 0x3008, 0x8cb: 0x3008, + 0x8cc: 0x3008, 0x8cd: 0x3b08, 0x8ce: 0x0008, 0x8cf: 0x0018, 0x8d0: 0x0040, 0x8d1: 0x0040, + 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x3008, + 0x8d8: 0x0018, 0x8d9: 0x0018, 0x8da: 0x0018, 0x8db: 0x0018, 0x8dc: 0x0018, 0x8dd: 0x0018, + 0x8de: 0x0018, 0x8df: 0x0008, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x3308, 0x8e3: 0x3308, + 0x8e4: 0x0040, 0x8e5: 0x0040, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0008, + 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008, + 0x8f0: 0x0018, 0x8f1: 0x0018, 0x8f2: 0x0018, 0x8f3: 0x0018, 0x8f4: 0x0018, 0x8f5: 0x0018, + 0x8f6: 0x0018, 0x8f7: 0x0018, 0x8f8: 0x0018, 0x8f9: 0x0018, 0x8fa: 0x0008, 0x8fb: 0x0008, + 0x8fc: 0x0008, 0x8fd: 0x0008, 0x8fe: 0x0008, 0x8ff: 0x0008, // Block 0x24, offset 0x900 - 0x900: 0x1308, 0x901: 0x0ca1, 0x902: 0x1308, 0x903: 0x1308, 0x904: 0x1b08, 0x905: 0x0018, - 0x906: 0x1308, 0x907: 0x1308, 0x908: 0x0008, 0x909: 0x0008, 0x90a: 0x0008, 0x90b: 0x0008, - 0x90c: 0x0008, 0x90d: 0x1308, 0x90e: 0x1308, 0x90f: 0x1308, 0x910: 0x1308, 0x911: 0x1308, - 0x912: 0x1308, 0x913: 0x0cd9, 0x914: 0x1308, 0x915: 0x1308, 0x916: 0x1308, 0x917: 0x1308, - 0x918: 0x0040, 0x919: 0x1308, 0x91a: 0x1308, 0x91b: 0x1308, 0x91c: 0x1308, 0x91d: 0x0d11, - 0x91e: 0x1308, 0x91f: 0x1308, 0x920: 0x1308, 0x921: 0x1308, 0x922: 0x0d49, 0x923: 0x1308, - 0x924: 0x1308, 0x925: 0x1308, 0x926: 0x1308, 0x927: 0x0d81, 0x928: 0x1308, 0x929: 0x1308, - 0x92a: 0x1308, 0x92b: 0x1308, 0x92c: 0x0db9, 0x92d: 0x1308, 0x92e: 0x1308, 0x92f: 0x1308, - 0x930: 0x1308, 0x931: 0x1308, 0x932: 0x1308, 0x933: 0x1308, 0x934: 0x1308, 0x935: 0x1308, - 0x936: 0x1308, 0x937: 0x1308, 0x938: 0x1308, 0x939: 0x0df1, 0x93a: 0x1308, 0x93b: 0x1308, - 0x93c: 0x1308, 0x93d: 0x0040, 0x93e: 0x0018, 0x93f: 0x0018, + 0x900: 0x0040, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x0040, 0x904: 0x0008, 0x905: 0x0040, + 0x906: 0x0040, 0x907: 0x0008, 0x908: 0x0008, 0x909: 
0x0040, 0x90a: 0x0008, 0x90b: 0x0040, + 0x90c: 0x0040, 0x90d: 0x0008, 0x90e: 0x0040, 0x90f: 0x0040, 0x910: 0x0040, 0x911: 0x0040, + 0x912: 0x0040, 0x913: 0x0040, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0008, + 0x918: 0x0040, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0008, 0x91d: 0x0008, + 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0040, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008, + 0x924: 0x0040, 0x925: 0x0008, 0x926: 0x0040, 0x927: 0x0008, 0x928: 0x0040, 0x929: 0x0040, + 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0040, 0x92d: 0x0008, 0x92e: 0x0008, 0x92f: 0x0008, + 0x930: 0x0008, 0x931: 0x3308, 0x932: 0x0008, 0x933: 0x0929, 0x934: 0x3308, 0x935: 0x3308, + 0x936: 0x3308, 0x937: 0x3308, 0x938: 0x3308, 0x939: 0x3308, 0x93a: 0x0040, 0x93b: 0x3308, + 0x93c: 0x3308, 0x93d: 0x0008, 0x93e: 0x0040, 0x93f: 0x0040, // Block 0x25, offset 0x940 - 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x0008, 0x944: 0x0008, 0x945: 0x0008, - 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0008, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008, - 0x94c: 0x0008, 0x94d: 0x0008, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008, - 0x952: 0x0008, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0008, - 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0008, 0x95d: 0x0008, + 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x09d1, 0x944: 0x0008, 0x945: 0x0008, + 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0040, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008, + 0x94c: 0x0008, 0x94d: 0x0a09, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008, + 0x952: 0x0a41, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0a79, + 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0ab1, 0x95d: 0x0008, 0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 0x0008, 0x962: 0x0008, 0x963: 0x0008, - 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0008, - 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0039, 0x96d: 0x0ed1, 0x96e: 0x0ee9, 0x96f: 0x0008, - 0x970: 0x0ef9, 0x971: 0x0f09, 0x972: 0x0f19, 0x973: 0x0f31, 0x974: 0x0249, 0x975: 0x0f41, - 0x976: 0x0259, 0x977: 0x0f51, 0x978: 0x0359, 0x979: 0x0f61, 0x97a: 0x0f71, 0x97b: 0x0008, - 0x97c: 0x00d9, 0x97d: 0x0f81, 0x97e: 0x0f99, 0x97f: 0x0269, + 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0ae9, + 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0008, 0x96d: 0x0040, 0x96e: 0x0040, 0x96f: 0x0040, + 0x970: 0x0040, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x0b21, 0x974: 0x3308, 0x975: 0x0b59, + 0x976: 0x0b91, 0x977: 0x0bc9, 0x978: 0x0c19, 0x979: 0x0c51, 0x97a: 0x3308, 0x97b: 0x3308, + 0x97c: 0x3308, 0x97d: 0x3308, 0x97e: 0x3308, 0x97f: 0x3008, // Block 0x26, offset 0x980 - 0x980: 0x0fa9, 0x981: 0x0fb9, 0x982: 0x0279, 0x983: 0x0039, 0x984: 0x0fc9, 0x985: 0x0fe1, - 0x986: 0x059d, 0x987: 0x0ee9, 0x988: 0x0ef9, 0x989: 0x0f09, 0x98a: 0x0ff9, 0x98b: 0x1011, - 0x98c: 0x1029, 0x98d: 0x0f31, 0x98e: 0x0008, 0x98f: 0x0f51, 0x990: 0x0f61, 0x991: 0x1041, - 0x992: 0x00d9, 0x993: 0x1059, 0x994: 0x05b5, 0x995: 0x05b5, 0x996: 0x0f99, 0x997: 0x0fa9, - 0x998: 0x0fb9, 0x999: 0x059d, 0x99a: 0x1071, 0x99b: 0x1089, 0x99c: 0x05cd, 0x99d: 0x1099, - 0x99e: 0x10b1, 0x99f: 0x10c9, 0x9a0: 0x10e1, 0x9a1: 0x10f9, 0x9a2: 0x0f41, 0x9a3: 0x0269, - 0x9a4: 0x0fb9, 0x9a5: 0x1089, 0x9a6: 0x1099, 0x9a7: 0x10b1, 0x9a8: 0x1111, 0x9a9: 0x10e1, - 0x9aa: 0x10f9, 0x9ab: 0x0008, 0x9ac: 0x0008, 0x9ad: 0x0008, 0x9ae: 0x0008, 0x9af: 0x0008, - 0x9b0: 0x0008, 0x9b1: 0x0008, 0x9b2: 0x0008, 0x9b3: 
0x0008, 0x9b4: 0x0008, 0x9b5: 0x0008, - 0x9b6: 0x0008, 0x9b7: 0x0008, 0x9b8: 0x1129, 0x9b9: 0x0008, 0x9ba: 0x0008, 0x9bb: 0x0008, - 0x9bc: 0x0008, 0x9bd: 0x0008, 0x9be: 0x0008, 0x9bf: 0x0008, + 0x980: 0x3308, 0x981: 0x0ca1, 0x982: 0x3308, 0x983: 0x3308, 0x984: 0x3b08, 0x985: 0x0018, + 0x986: 0x3308, 0x987: 0x3308, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008, + 0x98c: 0x0008, 0x98d: 0x3308, 0x98e: 0x3308, 0x98f: 0x3308, 0x990: 0x3308, 0x991: 0x3308, + 0x992: 0x3308, 0x993: 0x0cd9, 0x994: 0x3308, 0x995: 0x3308, 0x996: 0x3308, 0x997: 0x3308, + 0x998: 0x0040, 0x999: 0x3308, 0x99a: 0x3308, 0x99b: 0x3308, 0x99c: 0x3308, 0x99d: 0x0d11, + 0x99e: 0x3308, 0x99f: 0x3308, 0x9a0: 0x3308, 0x9a1: 0x3308, 0x9a2: 0x0d49, 0x9a3: 0x3308, + 0x9a4: 0x3308, 0x9a5: 0x3308, 0x9a6: 0x3308, 0x9a7: 0x0d81, 0x9a8: 0x3308, 0x9a9: 0x3308, + 0x9aa: 0x3308, 0x9ab: 0x3308, 0x9ac: 0x0db9, 0x9ad: 0x3308, 0x9ae: 0x3308, 0x9af: 0x3308, + 0x9b0: 0x3308, 0x9b1: 0x3308, 0x9b2: 0x3308, 0x9b3: 0x3308, 0x9b4: 0x3308, 0x9b5: 0x3308, + 0x9b6: 0x3308, 0x9b7: 0x3308, 0x9b8: 0x3308, 0x9b9: 0x0df1, 0x9ba: 0x3308, 0x9bb: 0x3308, + 0x9bc: 0x3308, 0x9bd: 0x0040, 0x9be: 0x0018, 0x9bf: 0x0018, // Block 0x27, offset 0x9c0 0x9c0: 0x0008, 0x9c1: 0x0008, 0x9c2: 0x0008, 0x9c3: 0x0008, 0x9c4: 0x0008, 0x9c5: 0x0008, 0x9c6: 0x0008, 0x9c7: 0x0008, 0x9c8: 0x0008, 0x9c9: 0x0008, 0x9ca: 0x0008, 0x9cb: 0x0008, 0x9cc: 0x0008, 0x9cd: 0x0008, 0x9ce: 0x0008, 0x9cf: 0x0008, 0x9d0: 0x0008, 0x9d1: 0x0008, 0x9d2: 0x0008, 0x9d3: 0x0008, 0x9d4: 0x0008, 0x9d5: 0x0008, 0x9d6: 0x0008, 0x9d7: 0x0008, - 0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x1141, 0x9dc: 0x1159, 0x9dd: 0x1169, - 0x9de: 0x1181, 0x9df: 0x1029, 0x9e0: 0x1199, 0x9e1: 0x11a9, 0x9e2: 0x11c1, 0x9e3: 0x11d9, - 0x9e4: 0x11f1, 0x9e5: 0x1209, 0x9e6: 0x1221, 0x9e7: 0x05e5, 0x9e8: 0x1239, 0x9e9: 0x1251, - 0x9ea: 0xe17d, 0x9eb: 0x1269, 0x9ec: 0x1281, 0x9ed: 0x1299, 0x9ee: 0x12b1, 0x9ef: 0x12c9, - 0x9f0: 0x12e1, 0x9f1: 0x12f9, 0x9f2: 0x1311, 0x9f3: 0x1329, 0x9f4: 0x1341, 0x9f5: 0x1359, - 0x9f6: 0x1371, 0x9f7: 0x1389, 0x9f8: 0x05fd, 0x9f9: 0x13a1, 0x9fa: 0x13b9, 0x9fb: 0x13d1, - 0x9fc: 0x13e1, 0x9fd: 0x13f9, 0x9fe: 0x1411, 0x9ff: 0x1429, + 0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x0008, 0x9dc: 0x0008, 0x9dd: 0x0008, + 0x9de: 0x0008, 0x9df: 0x0008, 0x9e0: 0x0008, 0x9e1: 0x0008, 0x9e2: 0x0008, 0x9e3: 0x0008, + 0x9e4: 0x0008, 0x9e5: 0x0008, 0x9e6: 0x0008, 0x9e7: 0x0008, 0x9e8: 0x0008, 0x9e9: 0x0008, + 0x9ea: 0x0008, 0x9eb: 0x0008, 0x9ec: 0x0039, 0x9ed: 0x0ed1, 0x9ee: 0x0ee9, 0x9ef: 0x0008, + 0x9f0: 0x0ef9, 0x9f1: 0x0f09, 0x9f2: 0x0f19, 0x9f3: 0x0f31, 0x9f4: 0x0249, 0x9f5: 0x0f41, + 0x9f6: 0x0259, 0x9f7: 0x0f51, 0x9f8: 0x0359, 0x9f9: 0x0f61, 0x9fa: 0x0f71, 0x9fb: 0x0008, + 0x9fc: 0x00d9, 0x9fd: 0x0f81, 0x9fe: 0x0f99, 0x9ff: 0x0269, // Block 0x28, offset 0xa00 - 0xa00: 0xe00d, 0xa01: 0x0008, 0xa02: 0xe00d, 0xa03: 0x0008, 0xa04: 0xe00d, 0xa05: 0x0008, - 0xa06: 0xe00d, 0xa07: 0x0008, 0xa08: 0xe00d, 0xa09: 0x0008, 0xa0a: 0xe00d, 0xa0b: 0x0008, - 0xa0c: 0xe00d, 0xa0d: 0x0008, 0xa0e: 0xe00d, 0xa0f: 0x0008, 0xa10: 0xe00d, 0xa11: 0x0008, - 0xa12: 0xe00d, 0xa13: 0x0008, 0xa14: 0xe00d, 0xa15: 0x0008, 0xa16: 0xe00d, 0xa17: 0x0008, - 0xa18: 0xe00d, 0xa19: 0x0008, 0xa1a: 0xe00d, 0xa1b: 0x0008, 0xa1c: 0xe00d, 0xa1d: 0x0008, - 0xa1e: 0xe00d, 0xa1f: 0x0008, 0xa20: 0xe00d, 0xa21: 0x0008, 0xa22: 0xe00d, 0xa23: 0x0008, - 0xa24: 0xe00d, 0xa25: 0x0008, 0xa26: 0xe00d, 0xa27: 0x0008, 0xa28: 0xe00d, 0xa29: 0x0008, - 0xa2a: 0xe00d, 0xa2b: 0x0008, 0xa2c: 0xe00d, 0xa2d: 0x0008, 0xa2e: 0xe00d, 0xa2f: 0x0008, 
- 0xa30: 0xe00d, 0xa31: 0x0008, 0xa32: 0xe00d, 0xa33: 0x0008, 0xa34: 0xe00d, 0xa35: 0x0008, - 0xa36: 0xe00d, 0xa37: 0x0008, 0xa38: 0xe00d, 0xa39: 0x0008, 0xa3a: 0xe00d, 0xa3b: 0x0008, - 0xa3c: 0xe00d, 0xa3d: 0x0008, 0xa3e: 0xe00d, 0xa3f: 0x0008, + 0xa00: 0x0fa9, 0xa01: 0x0fb9, 0xa02: 0x0279, 0xa03: 0x0039, 0xa04: 0x0fc9, 0xa05: 0x0fe1, + 0xa06: 0x059d, 0xa07: 0x0ee9, 0xa08: 0x0ef9, 0xa09: 0x0f09, 0xa0a: 0x0ff9, 0xa0b: 0x1011, + 0xa0c: 0x1029, 0xa0d: 0x0f31, 0xa0e: 0x0008, 0xa0f: 0x0f51, 0xa10: 0x0f61, 0xa11: 0x1041, + 0xa12: 0x00d9, 0xa13: 0x1059, 0xa14: 0x05b5, 0xa15: 0x05b5, 0xa16: 0x0f99, 0xa17: 0x0fa9, + 0xa18: 0x0fb9, 0xa19: 0x059d, 0xa1a: 0x1071, 0xa1b: 0x1089, 0xa1c: 0x05cd, 0xa1d: 0x1099, + 0xa1e: 0x10b1, 0xa1f: 0x10c9, 0xa20: 0x10e1, 0xa21: 0x10f9, 0xa22: 0x0f41, 0xa23: 0x0269, + 0xa24: 0x0fb9, 0xa25: 0x1089, 0xa26: 0x1099, 0xa27: 0x10b1, 0xa28: 0x1111, 0xa29: 0x10e1, + 0xa2a: 0x10f9, 0xa2b: 0x0008, 0xa2c: 0x0008, 0xa2d: 0x0008, 0xa2e: 0x0008, 0xa2f: 0x0008, + 0xa30: 0x0008, 0xa31: 0x0008, 0xa32: 0x0008, 0xa33: 0x0008, 0xa34: 0x0008, 0xa35: 0x0008, + 0xa36: 0x0008, 0xa37: 0x0008, 0xa38: 0x1129, 0xa39: 0x0008, 0xa3a: 0x0008, 0xa3b: 0x0008, + 0xa3c: 0x0008, 0xa3d: 0x0008, 0xa3e: 0x0008, 0xa3f: 0x0008, // Block 0x29, offset 0xa40 - 0xa40: 0xe00d, 0xa41: 0x0008, 0xa42: 0xe00d, 0xa43: 0x0008, 0xa44: 0xe00d, 0xa45: 0x0008, - 0xa46: 0xe00d, 0xa47: 0x0008, 0xa48: 0xe00d, 0xa49: 0x0008, 0xa4a: 0xe00d, 0xa4b: 0x0008, - 0xa4c: 0xe00d, 0xa4d: 0x0008, 0xa4e: 0xe00d, 0xa4f: 0x0008, 0xa50: 0xe00d, 0xa51: 0x0008, - 0xa52: 0xe00d, 0xa53: 0x0008, 0xa54: 0xe00d, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008, - 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0615, 0xa5b: 0x0635, 0xa5c: 0x0008, 0xa5d: 0x0008, - 0xa5e: 0x1441, 0xa5f: 0x0008, 0xa60: 0xe00d, 0xa61: 0x0008, 0xa62: 0xe00d, 0xa63: 0x0008, - 0xa64: 0xe00d, 0xa65: 0x0008, 0xa66: 0xe00d, 0xa67: 0x0008, 0xa68: 0xe00d, 0xa69: 0x0008, - 0xa6a: 0xe00d, 0xa6b: 0x0008, 0xa6c: 0xe00d, 0xa6d: 0x0008, 0xa6e: 0xe00d, 0xa6f: 0x0008, - 0xa70: 0xe00d, 0xa71: 0x0008, 0xa72: 0xe00d, 0xa73: 0x0008, 0xa74: 0xe00d, 0xa75: 0x0008, - 0xa76: 0xe00d, 0xa77: 0x0008, 0xa78: 0xe00d, 0xa79: 0x0008, 0xa7a: 0xe00d, 0xa7b: 0x0008, - 0xa7c: 0xe00d, 0xa7d: 0x0008, 0xa7e: 0xe00d, 0xa7f: 0x0008, + 0xa40: 0x0008, 0xa41: 0x0008, 0xa42: 0x0008, 0xa43: 0x0008, 0xa44: 0x0008, 0xa45: 0x0008, + 0xa46: 0x0008, 0xa47: 0x0008, 0xa48: 0x0008, 0xa49: 0x0008, 0xa4a: 0x0008, 0xa4b: 0x0008, + 0xa4c: 0x0008, 0xa4d: 0x0008, 0xa4e: 0x0008, 0xa4f: 0x0008, 0xa50: 0x0008, 0xa51: 0x0008, + 0xa52: 0x0008, 0xa53: 0x0008, 0xa54: 0x0008, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008, + 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0008, 0xa5b: 0x1141, 0xa5c: 0x1159, 0xa5d: 0x1169, + 0xa5e: 0x1181, 0xa5f: 0x1029, 0xa60: 0x1199, 0xa61: 0x11a9, 0xa62: 0x11c1, 0xa63: 0x11d9, + 0xa64: 0x11f1, 0xa65: 0x1209, 0xa66: 0x1221, 0xa67: 0x05e5, 0xa68: 0x1239, 0xa69: 0x1251, + 0xa6a: 0xe17d, 0xa6b: 0x1269, 0xa6c: 0x1281, 0xa6d: 0x1299, 0xa6e: 0x12b1, 0xa6f: 0x12c9, + 0xa70: 0x12e1, 0xa71: 0x12f9, 0xa72: 0x1311, 0xa73: 0x1329, 0xa74: 0x1341, 0xa75: 0x1359, + 0xa76: 0x1371, 0xa77: 0x1389, 0xa78: 0x05fd, 0xa79: 0x13a1, 0xa7a: 0x13b9, 0xa7b: 0x13d1, + 0xa7c: 0x13e1, 0xa7d: 0x13f9, 0xa7e: 0x1411, 0xa7f: 0x1429, // Block 0x2a, offset 0xa80 - 0xa80: 0x0008, 0xa81: 0x0008, 0xa82: 0x0008, 0xa83: 0x0008, 0xa84: 0x0008, 0xa85: 0x0008, - 0xa86: 0x0040, 0xa87: 0x0040, 0xa88: 0xe045, 0xa89: 0xe045, 0xa8a: 0xe045, 0xa8b: 0xe045, - 0xa8c: 0xe045, 0xa8d: 0xe045, 0xa8e: 0x0040, 0xa8f: 0x0040, 0xa90: 0x0008, 0xa91: 0x0008, - 0xa92: 0x0008, 0xa93: 
0x0008, 0xa94: 0x0008, 0xa95: 0x0008, 0xa96: 0x0008, 0xa97: 0x0008, - 0xa98: 0x0040, 0xa99: 0xe045, 0xa9a: 0x0040, 0xa9b: 0xe045, 0xa9c: 0x0040, 0xa9d: 0xe045, - 0xa9e: 0x0040, 0xa9f: 0xe045, 0xaa0: 0x0008, 0xaa1: 0x0008, 0xaa2: 0x0008, 0xaa3: 0x0008, - 0xaa4: 0x0008, 0xaa5: 0x0008, 0xaa6: 0x0008, 0xaa7: 0x0008, 0xaa8: 0xe045, 0xaa9: 0xe045, - 0xaaa: 0xe045, 0xaab: 0xe045, 0xaac: 0xe045, 0xaad: 0xe045, 0xaae: 0xe045, 0xaaf: 0xe045, - 0xab0: 0x0008, 0xab1: 0x1459, 0xab2: 0x0008, 0xab3: 0x1471, 0xab4: 0x0008, 0xab5: 0x1489, - 0xab6: 0x0008, 0xab7: 0x14a1, 0xab8: 0x0008, 0xab9: 0x14b9, 0xaba: 0x0008, 0xabb: 0x14d1, - 0xabc: 0x0008, 0xabd: 0x14e9, 0xabe: 0x0040, 0xabf: 0x0040, + 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008, + 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008, + 0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008, + 0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0xe00d, 0xa97: 0x0008, + 0xa98: 0xe00d, 0xa99: 0x0008, 0xa9a: 0xe00d, 0xa9b: 0x0008, 0xa9c: 0xe00d, 0xa9d: 0x0008, + 0xa9e: 0xe00d, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008, + 0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008, + 0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008, + 0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008, + 0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008, + 0xabc: 0xe00d, 0xabd: 0x0008, 0xabe: 0xe00d, 0xabf: 0x0008, // Block 0x2b, offset 0xac0 - 0xac0: 0x1501, 0xac1: 0x1531, 0xac2: 0x1561, 0xac3: 0x1591, 0xac4: 0x15c1, 0xac5: 0x15f1, - 0xac6: 0x1621, 0xac7: 0x1651, 0xac8: 0x1501, 0xac9: 0x1531, 0xaca: 0x1561, 0xacb: 0x1591, - 0xacc: 0x15c1, 0xacd: 0x15f1, 0xace: 0x1621, 0xacf: 0x1651, 0xad0: 0x1681, 0xad1: 0x16b1, - 0xad2: 0x16e1, 0xad3: 0x1711, 0xad4: 0x1741, 0xad5: 0x1771, 0xad6: 0x17a1, 0xad7: 0x17d1, - 0xad8: 0x1681, 0xad9: 0x16b1, 0xada: 0x16e1, 0xadb: 0x1711, 0xadc: 0x1741, 0xadd: 0x1771, - 0xade: 0x17a1, 0xadf: 0x17d1, 0xae0: 0x1801, 0xae1: 0x1831, 0xae2: 0x1861, 0xae3: 0x1891, - 0xae4: 0x18c1, 0xae5: 0x18f1, 0xae6: 0x1921, 0xae7: 0x1951, 0xae8: 0x1801, 0xae9: 0x1831, - 0xaea: 0x1861, 0xaeb: 0x1891, 0xaec: 0x18c1, 0xaed: 0x18f1, 0xaee: 0x1921, 0xaef: 0x1951, - 0xaf0: 0x0008, 0xaf1: 0x0008, 0xaf2: 0x1981, 0xaf3: 0x19b1, 0xaf4: 0x19d9, 0xaf5: 0x0040, - 0xaf6: 0x0008, 0xaf7: 0x1a01, 0xaf8: 0xe045, 0xaf9: 0xe045, 0xafa: 0x064d, 0xafb: 0x1459, - 0xafc: 0x19b1, 0xafd: 0x0666, 0xafe: 0x1a31, 0xaff: 0x0686, + 0xac0: 0xe00d, 0xac1: 0x0008, 0xac2: 0xe00d, 0xac3: 0x0008, 0xac4: 0xe00d, 0xac5: 0x0008, + 0xac6: 0xe00d, 0xac7: 0x0008, 0xac8: 0xe00d, 0xac9: 0x0008, 0xaca: 0xe00d, 0xacb: 0x0008, + 0xacc: 0xe00d, 0xacd: 0x0008, 0xace: 0xe00d, 0xacf: 0x0008, 0xad0: 0xe00d, 0xad1: 0x0008, + 0xad2: 0xe00d, 0xad3: 0x0008, 0xad4: 0xe00d, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008, + 0xad8: 0x0008, 0xad9: 0x0008, 0xada: 0x0615, 0xadb: 0x0635, 0xadc: 0x0008, 0xadd: 0x0008, + 0xade: 0x1441, 0xadf: 0x0008, 0xae0: 0xe00d, 0xae1: 0x0008, 0xae2: 0xe00d, 0xae3: 0x0008, + 0xae4: 0xe00d, 0xae5: 0x0008, 0xae6: 0xe00d, 0xae7: 0x0008, 0xae8: 0xe00d, 0xae9: 0x0008, + 0xaea: 0xe00d, 0xaeb: 0x0008, 0xaec: 0xe00d, 0xaed: 0x0008, 0xaee: 0xe00d, 0xaef: 0x0008, + 0xaf0: 0xe00d, 0xaf1: 0x0008, 0xaf2: 0xe00d, 0xaf3: 0x0008, 0xaf4: 0xe00d, 0xaf5: 0x0008, + 0xaf6: 0xe00d, 0xaf7: 0x0008, 0xaf8: 0xe00d, 
0xaf9: 0x0008, 0xafa: 0xe00d, 0xafb: 0x0008, + 0xafc: 0xe00d, 0xafd: 0x0008, 0xafe: 0xe00d, 0xaff: 0x0008, // Block 0x2c, offset 0xb00 - 0xb00: 0x06a6, 0xb01: 0x1a4a, 0xb02: 0x1a79, 0xb03: 0x1aa9, 0xb04: 0x1ad1, 0xb05: 0x0040, - 0xb06: 0x0008, 0xb07: 0x1af9, 0xb08: 0x06c5, 0xb09: 0x1471, 0xb0a: 0x06dd, 0xb0b: 0x1489, - 0xb0c: 0x1aa9, 0xb0d: 0x1b2a, 0xb0e: 0x1b5a, 0xb0f: 0x1b8a, 0xb10: 0x0008, 0xb11: 0x0008, - 0xb12: 0x0008, 0xb13: 0x1bb9, 0xb14: 0x0040, 0xb15: 0x0040, 0xb16: 0x0008, 0xb17: 0x0008, - 0xb18: 0xe045, 0xb19: 0xe045, 0xb1a: 0x06f5, 0xb1b: 0x14a1, 0xb1c: 0x0040, 0xb1d: 0x1bd2, - 0xb1e: 0x1c02, 0xb1f: 0x1c32, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 0xb23: 0x1c61, + 0xb00: 0x0008, 0xb01: 0x0008, 0xb02: 0x0008, 0xb03: 0x0008, 0xb04: 0x0008, 0xb05: 0x0008, + 0xb06: 0x0040, 0xb07: 0x0040, 0xb08: 0xe045, 0xb09: 0xe045, 0xb0a: 0xe045, 0xb0b: 0xe045, + 0xb0c: 0xe045, 0xb0d: 0xe045, 0xb0e: 0x0040, 0xb0f: 0x0040, 0xb10: 0x0008, 0xb11: 0x0008, + 0xb12: 0x0008, 0xb13: 0x0008, 0xb14: 0x0008, 0xb15: 0x0008, 0xb16: 0x0008, 0xb17: 0x0008, + 0xb18: 0x0040, 0xb19: 0xe045, 0xb1a: 0x0040, 0xb1b: 0xe045, 0xb1c: 0x0040, 0xb1d: 0xe045, + 0xb1e: 0x0040, 0xb1f: 0xe045, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 0xb23: 0x0008, 0xb24: 0x0008, 0xb25: 0x0008, 0xb26: 0x0008, 0xb27: 0x0008, 0xb28: 0xe045, 0xb29: 0xe045, - 0xb2a: 0x070d, 0xb2b: 0x14d1, 0xb2c: 0xe04d, 0xb2d: 0x1c7a, 0xb2e: 0x03d2, 0xb2f: 0x1caa, - 0xb30: 0x0040, 0xb31: 0x0040, 0xb32: 0x1cb9, 0xb33: 0x1ce9, 0xb34: 0x1d11, 0xb35: 0x0040, - 0xb36: 0x0008, 0xb37: 0x1d39, 0xb38: 0x0725, 0xb39: 0x14b9, 0xb3a: 0x0515, 0xb3b: 0x14e9, - 0xb3c: 0x1ce9, 0xb3d: 0x073e, 0xb3e: 0x075e, 0xb3f: 0x0040, + 0xb2a: 0xe045, 0xb2b: 0xe045, 0xb2c: 0xe045, 0xb2d: 0xe045, 0xb2e: 0xe045, 0xb2f: 0xe045, + 0xb30: 0x0008, 0xb31: 0x1459, 0xb32: 0x0008, 0xb33: 0x1471, 0xb34: 0x0008, 0xb35: 0x1489, + 0xb36: 0x0008, 0xb37: 0x14a1, 0xb38: 0x0008, 0xb39: 0x14b9, 0xb3a: 0x0008, 0xb3b: 0x14d1, + 0xb3c: 0x0008, 0xb3d: 0x14e9, 0xb3e: 0x0040, 0xb3f: 0x0040, // Block 0x2d, offset 0xb40 - 0xb40: 0x000a, 0xb41: 0x000a, 0xb42: 0x000a, 0xb43: 0x000a, 0xb44: 0x000a, 0xb45: 0x000a, - 0xb46: 0x000a, 0xb47: 0x000a, 0xb48: 0x000a, 0xb49: 0x000a, 0xb4a: 0x000a, 0xb4b: 0x03c0, - 0xb4c: 0x0003, 0xb4d: 0x0003, 0xb4e: 0x0340, 0xb4f: 0x0340, 0xb50: 0x0018, 0xb51: 0xe00d, - 0xb52: 0x0018, 0xb53: 0x0018, 0xb54: 0x0018, 0xb55: 0x0018, 0xb56: 0x0018, 0xb57: 0x077e, - 0xb58: 0x0018, 0xb59: 0x0018, 0xb5a: 0x0018, 0xb5b: 0x0018, 0xb5c: 0x0018, 0xb5d: 0x0018, - 0xb5e: 0x0018, 0xb5f: 0x0018, 0xb60: 0x0018, 0xb61: 0x0018, 0xb62: 0x0018, 0xb63: 0x0018, - 0xb64: 0x0040, 0xb65: 0x0040, 0xb66: 0x0040, 0xb67: 0x0018, 0xb68: 0x0040, 0xb69: 0x0040, - 0xb6a: 0x0340, 0xb6b: 0x0340, 0xb6c: 0x0340, 0xb6d: 0x0340, 0xb6e: 0x0340, 0xb6f: 0x000a, - 0xb70: 0x0018, 0xb71: 0x0018, 0xb72: 0x0018, 0xb73: 0x1d69, 0xb74: 0x1da1, 0xb75: 0x0018, - 0xb76: 0x1df1, 0xb77: 0x1e29, 0xb78: 0x0018, 0xb79: 0x0018, 0xb7a: 0x0018, 0xb7b: 0x0018, - 0xb7c: 0x1e7a, 0xb7d: 0x0018, 0xb7e: 0x079e, 0xb7f: 0x0018, + 0xb40: 0x1501, 0xb41: 0x1531, 0xb42: 0x1561, 0xb43: 0x1591, 0xb44: 0x15c1, 0xb45: 0x15f1, + 0xb46: 0x1621, 0xb47: 0x1651, 0xb48: 0x1501, 0xb49: 0x1531, 0xb4a: 0x1561, 0xb4b: 0x1591, + 0xb4c: 0x15c1, 0xb4d: 0x15f1, 0xb4e: 0x1621, 0xb4f: 0x1651, 0xb50: 0x1681, 0xb51: 0x16b1, + 0xb52: 0x16e1, 0xb53: 0x1711, 0xb54: 0x1741, 0xb55: 0x1771, 0xb56: 0x17a1, 0xb57: 0x17d1, + 0xb58: 0x1681, 0xb59: 0x16b1, 0xb5a: 0x16e1, 0xb5b: 0x1711, 0xb5c: 0x1741, 0xb5d: 0x1771, + 0xb5e: 0x17a1, 0xb5f: 0x17d1, 0xb60: 0x1801, 0xb61: 0x1831, 0xb62: 0x1861, 
0xb63: 0x1891, + 0xb64: 0x18c1, 0xb65: 0x18f1, 0xb66: 0x1921, 0xb67: 0x1951, 0xb68: 0x1801, 0xb69: 0x1831, + 0xb6a: 0x1861, 0xb6b: 0x1891, 0xb6c: 0x18c1, 0xb6d: 0x18f1, 0xb6e: 0x1921, 0xb6f: 0x1951, + 0xb70: 0x0008, 0xb71: 0x0008, 0xb72: 0x1981, 0xb73: 0x19b1, 0xb74: 0x19d9, 0xb75: 0x0040, + 0xb76: 0x0008, 0xb77: 0x1a01, 0xb78: 0xe045, 0xb79: 0xe045, 0xb7a: 0x064d, 0xb7b: 0x1459, + 0xb7c: 0x19b1, 0xb7d: 0x0666, 0xb7e: 0x1a31, 0xb7f: 0x0686, // Block 0x2e, offset 0xb80 - 0xb80: 0x0018, 0xb81: 0x0018, 0xb82: 0x0018, 0xb83: 0x0018, 0xb84: 0x0018, 0xb85: 0x0018, - 0xb86: 0x0018, 0xb87: 0x1e92, 0xb88: 0x1eaa, 0xb89: 0x1ec2, 0xb8a: 0x0018, 0xb8b: 0x0018, - 0xb8c: 0x0018, 0xb8d: 0x0018, 0xb8e: 0x0018, 0xb8f: 0x0018, 0xb90: 0x0018, 0xb91: 0x0018, - 0xb92: 0x0018, 0xb93: 0x0018, 0xb94: 0x0018, 0xb95: 0x0018, 0xb96: 0x0018, 0xb97: 0x1ed9, - 0xb98: 0x0018, 0xb99: 0x0018, 0xb9a: 0x0018, 0xb9b: 0x0018, 0xb9c: 0x0018, 0xb9d: 0x0018, - 0xb9e: 0x0018, 0xb9f: 0x000a, 0xba0: 0x03c0, 0xba1: 0x0340, 0xba2: 0x0340, 0xba3: 0x0340, - 0xba4: 0x03c0, 0xba5: 0x0040, 0xba6: 0x0040, 0xba7: 0x0040, 0xba8: 0x0040, 0xba9: 0x0040, - 0xbaa: 0x0340, 0xbab: 0x0340, 0xbac: 0x0340, 0xbad: 0x0340, 0xbae: 0x0340, 0xbaf: 0x0340, - 0xbb0: 0x1f41, 0xbb1: 0x0f41, 0xbb2: 0x0040, 0xbb3: 0x0040, 0xbb4: 0x1f51, 0xbb5: 0x1f61, - 0xbb6: 0x1f71, 0xbb7: 0x1f81, 0xbb8: 0x1f91, 0xbb9: 0x1fa1, 0xbba: 0x1fb2, 0xbbb: 0x07bd, - 0xbbc: 0x1fc2, 0xbbd: 0x1fd2, 0xbbe: 0x1fe2, 0xbbf: 0x0f71, + 0xb80: 0x06a6, 0xb81: 0x1a4a, 0xb82: 0x1a79, 0xb83: 0x1aa9, 0xb84: 0x1ad1, 0xb85: 0x0040, + 0xb86: 0x0008, 0xb87: 0x1af9, 0xb88: 0x06c5, 0xb89: 0x1471, 0xb8a: 0x06dd, 0xb8b: 0x1489, + 0xb8c: 0x1aa9, 0xb8d: 0x1b2a, 0xb8e: 0x1b5a, 0xb8f: 0x1b8a, 0xb90: 0x0008, 0xb91: 0x0008, + 0xb92: 0x0008, 0xb93: 0x1bb9, 0xb94: 0x0040, 0xb95: 0x0040, 0xb96: 0x0008, 0xb97: 0x0008, + 0xb98: 0xe045, 0xb99: 0xe045, 0xb9a: 0x06f5, 0xb9b: 0x14a1, 0xb9c: 0x0040, 0xb9d: 0x1bd2, + 0xb9e: 0x1c02, 0xb9f: 0x1c32, 0xba0: 0x0008, 0xba1: 0x0008, 0xba2: 0x0008, 0xba3: 0x1c61, + 0xba4: 0x0008, 0xba5: 0x0008, 0xba6: 0x0008, 0xba7: 0x0008, 0xba8: 0xe045, 0xba9: 0xe045, + 0xbaa: 0x070d, 0xbab: 0x14d1, 0xbac: 0xe04d, 0xbad: 0x1c7a, 0xbae: 0x03d2, 0xbaf: 0x1caa, + 0xbb0: 0x0040, 0xbb1: 0x0040, 0xbb2: 0x1cb9, 0xbb3: 0x1ce9, 0xbb4: 0x1d11, 0xbb5: 0x0040, + 0xbb6: 0x0008, 0xbb7: 0x1d39, 0xbb8: 0x0725, 0xbb9: 0x14b9, 0xbba: 0x0515, 0xbbb: 0x14e9, + 0xbbc: 0x1ce9, 0xbbd: 0x073e, 0xbbe: 0x075e, 0xbbf: 0x0040, // Block 0x2f, offset 0xbc0 - 0xbc0: 0x1f41, 0xbc1: 0x00c9, 0xbc2: 0x0069, 0xbc3: 0x0079, 0xbc4: 0x1f51, 0xbc5: 0x1f61, - 0xbc6: 0x1f71, 0xbc7: 0x1f81, 0xbc8: 0x1f91, 0xbc9: 0x1fa1, 0xbca: 0x1fb2, 0xbcb: 0x07d5, - 0xbcc: 0x1fc2, 0xbcd: 0x1fd2, 0xbce: 0x1fe2, 0xbcf: 0x0040, 0xbd0: 0x0039, 0xbd1: 0x0f09, - 0xbd2: 0x00d9, 0xbd3: 0x0369, 0xbd4: 0x0ff9, 0xbd5: 0x0249, 0xbd6: 0x0f51, 0xbd7: 0x0359, - 0xbd8: 0x0f61, 0xbd9: 0x0f71, 0xbda: 0x0f99, 0xbdb: 0x01d9, 0xbdc: 0x0fa9, 0xbdd: 0x0040, - 0xbde: 0x0040, 0xbdf: 0x0040, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018, - 0xbe4: 0x0018, 0xbe5: 0x0018, 0xbe6: 0x0018, 0xbe7: 0x0018, 0xbe8: 0x1ff1, 0xbe9: 0x0018, - 0xbea: 0x0018, 0xbeb: 0x0018, 0xbec: 0x0018, 0xbed: 0x0018, 0xbee: 0x0018, 0xbef: 0x0018, - 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x0018, 0xbf4: 0x0018, 0xbf5: 0x0018, - 0xbf6: 0x0018, 0xbf7: 0x0018, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018, - 0xbfc: 0x0018, 0xbfd: 0x0018, 0xbfe: 0x0018, 0xbff: 0x0040, + 0xbc0: 0x000a, 0xbc1: 0x000a, 0xbc2: 0x000a, 0xbc3: 0x000a, 0xbc4: 0x000a, 0xbc5: 0x000a, + 0xbc6: 
0x000a, 0xbc7: 0x000a, 0xbc8: 0x000a, 0xbc9: 0x000a, 0xbca: 0x000a, 0xbcb: 0x03c0, + 0xbcc: 0x0003, 0xbcd: 0x0003, 0xbce: 0x0340, 0xbcf: 0x0b40, 0xbd0: 0x0018, 0xbd1: 0xe00d, + 0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x077e, + 0xbd8: 0x0018, 0xbd9: 0x0018, 0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018, + 0xbde: 0x0018, 0xbdf: 0x0018, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018, + 0xbe4: 0x0040, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0018, 0xbe8: 0x0040, 0xbe9: 0x0040, + 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x000a, + 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x1d69, 0xbf4: 0x1da1, 0xbf5: 0x0018, + 0xbf6: 0x1df1, 0xbf7: 0x1e29, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018, + 0xbfc: 0x1e7a, 0xbfd: 0x0018, 0xbfe: 0x079e, 0xbff: 0x0018, // Block 0x30, offset 0xc00 - 0xc00: 0x07ee, 0xc01: 0x080e, 0xc02: 0x1159, 0xc03: 0x082d, 0xc04: 0x0018, 0xc05: 0x084e, - 0xc06: 0x086e, 0xc07: 0x1011, 0xc08: 0x0018, 0xc09: 0x088d, 0xc0a: 0x0f31, 0xc0b: 0x0249, - 0xc0c: 0x0249, 0xc0d: 0x0249, 0xc0e: 0x0249, 0xc0f: 0x2009, 0xc10: 0x0f41, 0xc11: 0x0f41, - 0xc12: 0x0359, 0xc13: 0x0359, 0xc14: 0x0018, 0xc15: 0x0f71, 0xc16: 0x2021, 0xc17: 0x0018, - 0xc18: 0x0018, 0xc19: 0x0f99, 0xc1a: 0x2039, 0xc1b: 0x0269, 0xc1c: 0x0269, 0xc1d: 0x0269, - 0xc1e: 0x0018, 0xc1f: 0x0018, 0xc20: 0x2049, 0xc21: 0x08ad, 0xc22: 0x2061, 0xc23: 0x0018, - 0xc24: 0x13d1, 0xc25: 0x0018, 0xc26: 0x2079, 0xc27: 0x0018, 0xc28: 0x13d1, 0xc29: 0x0018, - 0xc2a: 0x0f51, 0xc2b: 0x2091, 0xc2c: 0x0ee9, 0xc2d: 0x1159, 0xc2e: 0x0018, 0xc2f: 0x0f09, - 0xc30: 0x0f09, 0xc31: 0x1199, 0xc32: 0x0040, 0xc33: 0x0f61, 0xc34: 0x00d9, 0xc35: 0x20a9, - 0xc36: 0x20c1, 0xc37: 0x20d9, 0xc38: 0x20f1, 0xc39: 0x0f41, 0xc3a: 0x0018, 0xc3b: 0x08cd, - 0xc3c: 0x2109, 0xc3d: 0x10b1, 0xc3e: 0x10b1, 0xc3f: 0x2109, + 0xc00: 0x0018, 0xc01: 0x0018, 0xc02: 0x0018, 0xc03: 0x0018, 0xc04: 0x0018, 0xc05: 0x0018, + 0xc06: 0x0018, 0xc07: 0x1e92, 0xc08: 0x1eaa, 0xc09: 0x1ec2, 0xc0a: 0x0018, 0xc0b: 0x0018, + 0xc0c: 0x0018, 0xc0d: 0x0018, 0xc0e: 0x0018, 0xc0f: 0x0018, 0xc10: 0x0018, 0xc11: 0x0018, + 0xc12: 0x0018, 0xc13: 0x0018, 0xc14: 0x0018, 0xc15: 0x0018, 0xc16: 0x0018, 0xc17: 0x1ed9, + 0xc18: 0x0018, 0xc19: 0x0018, 0xc1a: 0x0018, 0xc1b: 0x0018, 0xc1c: 0x0018, 0xc1d: 0x0018, + 0xc1e: 0x0018, 0xc1f: 0x000a, 0xc20: 0x03c0, 0xc21: 0x0340, 0xc22: 0x0340, 0xc23: 0x0340, + 0xc24: 0x03c0, 0xc25: 0x0040, 0xc26: 0x0040, 0xc27: 0x0040, 0xc28: 0x0040, 0xc29: 0x0040, + 0xc2a: 0x0340, 0xc2b: 0x0340, 0xc2c: 0x0340, 0xc2d: 0x0340, 0xc2e: 0x0340, 0xc2f: 0x0340, + 0xc30: 0x1f41, 0xc31: 0x0f41, 0xc32: 0x0040, 0xc33: 0x0040, 0xc34: 0x1f51, 0xc35: 0x1f61, + 0xc36: 0x1f71, 0xc37: 0x1f81, 0xc38: 0x1f91, 0xc39: 0x1fa1, 0xc3a: 0x1fb2, 0xc3b: 0x07bd, + 0xc3c: 0x1fc2, 0xc3d: 0x1fd2, 0xc3e: 0x1fe2, 0xc3f: 0x0f71, // Block 0x31, offset 0xc40 - 0xc40: 0x08ed, 0xc41: 0x0018, 0xc42: 0x0018, 0xc43: 0x0018, 0xc44: 0x0018, 0xc45: 0x0ef9, - 0xc46: 0x0ef9, 0xc47: 0x0f09, 0xc48: 0x0f41, 0xc49: 0x0259, 0xc4a: 0x0018, 0xc4b: 0x0018, - 0xc4c: 0x0018, 0xc4d: 0x0018, 0xc4e: 0x0008, 0xc4f: 0x0018, 0xc50: 0x2121, 0xc51: 0x2151, - 0xc52: 0x2181, 0xc53: 0x21b9, 0xc54: 0x21e9, 0xc55: 0x2219, 0xc56: 0x2249, 0xc57: 0x2279, - 0xc58: 0x22a9, 0xc59: 0x22d9, 0xc5a: 0x2309, 0xc5b: 0x2339, 0xc5c: 0x2369, 0xc5d: 0x2399, - 0xc5e: 0x23c9, 0xc5f: 0x23f9, 0xc60: 0x0f41, 0xc61: 0x2421, 0xc62: 0x0905, 0xc63: 0x2439, - 0xc64: 0x1089, 0xc65: 0x2451, 0xc66: 0x0925, 0xc67: 0x2469, 0xc68: 0x2491, 0xc69: 0x0369, - 0xc6a: 
0x24a9, 0xc6b: 0x0945, 0xc6c: 0x0359, 0xc6d: 0x1159, 0xc6e: 0x0ef9, 0xc6f: 0x0f61, - 0xc70: 0x0f41, 0xc71: 0x2421, 0xc72: 0x0965, 0xc73: 0x2439, 0xc74: 0x1089, 0xc75: 0x2451, - 0xc76: 0x0985, 0xc77: 0x2469, 0xc78: 0x2491, 0xc79: 0x0369, 0xc7a: 0x24a9, 0xc7b: 0x09a5, - 0xc7c: 0x0359, 0xc7d: 0x1159, 0xc7e: 0x0ef9, 0xc7f: 0x0f61, + 0xc40: 0x1f41, 0xc41: 0x00c9, 0xc42: 0x0069, 0xc43: 0x0079, 0xc44: 0x1f51, 0xc45: 0x1f61, + 0xc46: 0x1f71, 0xc47: 0x1f81, 0xc48: 0x1f91, 0xc49: 0x1fa1, 0xc4a: 0x1fb2, 0xc4b: 0x07d5, + 0xc4c: 0x1fc2, 0xc4d: 0x1fd2, 0xc4e: 0x1fe2, 0xc4f: 0x0040, 0xc50: 0x0039, 0xc51: 0x0f09, + 0xc52: 0x00d9, 0xc53: 0x0369, 0xc54: 0x0ff9, 0xc55: 0x0249, 0xc56: 0x0f51, 0xc57: 0x0359, + 0xc58: 0x0f61, 0xc59: 0x0f71, 0xc5a: 0x0f99, 0xc5b: 0x01d9, 0xc5c: 0x0fa9, 0xc5d: 0x0040, + 0xc5e: 0x0040, 0xc5f: 0x0040, 0xc60: 0x0018, 0xc61: 0x0018, 0xc62: 0x0018, 0xc63: 0x0018, + 0xc64: 0x0018, 0xc65: 0x0018, 0xc66: 0x0018, 0xc67: 0x0018, 0xc68: 0x1ff1, 0xc69: 0x0018, + 0xc6a: 0x0018, 0xc6b: 0x0018, 0xc6c: 0x0018, 0xc6d: 0x0018, 0xc6e: 0x0018, 0xc6f: 0x0018, + 0xc70: 0x0018, 0xc71: 0x0018, 0xc72: 0x0018, 0xc73: 0x0018, 0xc74: 0x0018, 0xc75: 0x0018, + 0xc76: 0x0018, 0xc77: 0x0018, 0xc78: 0x0018, 0xc79: 0x0018, 0xc7a: 0x0018, 0xc7b: 0x0018, + 0xc7c: 0x0018, 0xc7d: 0x0018, 0xc7e: 0x0018, 0xc7f: 0x0018, // Block 0x32, offset 0xc80 - 0xc80: 0x0018, 0xc81: 0x0018, 0xc82: 0x0018, 0xc83: 0x0018, 0xc84: 0x0018, 0xc85: 0x0018, - 0xc86: 0x0018, 0xc87: 0x0018, 0xc88: 0x0018, 0xc89: 0x0018, 0xc8a: 0x0018, 0xc8b: 0x0040, - 0xc8c: 0x0040, 0xc8d: 0x0040, 0xc8e: 0x0040, 0xc8f: 0x0040, 0xc90: 0x0040, 0xc91: 0x0040, - 0xc92: 0x0040, 0xc93: 0x0040, 0xc94: 0x0040, 0xc95: 0x0040, 0xc96: 0x0040, 0xc97: 0x0040, - 0xc98: 0x0040, 0xc99: 0x0040, 0xc9a: 0x0040, 0xc9b: 0x0040, 0xc9c: 0x0040, 0xc9d: 0x0040, - 0xc9e: 0x0040, 0xc9f: 0x0040, 0xca0: 0x00c9, 0xca1: 0x0069, 0xca2: 0x0079, 0xca3: 0x1f51, - 0xca4: 0x1f61, 0xca5: 0x1f71, 0xca6: 0x1f81, 0xca7: 0x1f91, 0xca8: 0x1fa1, 0xca9: 0x2601, - 0xcaa: 0x2619, 0xcab: 0x2631, 0xcac: 0x2649, 0xcad: 0x2661, 0xcae: 0x2679, 0xcaf: 0x2691, - 0xcb0: 0x26a9, 0xcb1: 0x26c1, 0xcb2: 0x26d9, 0xcb3: 0x26f1, 0xcb4: 0x0a06, 0xcb5: 0x0a26, - 0xcb6: 0x0a46, 0xcb7: 0x0a66, 0xcb8: 0x0a86, 0xcb9: 0x0aa6, 0xcba: 0x0ac6, 0xcbb: 0x0ae6, - 0xcbc: 0x0b06, 0xcbd: 0x270a, 0xcbe: 0x2732, 0xcbf: 0x275a, + 0xc80: 0x07ee, 0xc81: 0x080e, 0xc82: 0x1159, 0xc83: 0x082d, 0xc84: 0x0018, 0xc85: 0x084e, + 0xc86: 0x086e, 0xc87: 0x1011, 0xc88: 0x0018, 0xc89: 0x088d, 0xc8a: 0x0f31, 0xc8b: 0x0249, + 0xc8c: 0x0249, 0xc8d: 0x0249, 0xc8e: 0x0249, 0xc8f: 0x2009, 0xc90: 0x0f41, 0xc91: 0x0f41, + 0xc92: 0x0359, 0xc93: 0x0359, 0xc94: 0x0018, 0xc95: 0x0f71, 0xc96: 0x2021, 0xc97: 0x0018, + 0xc98: 0x0018, 0xc99: 0x0f99, 0xc9a: 0x2039, 0xc9b: 0x0269, 0xc9c: 0x0269, 0xc9d: 0x0269, + 0xc9e: 0x0018, 0xc9f: 0x0018, 0xca0: 0x2049, 0xca1: 0x08ad, 0xca2: 0x2061, 0xca3: 0x0018, + 0xca4: 0x13d1, 0xca5: 0x0018, 0xca6: 0x2079, 0xca7: 0x0018, 0xca8: 0x13d1, 0xca9: 0x0018, + 0xcaa: 0x0f51, 0xcab: 0x2091, 0xcac: 0x0ee9, 0xcad: 0x1159, 0xcae: 0x0018, 0xcaf: 0x0f09, + 0xcb0: 0x0f09, 0xcb1: 0x1199, 0xcb2: 0x0040, 0xcb3: 0x0f61, 0xcb4: 0x00d9, 0xcb5: 0x20a9, + 0xcb6: 0x20c1, 0xcb7: 0x20d9, 0xcb8: 0x20f1, 0xcb9: 0x0f41, 0xcba: 0x0018, 0xcbb: 0x08cd, + 0xcbc: 0x2109, 0xcbd: 0x10b1, 0xcbe: 0x10b1, 0xcbf: 0x2109, // Block 0x33, offset 0xcc0 - 0xcc0: 0x2782, 0xcc1: 0x27aa, 0xcc2: 0x27d2, 0xcc3: 0x27fa, 0xcc4: 0x2822, 0xcc5: 0x284a, - 0xcc6: 0x2872, 0xcc7: 0x289a, 0xcc8: 0x0040, 0xcc9: 0x0040, 0xcca: 0x0040, 0xccb: 0x0040, - 0xccc: 0x0040, 0xccd: 0x0040, 0xcce: 
0x0040, 0xccf: 0x0040, 0xcd0: 0x0040, 0xcd1: 0x0040, - 0xcd2: 0x0040, 0xcd3: 0x0040, 0xcd4: 0x0040, 0xcd5: 0x0040, 0xcd6: 0x0040, 0xcd7: 0x0040, - 0xcd8: 0x0040, 0xcd9: 0x0040, 0xcda: 0x0040, 0xcdb: 0x0040, 0xcdc: 0x0b26, 0xcdd: 0x0b46, - 0xcde: 0x0b66, 0xcdf: 0x0b86, 0xce0: 0x0ba6, 0xce1: 0x0bc6, 0xce2: 0x0be6, 0xce3: 0x0c06, - 0xce4: 0x0c26, 0xce5: 0x0c46, 0xce6: 0x0c66, 0xce7: 0x0c86, 0xce8: 0x0ca6, 0xce9: 0x0cc6, - 0xcea: 0x0ce6, 0xceb: 0x0d06, 0xcec: 0x0d26, 0xced: 0x0d46, 0xcee: 0x0d66, 0xcef: 0x0d86, - 0xcf0: 0x0da6, 0xcf1: 0x0dc6, 0xcf2: 0x0de6, 0xcf3: 0x0e06, 0xcf4: 0x0e26, 0xcf5: 0x0e46, - 0xcf6: 0x0039, 0xcf7: 0x0ee9, 0xcf8: 0x1159, 0xcf9: 0x0ef9, 0xcfa: 0x0f09, 0xcfb: 0x1199, - 0xcfc: 0x0f31, 0xcfd: 0x0249, 0xcfe: 0x0f41, 0xcff: 0x0259, + 0xcc0: 0x08ed, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0ef9, + 0xcc6: 0x0ef9, 0xcc7: 0x0f09, 0xcc8: 0x0f41, 0xcc9: 0x0259, 0xcca: 0x0018, 0xccb: 0x0018, + 0xccc: 0x0018, 0xccd: 0x0018, 0xcce: 0x0008, 0xccf: 0x0018, 0xcd0: 0x2121, 0xcd1: 0x2151, + 0xcd2: 0x2181, 0xcd3: 0x21b9, 0xcd4: 0x21e9, 0xcd5: 0x2219, 0xcd6: 0x2249, 0xcd7: 0x2279, + 0xcd8: 0x22a9, 0xcd9: 0x22d9, 0xcda: 0x2309, 0xcdb: 0x2339, 0xcdc: 0x2369, 0xcdd: 0x2399, + 0xcde: 0x23c9, 0xcdf: 0x23f9, 0xce0: 0x0f41, 0xce1: 0x2421, 0xce2: 0x0905, 0xce3: 0x2439, + 0xce4: 0x1089, 0xce5: 0x2451, 0xce6: 0x0925, 0xce7: 0x2469, 0xce8: 0x2491, 0xce9: 0x0369, + 0xcea: 0x24a9, 0xceb: 0x0945, 0xcec: 0x0359, 0xced: 0x1159, 0xcee: 0x0ef9, 0xcef: 0x0f61, + 0xcf0: 0x0f41, 0xcf1: 0x2421, 0xcf2: 0x0965, 0xcf3: 0x2439, 0xcf4: 0x1089, 0xcf5: 0x2451, + 0xcf6: 0x0985, 0xcf7: 0x2469, 0xcf8: 0x2491, 0xcf9: 0x0369, 0xcfa: 0x24a9, 0xcfb: 0x09a5, + 0xcfc: 0x0359, 0xcfd: 0x1159, 0xcfe: 0x0ef9, 0xcff: 0x0f61, // Block 0x34, offset 0xd00 - 0xd00: 0x0f51, 0xd01: 0x0359, 0xd02: 0x0f61, 0xd03: 0x0f71, 0xd04: 0x00d9, 0xd05: 0x0f99, - 0xd06: 0x2039, 0xd07: 0x0269, 0xd08: 0x01d9, 0xd09: 0x0fa9, 0xd0a: 0x0fb9, 0xd0b: 0x1089, - 0xd0c: 0x0279, 0xd0d: 0x0369, 0xd0e: 0x0289, 0xd0f: 0x13d1, 0xd10: 0x0039, 0xd11: 0x0ee9, - 0xd12: 0x1159, 0xd13: 0x0ef9, 0xd14: 0x0f09, 0xd15: 0x1199, 0xd16: 0x0f31, 0xd17: 0x0249, - 0xd18: 0x0f41, 0xd19: 0x0259, 0xd1a: 0x0f51, 0xd1b: 0x0359, 0xd1c: 0x0f61, 0xd1d: 0x0f71, - 0xd1e: 0x00d9, 0xd1f: 0x0f99, 0xd20: 0x2039, 0xd21: 0x0269, 0xd22: 0x01d9, 0xd23: 0x0fa9, - 0xd24: 0x0fb9, 0xd25: 0x1089, 0xd26: 0x0279, 0xd27: 0x0369, 0xd28: 0x0289, 0xd29: 0x13d1, - 0xd2a: 0x1f41, 0xd2b: 0x0018, 0xd2c: 0x0018, 0xd2d: 0x0018, 0xd2e: 0x0018, 0xd2f: 0x0018, - 0xd30: 0x0018, 0xd31: 0x0018, 0xd32: 0x0018, 0xd33: 0x0018, 0xd34: 0x0018, 0xd35: 0x0018, - 0xd36: 0x0018, 0xd37: 0x0018, 0xd38: 0x0018, 0xd39: 0x0018, 0xd3a: 0x0018, 0xd3b: 0x0018, - 0xd3c: 0x0018, 0xd3d: 0x0018, 0xd3e: 0x0018, 0xd3f: 0x0018, + 0xd00: 0x0018, 0xd01: 0x0018, 0xd02: 0x0018, 0xd03: 0x0018, 0xd04: 0x0018, 0xd05: 0x0018, + 0xd06: 0x0018, 0xd07: 0x0018, 0xd08: 0x0018, 0xd09: 0x0018, 0xd0a: 0x0018, 0xd0b: 0x0040, + 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040, + 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040, + 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0040, 0xd1d: 0x0040, + 0xd1e: 0x0040, 0xd1f: 0x0040, 0xd20: 0x00c9, 0xd21: 0x0069, 0xd22: 0x0079, 0xd23: 0x1f51, + 0xd24: 0x1f61, 0xd25: 0x1f71, 0xd26: 0x1f81, 0xd27: 0x1f91, 0xd28: 0x1fa1, 0xd29: 0x2601, + 0xd2a: 0x2619, 0xd2b: 0x2631, 0xd2c: 0x2649, 0xd2d: 0x2661, 0xd2e: 0x2679, 0xd2f: 0x2691, + 0xd30: 0x26a9, 0xd31: 0x26c1, 0xd32: 0x26d9, 0xd33: 0x26f1, 
0xd34: 0x0a06, 0xd35: 0x0a26, + 0xd36: 0x0a46, 0xd37: 0x0a66, 0xd38: 0x0a86, 0xd39: 0x0aa6, 0xd3a: 0x0ac6, 0xd3b: 0x0ae6, + 0xd3c: 0x0b06, 0xd3d: 0x270a, 0xd3e: 0x2732, 0xd3f: 0x275a, // Block 0x35, offset 0xd40 - 0xd40: 0x0008, 0xd41: 0x0008, 0xd42: 0x0008, 0xd43: 0x0008, 0xd44: 0x0008, 0xd45: 0x0008, - 0xd46: 0x0008, 0xd47: 0x0008, 0xd48: 0x0008, 0xd49: 0x0008, 0xd4a: 0x0008, 0xd4b: 0x0008, - 0xd4c: 0x0008, 0xd4d: 0x0008, 0xd4e: 0x0008, 0xd4f: 0x0008, 0xd50: 0x0008, 0xd51: 0x0008, - 0xd52: 0x0008, 0xd53: 0x0008, 0xd54: 0x0008, 0xd55: 0x0008, 0xd56: 0x0008, 0xd57: 0x0008, - 0xd58: 0x0008, 0xd59: 0x0008, 0xd5a: 0x0008, 0xd5b: 0x0008, 0xd5c: 0x0008, 0xd5d: 0x0008, - 0xd5e: 0x0008, 0xd5f: 0x0040, 0xd60: 0xe00d, 0xd61: 0x0008, 0xd62: 0x2971, 0xd63: 0x0ebd, - 0xd64: 0x2989, 0xd65: 0x0008, 0xd66: 0x0008, 0xd67: 0xe07d, 0xd68: 0x0008, 0xd69: 0xe01d, - 0xd6a: 0x0008, 0xd6b: 0xe03d, 0xd6c: 0x0008, 0xd6d: 0x0fe1, 0xd6e: 0x1281, 0xd6f: 0x0fc9, - 0xd70: 0x1141, 0xd71: 0x0008, 0xd72: 0xe00d, 0xd73: 0x0008, 0xd74: 0x0008, 0xd75: 0xe01d, - 0xd76: 0x0008, 0xd77: 0x0008, 0xd78: 0x0008, 0xd79: 0x0008, 0xd7a: 0x0008, 0xd7b: 0x0008, - 0xd7c: 0x0259, 0xd7d: 0x1089, 0xd7e: 0x29a1, 0xd7f: 0x29b9, + 0xd40: 0x2782, 0xd41: 0x27aa, 0xd42: 0x27d2, 0xd43: 0x27fa, 0xd44: 0x2822, 0xd45: 0x284a, + 0xd46: 0x2872, 0xd47: 0x289a, 0xd48: 0x0040, 0xd49: 0x0040, 0xd4a: 0x0040, 0xd4b: 0x0040, + 0xd4c: 0x0040, 0xd4d: 0x0040, 0xd4e: 0x0040, 0xd4f: 0x0040, 0xd50: 0x0040, 0xd51: 0x0040, + 0xd52: 0x0040, 0xd53: 0x0040, 0xd54: 0x0040, 0xd55: 0x0040, 0xd56: 0x0040, 0xd57: 0x0040, + 0xd58: 0x0040, 0xd59: 0x0040, 0xd5a: 0x0040, 0xd5b: 0x0040, 0xd5c: 0x0b26, 0xd5d: 0x0b46, + 0xd5e: 0x0b66, 0xd5f: 0x0b86, 0xd60: 0x0ba6, 0xd61: 0x0bc6, 0xd62: 0x0be6, 0xd63: 0x0c06, + 0xd64: 0x0c26, 0xd65: 0x0c46, 0xd66: 0x0c66, 0xd67: 0x0c86, 0xd68: 0x0ca6, 0xd69: 0x0cc6, + 0xd6a: 0x0ce6, 0xd6b: 0x0d06, 0xd6c: 0x0d26, 0xd6d: 0x0d46, 0xd6e: 0x0d66, 0xd6f: 0x0d86, + 0xd70: 0x0da6, 0xd71: 0x0dc6, 0xd72: 0x0de6, 0xd73: 0x0e06, 0xd74: 0x0e26, 0xd75: 0x0e46, + 0xd76: 0x0039, 0xd77: 0x0ee9, 0xd78: 0x1159, 0xd79: 0x0ef9, 0xd7a: 0x0f09, 0xd7b: 0x1199, + 0xd7c: 0x0f31, 0xd7d: 0x0249, 0xd7e: 0x0f41, 0xd7f: 0x0259, // Block 0x36, offset 0xd80 - 0xd80: 0xe00d, 0xd81: 0x0008, 0xd82: 0xe00d, 0xd83: 0x0008, 0xd84: 0xe00d, 0xd85: 0x0008, - 0xd86: 0xe00d, 0xd87: 0x0008, 0xd88: 0xe00d, 0xd89: 0x0008, 0xd8a: 0xe00d, 0xd8b: 0x0008, - 0xd8c: 0xe00d, 0xd8d: 0x0008, 0xd8e: 0xe00d, 0xd8f: 0x0008, 0xd90: 0xe00d, 0xd91: 0x0008, - 0xd92: 0xe00d, 0xd93: 0x0008, 0xd94: 0xe00d, 0xd95: 0x0008, 0xd96: 0xe00d, 0xd97: 0x0008, - 0xd98: 0xe00d, 0xd99: 0x0008, 0xd9a: 0xe00d, 0xd9b: 0x0008, 0xd9c: 0xe00d, 0xd9d: 0x0008, - 0xd9e: 0xe00d, 0xd9f: 0x0008, 0xda0: 0xe00d, 0xda1: 0x0008, 0xda2: 0xe00d, 0xda3: 0x0008, - 0xda4: 0x0008, 0xda5: 0x0018, 0xda6: 0x0018, 0xda7: 0x0018, 0xda8: 0x0018, 0xda9: 0x0018, - 0xdaa: 0x0018, 0xdab: 0xe03d, 0xdac: 0x0008, 0xdad: 0xe01d, 0xdae: 0x0008, 0xdaf: 0x1308, - 0xdb0: 0x1308, 0xdb1: 0x1308, 0xdb2: 0xe00d, 0xdb3: 0x0008, 0xdb4: 0x0040, 0xdb5: 0x0040, - 0xdb6: 0x0040, 0xdb7: 0x0040, 0xdb8: 0x0040, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018, + 0xd80: 0x0f51, 0xd81: 0x0359, 0xd82: 0x0f61, 0xd83: 0x0f71, 0xd84: 0x00d9, 0xd85: 0x0f99, + 0xd86: 0x2039, 0xd87: 0x0269, 0xd88: 0x01d9, 0xd89: 0x0fa9, 0xd8a: 0x0fb9, 0xd8b: 0x1089, + 0xd8c: 0x0279, 0xd8d: 0x0369, 0xd8e: 0x0289, 0xd8f: 0x13d1, 0xd90: 0x0039, 0xd91: 0x0ee9, + 0xd92: 0x1159, 0xd93: 0x0ef9, 0xd94: 0x0f09, 0xd95: 0x1199, 0xd96: 0x0f31, 0xd97: 0x0249, + 0xd98: 0x0f41, 0xd99: 0x0259, 0xd9a: 0x0f51, 0xd9b: 0x0359, 
0xd9c: 0x0f61, 0xd9d: 0x0f71, + 0xd9e: 0x00d9, 0xd9f: 0x0f99, 0xda0: 0x2039, 0xda1: 0x0269, 0xda2: 0x01d9, 0xda3: 0x0fa9, + 0xda4: 0x0fb9, 0xda5: 0x1089, 0xda6: 0x0279, 0xda7: 0x0369, 0xda8: 0x0289, 0xda9: 0x13d1, + 0xdaa: 0x1f41, 0xdab: 0x0018, 0xdac: 0x0018, 0xdad: 0x0018, 0xdae: 0x0018, 0xdaf: 0x0018, + 0xdb0: 0x0018, 0xdb1: 0x0018, 0xdb2: 0x0018, 0xdb3: 0x0018, 0xdb4: 0x0018, 0xdb5: 0x0018, + 0xdb6: 0x0018, 0xdb7: 0x0018, 0xdb8: 0x0018, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018, 0xdbc: 0x0018, 0xdbd: 0x0018, 0xdbe: 0x0018, 0xdbf: 0x0018, // Block 0x37, offset 0xdc0 - 0xdc0: 0x26fd, 0xdc1: 0x271d, 0xdc2: 0x273d, 0xdc3: 0x275d, 0xdc4: 0x277d, 0xdc5: 0x279d, - 0xdc6: 0x27bd, 0xdc7: 0x27dd, 0xdc8: 0x27fd, 0xdc9: 0x281d, 0xdca: 0x283d, 0xdcb: 0x285d, - 0xdcc: 0x287d, 0xdcd: 0x289d, 0xdce: 0x28bd, 0xdcf: 0x28dd, 0xdd0: 0x28fd, 0xdd1: 0x291d, - 0xdd2: 0x293d, 0xdd3: 0x295d, 0xdd4: 0x297d, 0xdd5: 0x299d, 0xdd6: 0x0040, 0xdd7: 0x0040, - 0xdd8: 0x0040, 0xdd9: 0x0040, 0xdda: 0x0040, 0xddb: 0x0040, 0xddc: 0x0040, 0xddd: 0x0040, - 0xdde: 0x0040, 0xddf: 0x0040, 0xde0: 0x0040, 0xde1: 0x0040, 0xde2: 0x0040, 0xde3: 0x0040, - 0xde4: 0x0040, 0xde5: 0x0040, 0xde6: 0x0040, 0xde7: 0x0040, 0xde8: 0x0040, 0xde9: 0x0040, - 0xdea: 0x0040, 0xdeb: 0x0040, 0xdec: 0x0040, 0xded: 0x0040, 0xdee: 0x0040, 0xdef: 0x0040, - 0xdf0: 0x0040, 0xdf1: 0x0040, 0xdf2: 0x0040, 0xdf3: 0x0040, 0xdf4: 0x0040, 0xdf5: 0x0040, - 0xdf6: 0x0040, 0xdf7: 0x0040, 0xdf8: 0x0040, 0xdf9: 0x0040, 0xdfa: 0x0040, 0xdfb: 0x0040, - 0xdfc: 0x0040, 0xdfd: 0x0040, 0xdfe: 0x0040, 0xdff: 0x0040, + 0xdc0: 0x0008, 0xdc1: 0x0008, 0xdc2: 0x0008, 0xdc3: 0x0008, 0xdc4: 0x0008, 0xdc5: 0x0008, + 0xdc6: 0x0008, 0xdc7: 0x0008, 0xdc8: 0x0008, 0xdc9: 0x0008, 0xdca: 0x0008, 0xdcb: 0x0008, + 0xdcc: 0x0008, 0xdcd: 0x0008, 0xdce: 0x0008, 0xdcf: 0x0008, 0xdd0: 0x0008, 0xdd1: 0x0008, + 0xdd2: 0x0008, 0xdd3: 0x0008, 0xdd4: 0x0008, 0xdd5: 0x0008, 0xdd6: 0x0008, 0xdd7: 0x0008, + 0xdd8: 0x0008, 0xdd9: 0x0008, 0xdda: 0x0008, 0xddb: 0x0008, 0xddc: 0x0008, 0xddd: 0x0008, + 0xdde: 0x0008, 0xddf: 0x0040, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0x2971, 0xde3: 0x0ebd, + 0xde4: 0x2989, 0xde5: 0x0008, 0xde6: 0x0008, 0xde7: 0xe07d, 0xde8: 0x0008, 0xde9: 0xe01d, + 0xdea: 0x0008, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0x0fe1, 0xdee: 0x1281, 0xdef: 0x0fc9, + 0xdf0: 0x1141, 0xdf1: 0x0008, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0008, 0xdf5: 0xe01d, + 0xdf6: 0x0008, 0xdf7: 0x0008, 0xdf8: 0x0008, 0xdf9: 0x0008, 0xdfa: 0x0008, 0xdfb: 0x0008, + 0xdfc: 0x0259, 0xdfd: 0x1089, 0xdfe: 0x29a1, 0xdff: 0x29b9, // Block 0x38, offset 0xe00 - 0xe00: 0x000a, 0xe01: 0x0018, 0xe02: 0x29d1, 0xe03: 0x0018, 0xe04: 0x0018, 0xe05: 0x0008, - 0xe06: 0x0008, 0xe07: 0x0008, 0xe08: 0x0018, 0xe09: 0x0018, 0xe0a: 0x0018, 0xe0b: 0x0018, - 0xe0c: 0x0018, 0xe0d: 0x0018, 0xe0e: 0x0018, 0xe0f: 0x0018, 0xe10: 0x0018, 0xe11: 0x0018, - 0xe12: 0x0018, 0xe13: 0x0018, 0xe14: 0x0018, 0xe15: 0x0018, 0xe16: 0x0018, 0xe17: 0x0018, - 0xe18: 0x0018, 0xe19: 0x0018, 0xe1a: 0x0018, 0xe1b: 0x0018, 0xe1c: 0x0018, 0xe1d: 0x0018, - 0xe1e: 0x0018, 0xe1f: 0x0018, 0xe20: 0x0018, 0xe21: 0x0018, 0xe22: 0x0018, 0xe23: 0x0018, - 0xe24: 0x0018, 0xe25: 0x0018, 0xe26: 0x0018, 0xe27: 0x0018, 0xe28: 0x0018, 0xe29: 0x0018, - 0xe2a: 0x1308, 0xe2b: 0x1308, 0xe2c: 0x1308, 0xe2d: 0x1308, 0xe2e: 0x1018, 0xe2f: 0x1018, - 0xe30: 0x0018, 0xe31: 0x0018, 0xe32: 0x0018, 0xe33: 0x0018, 0xe34: 0x0018, 0xe35: 0x0018, - 0xe36: 0xe125, 0xe37: 0x0018, 0xe38: 0x29bd, 0xe39: 0x29dd, 0xe3a: 0x29fd, 0xe3b: 0x0018, - 0xe3c: 0x0008, 0xe3d: 0x0018, 0xe3e: 0x0018, 0xe3f: 0x0018, + 
0xe00: 0xe00d, 0xe01: 0x0008, 0xe02: 0xe00d, 0xe03: 0x0008, 0xe04: 0xe00d, 0xe05: 0x0008, + 0xe06: 0xe00d, 0xe07: 0x0008, 0xe08: 0xe00d, 0xe09: 0x0008, 0xe0a: 0xe00d, 0xe0b: 0x0008, + 0xe0c: 0xe00d, 0xe0d: 0x0008, 0xe0e: 0xe00d, 0xe0f: 0x0008, 0xe10: 0xe00d, 0xe11: 0x0008, + 0xe12: 0xe00d, 0xe13: 0x0008, 0xe14: 0xe00d, 0xe15: 0x0008, 0xe16: 0xe00d, 0xe17: 0x0008, + 0xe18: 0xe00d, 0xe19: 0x0008, 0xe1a: 0xe00d, 0xe1b: 0x0008, 0xe1c: 0xe00d, 0xe1d: 0x0008, + 0xe1e: 0xe00d, 0xe1f: 0x0008, 0xe20: 0xe00d, 0xe21: 0x0008, 0xe22: 0xe00d, 0xe23: 0x0008, + 0xe24: 0x0008, 0xe25: 0x0018, 0xe26: 0x0018, 0xe27: 0x0018, 0xe28: 0x0018, 0xe29: 0x0018, + 0xe2a: 0x0018, 0xe2b: 0xe03d, 0xe2c: 0x0008, 0xe2d: 0xe01d, 0xe2e: 0x0008, 0xe2f: 0x3308, + 0xe30: 0x3308, 0xe31: 0x3308, 0xe32: 0xe00d, 0xe33: 0x0008, 0xe34: 0x0040, 0xe35: 0x0040, + 0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0018, 0xe3a: 0x0018, 0xe3b: 0x0018, + 0xe3c: 0x0018, 0xe3d: 0x0018, 0xe3e: 0x0018, 0xe3f: 0x0018, // Block 0x39, offset 0xe40 - 0xe40: 0x2b3d, 0xe41: 0x2b5d, 0xe42: 0x2b7d, 0xe43: 0x2b9d, 0xe44: 0x2bbd, 0xe45: 0x2bdd, - 0xe46: 0x2bdd, 0xe47: 0x2bdd, 0xe48: 0x2bfd, 0xe49: 0x2bfd, 0xe4a: 0x2bfd, 0xe4b: 0x2bfd, - 0xe4c: 0x2c1d, 0xe4d: 0x2c1d, 0xe4e: 0x2c1d, 0xe4f: 0x2c3d, 0xe50: 0x2c5d, 0xe51: 0x2c5d, - 0xe52: 0x2a7d, 0xe53: 0x2a7d, 0xe54: 0x2c5d, 0xe55: 0x2c5d, 0xe56: 0x2c7d, 0xe57: 0x2c7d, - 0xe58: 0x2c5d, 0xe59: 0x2c5d, 0xe5a: 0x2a7d, 0xe5b: 0x2a7d, 0xe5c: 0x2c5d, 0xe5d: 0x2c5d, - 0xe5e: 0x2c3d, 0xe5f: 0x2c3d, 0xe60: 0x2c9d, 0xe61: 0x2c9d, 0xe62: 0x2cbd, 0xe63: 0x2cbd, - 0xe64: 0x0040, 0xe65: 0x2cdd, 0xe66: 0x2cfd, 0xe67: 0x2d1d, 0xe68: 0x2d1d, 0xe69: 0x2d3d, - 0xe6a: 0x2d5d, 0xe6b: 0x2d7d, 0xe6c: 0x2d9d, 0xe6d: 0x2dbd, 0xe6e: 0x2ddd, 0xe6f: 0x2dfd, - 0xe70: 0x2e1d, 0xe71: 0x2e3d, 0xe72: 0x2e3d, 0xe73: 0x2e5d, 0xe74: 0x2e7d, 0xe75: 0x2e7d, - 0xe76: 0x2e9d, 0xe77: 0x2ebd, 0xe78: 0x2e5d, 0xe79: 0x2edd, 0xe7a: 0x2efd, 0xe7b: 0x2edd, - 0xe7c: 0x2e5d, 0xe7d: 0x2f1d, 0xe7e: 0x2f3d, 0xe7f: 0x2f5d, + 0xe40: 0x26fd, 0xe41: 0x271d, 0xe42: 0x273d, 0xe43: 0x275d, 0xe44: 0x277d, 0xe45: 0x279d, + 0xe46: 0x27bd, 0xe47: 0x27dd, 0xe48: 0x27fd, 0xe49: 0x281d, 0xe4a: 0x283d, 0xe4b: 0x285d, + 0xe4c: 0x287d, 0xe4d: 0x289d, 0xe4e: 0x28bd, 0xe4f: 0x28dd, 0xe50: 0x28fd, 0xe51: 0x291d, + 0xe52: 0x293d, 0xe53: 0x295d, 0xe54: 0x297d, 0xe55: 0x299d, 0xe56: 0x0040, 0xe57: 0x0040, + 0xe58: 0x0040, 0xe59: 0x0040, 0xe5a: 0x0040, 0xe5b: 0x0040, 0xe5c: 0x0040, 0xe5d: 0x0040, + 0xe5e: 0x0040, 0xe5f: 0x0040, 0xe60: 0x0040, 0xe61: 0x0040, 0xe62: 0x0040, 0xe63: 0x0040, + 0xe64: 0x0040, 0xe65: 0x0040, 0xe66: 0x0040, 0xe67: 0x0040, 0xe68: 0x0040, 0xe69: 0x0040, + 0xe6a: 0x0040, 0xe6b: 0x0040, 0xe6c: 0x0040, 0xe6d: 0x0040, 0xe6e: 0x0040, 0xe6f: 0x0040, + 0xe70: 0x0040, 0xe71: 0x0040, 0xe72: 0x0040, 0xe73: 0x0040, 0xe74: 0x0040, 0xe75: 0x0040, + 0xe76: 0x0040, 0xe77: 0x0040, 0xe78: 0x0040, 0xe79: 0x0040, 0xe7a: 0x0040, 0xe7b: 0x0040, + 0xe7c: 0x0040, 0xe7d: 0x0040, 0xe7e: 0x0040, 0xe7f: 0x0040, // Block 0x3a, offset 0xe80 - 0xe80: 0x2f7d, 0xe81: 0x2f9d, 0xe82: 0x2cfd, 0xe83: 0x2cdd, 0xe84: 0x2fbd, 0xe85: 0x2fdd, - 0xe86: 0x2ffd, 0xe87: 0x301d, 0xe88: 0x303d, 0xe89: 0x305d, 0xe8a: 0x307d, 0xe8b: 0x309d, - 0xe8c: 0x30bd, 0xe8d: 0x30dd, 0xe8e: 0x30fd, 0xe8f: 0x0040, 0xe90: 0x0018, 0xe91: 0x0018, - 0xe92: 0x311d, 0xe93: 0x313d, 0xe94: 0x315d, 0xe95: 0x317d, 0xe96: 0x319d, 0xe97: 0x31bd, - 0xe98: 0x31dd, 0xe99: 0x31fd, 0xe9a: 0x321d, 0xe9b: 0x323d, 0xe9c: 0x315d, 0xe9d: 0x325d, - 0xe9e: 0x327d, 0xe9f: 0x329d, 0xea0: 0x0008, 0xea1: 0x0008, 0xea2: 0x0008, 0xea3: 0x0008, - 
0xea4: 0x0008, 0xea5: 0x0008, 0xea6: 0x0008, 0xea7: 0x0008, 0xea8: 0x0008, 0xea9: 0x0008, - 0xeaa: 0x0008, 0xeab: 0x0008, 0xeac: 0x0008, 0xead: 0x0008, 0xeae: 0x0008, 0xeaf: 0x0008, - 0xeb0: 0x0008, 0xeb1: 0x0008, 0xeb2: 0x0008, 0xeb3: 0x0008, 0xeb4: 0x0008, 0xeb5: 0x0008, - 0xeb6: 0x0008, 0xeb7: 0x0008, 0xeb8: 0x0008, 0xeb9: 0x0008, 0xeba: 0x0008, 0xebb: 0x0040, - 0xebc: 0x0040, 0xebd: 0x0040, 0xebe: 0x0040, 0xebf: 0x0040, + 0xe80: 0x000a, 0xe81: 0x0018, 0xe82: 0x29d1, 0xe83: 0x0018, 0xe84: 0x0018, 0xe85: 0x0008, + 0xe86: 0x0008, 0xe87: 0x0008, 0xe88: 0x0018, 0xe89: 0x0018, 0xe8a: 0x0018, 0xe8b: 0x0018, + 0xe8c: 0x0018, 0xe8d: 0x0018, 0xe8e: 0x0018, 0xe8f: 0x0018, 0xe90: 0x0018, 0xe91: 0x0018, + 0xe92: 0x0018, 0xe93: 0x0018, 0xe94: 0x0018, 0xe95: 0x0018, 0xe96: 0x0018, 0xe97: 0x0018, + 0xe98: 0x0018, 0xe99: 0x0018, 0xe9a: 0x0018, 0xe9b: 0x0018, 0xe9c: 0x0018, 0xe9d: 0x0018, + 0xe9e: 0x0018, 0xe9f: 0x0018, 0xea0: 0x0018, 0xea1: 0x0018, 0xea2: 0x0018, 0xea3: 0x0018, + 0xea4: 0x0018, 0xea5: 0x0018, 0xea6: 0x0018, 0xea7: 0x0018, 0xea8: 0x0018, 0xea9: 0x0018, + 0xeaa: 0x3308, 0xeab: 0x3308, 0xeac: 0x3308, 0xead: 0x3308, 0xeae: 0x3018, 0xeaf: 0x3018, + 0xeb0: 0x0018, 0xeb1: 0x0018, 0xeb2: 0x0018, 0xeb3: 0x0018, 0xeb4: 0x0018, 0xeb5: 0x0018, + 0xeb6: 0xe125, 0xeb7: 0x0018, 0xeb8: 0x29bd, 0xeb9: 0x29dd, 0xeba: 0x29fd, 0xebb: 0x0018, + 0xebc: 0x0008, 0xebd: 0x0018, 0xebe: 0x0018, 0xebf: 0x0018, // Block 0x3b, offset 0xec0 - 0xec0: 0x36a2, 0xec1: 0x36d2, 0xec2: 0x3702, 0xec3: 0x3732, 0xec4: 0x32bd, 0xec5: 0x32dd, - 0xec6: 0x32fd, 0xec7: 0x331d, 0xec8: 0x0018, 0xec9: 0x0018, 0xeca: 0x0018, 0xecb: 0x0018, - 0xecc: 0x0018, 0xecd: 0x0018, 0xece: 0x0018, 0xecf: 0x0018, 0xed0: 0x333d, 0xed1: 0x3761, - 0xed2: 0x3779, 0xed3: 0x3791, 0xed4: 0x37a9, 0xed5: 0x37c1, 0xed6: 0x37d9, 0xed7: 0x37f1, - 0xed8: 0x3809, 0xed9: 0x3821, 0xeda: 0x3839, 0xedb: 0x3851, 0xedc: 0x3869, 0xedd: 0x3881, - 0xede: 0x3899, 0xedf: 0x38b1, 0xee0: 0x335d, 0xee1: 0x337d, 0xee2: 0x339d, 0xee3: 0x33bd, - 0xee4: 0x33dd, 0xee5: 0x33dd, 0xee6: 0x33fd, 0xee7: 0x341d, 0xee8: 0x343d, 0xee9: 0x345d, - 0xeea: 0x347d, 0xeeb: 0x349d, 0xeec: 0x34bd, 0xeed: 0x34dd, 0xeee: 0x34fd, 0xeef: 0x351d, - 0xef0: 0x353d, 0xef1: 0x355d, 0xef2: 0x357d, 0xef3: 0x359d, 0xef4: 0x35bd, 0xef5: 0x35dd, - 0xef6: 0x35fd, 0xef7: 0x361d, 0xef8: 0x363d, 0xef9: 0x365d, 0xefa: 0x367d, 0xefb: 0x369d, - 0xefc: 0x38c9, 0xefd: 0x3901, 0xefe: 0x36bd, 0xeff: 0x0018, + 0xec0: 0x2b3d, 0xec1: 0x2b5d, 0xec2: 0x2b7d, 0xec3: 0x2b9d, 0xec4: 0x2bbd, 0xec5: 0x2bdd, + 0xec6: 0x2bdd, 0xec7: 0x2bdd, 0xec8: 0x2bfd, 0xec9: 0x2bfd, 0xeca: 0x2bfd, 0xecb: 0x2bfd, + 0xecc: 0x2c1d, 0xecd: 0x2c1d, 0xece: 0x2c1d, 0xecf: 0x2c3d, 0xed0: 0x2c5d, 0xed1: 0x2c5d, + 0xed2: 0x2a7d, 0xed3: 0x2a7d, 0xed4: 0x2c5d, 0xed5: 0x2c5d, 0xed6: 0x2c7d, 0xed7: 0x2c7d, + 0xed8: 0x2c5d, 0xed9: 0x2c5d, 0xeda: 0x2a7d, 0xedb: 0x2a7d, 0xedc: 0x2c5d, 0xedd: 0x2c5d, + 0xede: 0x2c3d, 0xedf: 0x2c3d, 0xee0: 0x2c9d, 0xee1: 0x2c9d, 0xee2: 0x2cbd, 0xee3: 0x2cbd, + 0xee4: 0x0040, 0xee5: 0x2cdd, 0xee6: 0x2cfd, 0xee7: 0x2d1d, 0xee8: 0x2d1d, 0xee9: 0x2d3d, + 0xeea: 0x2d5d, 0xeeb: 0x2d7d, 0xeec: 0x2d9d, 0xeed: 0x2dbd, 0xeee: 0x2ddd, 0xeef: 0x2dfd, + 0xef0: 0x2e1d, 0xef1: 0x2e3d, 0xef2: 0x2e3d, 0xef3: 0x2e5d, 0xef4: 0x2e7d, 0xef5: 0x2e7d, + 0xef6: 0x2e9d, 0xef7: 0x2ebd, 0xef8: 0x2e5d, 0xef9: 0x2edd, 0xefa: 0x2efd, 0xefb: 0x2edd, + 0xefc: 0x2e5d, 0xefd: 0x2f1d, 0xefe: 0x2f3d, 0xeff: 0x2f5d, // Block 0x3c, offset 0xf00 - 0xf00: 0x36dd, 0xf01: 0x36fd, 0xf02: 0x371d, 0xf03: 0x373d, 0xf04: 0x375d, 0xf05: 0x377d, - 0xf06: 0x379d, 0xf07: 0x37bd, 
0xf08: 0x37dd, 0xf09: 0x37fd, 0xf0a: 0x381d, 0xf0b: 0x383d, - 0xf0c: 0x385d, 0xf0d: 0x387d, 0xf0e: 0x389d, 0xf0f: 0x38bd, 0xf10: 0x38dd, 0xf11: 0x38fd, - 0xf12: 0x391d, 0xf13: 0x393d, 0xf14: 0x395d, 0xf15: 0x397d, 0xf16: 0x399d, 0xf17: 0x39bd, - 0xf18: 0x39dd, 0xf19: 0x39fd, 0xf1a: 0x3a1d, 0xf1b: 0x3a3d, 0xf1c: 0x3a5d, 0xf1d: 0x3a7d, - 0xf1e: 0x3a9d, 0xf1f: 0x3abd, 0xf20: 0x3add, 0xf21: 0x3afd, 0xf22: 0x3b1d, 0xf23: 0x3b3d, - 0xf24: 0x3b5d, 0xf25: 0x3b7d, 0xf26: 0x127d, 0xf27: 0x3b9d, 0xf28: 0x3bbd, 0xf29: 0x3bdd, - 0xf2a: 0x3bfd, 0xf2b: 0x3c1d, 0xf2c: 0x3c3d, 0xf2d: 0x3c5d, 0xf2e: 0x239d, 0xf2f: 0x3c7d, - 0xf30: 0x3c9d, 0xf31: 0x3939, 0xf32: 0x3951, 0xf33: 0x3969, 0xf34: 0x3981, 0xf35: 0x3999, - 0xf36: 0x39b1, 0xf37: 0x39c9, 0xf38: 0x39e1, 0xf39: 0x39f9, 0xf3a: 0x3a11, 0xf3b: 0x3a29, - 0xf3c: 0x3a41, 0xf3d: 0x3a59, 0xf3e: 0x3a71, 0xf3f: 0x3a89, + 0xf00: 0x2f7d, 0xf01: 0x2f9d, 0xf02: 0x2cfd, 0xf03: 0x2cdd, 0xf04: 0x2fbd, 0xf05: 0x2fdd, + 0xf06: 0x2ffd, 0xf07: 0x301d, 0xf08: 0x303d, 0xf09: 0x305d, 0xf0a: 0x307d, 0xf0b: 0x309d, + 0xf0c: 0x30bd, 0xf0d: 0x30dd, 0xf0e: 0x30fd, 0xf0f: 0x0040, 0xf10: 0x0018, 0xf11: 0x0018, + 0xf12: 0x311d, 0xf13: 0x313d, 0xf14: 0x315d, 0xf15: 0x317d, 0xf16: 0x319d, 0xf17: 0x31bd, + 0xf18: 0x31dd, 0xf19: 0x31fd, 0xf1a: 0x321d, 0xf1b: 0x323d, 0xf1c: 0x315d, 0xf1d: 0x325d, + 0xf1e: 0x327d, 0xf1f: 0x329d, 0xf20: 0x0008, 0xf21: 0x0008, 0xf22: 0x0008, 0xf23: 0x0008, + 0xf24: 0x0008, 0xf25: 0x0008, 0xf26: 0x0008, 0xf27: 0x0008, 0xf28: 0x0008, 0xf29: 0x0008, + 0xf2a: 0x0008, 0xf2b: 0x0008, 0xf2c: 0x0008, 0xf2d: 0x0008, 0xf2e: 0x0008, 0xf2f: 0x0008, + 0xf30: 0x0008, 0xf31: 0x0008, 0xf32: 0x0008, 0xf33: 0x0008, 0xf34: 0x0008, 0xf35: 0x0008, + 0xf36: 0x0008, 0xf37: 0x0008, 0xf38: 0x0008, 0xf39: 0x0008, 0xf3a: 0x0008, 0xf3b: 0x0040, + 0xf3c: 0x0040, 0xf3d: 0x0040, 0xf3e: 0x0040, 0xf3f: 0x0040, // Block 0x3d, offset 0xf40 - 0xf40: 0x3aa1, 0xf41: 0x3ac9, 0xf42: 0x3af1, 0xf43: 0x3b19, 0xf44: 0x3b41, 0xf45: 0x3b69, - 0xf46: 0x3b91, 0xf47: 0x3bb9, 0xf48: 0x3be1, 0xf49: 0x3c09, 0xf4a: 0x3c39, 0xf4b: 0x3c69, - 0xf4c: 0x3c99, 0xf4d: 0x3cbd, 0xf4e: 0x3cb1, 0xf4f: 0x3cdd, 0xf50: 0x3cfd, 0xf51: 0x3d15, - 0xf52: 0x3d2d, 0xf53: 0x3d45, 0xf54: 0x3d5d, 0xf55: 0x3d5d, 0xf56: 0x3d45, 0xf57: 0x3d75, - 0xf58: 0x07bd, 0xf59: 0x3d8d, 0xf5a: 0x3da5, 0xf5b: 0x3dbd, 0xf5c: 0x3dd5, 0xf5d: 0x3ded, - 0xf5e: 0x3e05, 0xf5f: 0x3e1d, 0xf60: 0x3e35, 0xf61: 0x3e4d, 0xf62: 0x3e65, 0xf63: 0x3e7d, - 0xf64: 0x3e95, 0xf65: 0x3e95, 0xf66: 0x3ead, 0xf67: 0x3ead, 0xf68: 0x3ec5, 0xf69: 0x3ec5, - 0xf6a: 0x3edd, 0xf6b: 0x3ef5, 0xf6c: 0x3f0d, 0xf6d: 0x3f25, 0xf6e: 0x3f3d, 0xf6f: 0x3f3d, - 0xf70: 0x3f55, 0xf71: 0x3f55, 0xf72: 0x3f55, 0xf73: 0x3f6d, 0xf74: 0x3f85, 0xf75: 0x3f9d, - 0xf76: 0x3fb5, 0xf77: 0x3f9d, 0xf78: 0x3fcd, 0xf79: 0x3fe5, 0xf7a: 0x3f6d, 0xf7b: 0x3ffd, - 0xf7c: 0x4015, 0xf7d: 0x4015, 0xf7e: 0x4015, 0xf7f: 0x0040, + 0xf40: 0x36a2, 0xf41: 0x36d2, 0xf42: 0x3702, 0xf43: 0x3732, 0xf44: 0x32bd, 0xf45: 0x32dd, + 0xf46: 0x32fd, 0xf47: 0x331d, 0xf48: 0x0018, 0xf49: 0x0018, 0xf4a: 0x0018, 0xf4b: 0x0018, + 0xf4c: 0x0018, 0xf4d: 0x0018, 0xf4e: 0x0018, 0xf4f: 0x0018, 0xf50: 0x333d, 0xf51: 0x3761, + 0xf52: 0x3779, 0xf53: 0x3791, 0xf54: 0x37a9, 0xf55: 0x37c1, 0xf56: 0x37d9, 0xf57: 0x37f1, + 0xf58: 0x3809, 0xf59: 0x3821, 0xf5a: 0x3839, 0xf5b: 0x3851, 0xf5c: 0x3869, 0xf5d: 0x3881, + 0xf5e: 0x3899, 0xf5f: 0x38b1, 0xf60: 0x335d, 0xf61: 0x337d, 0xf62: 0x339d, 0xf63: 0x33bd, + 0xf64: 0x33dd, 0xf65: 0x33dd, 0xf66: 0x33fd, 0xf67: 0x341d, 0xf68: 0x343d, 0xf69: 0x345d, + 0xf6a: 0x347d, 0xf6b: 0x349d, 0xf6c: 0x34bd, 0xf6d: 
0x34dd, 0xf6e: 0x34fd, 0xf6f: 0x351d, + 0xf70: 0x353d, 0xf71: 0x355d, 0xf72: 0x357d, 0xf73: 0x359d, 0xf74: 0x35bd, 0xf75: 0x35dd, + 0xf76: 0x35fd, 0xf77: 0x361d, 0xf78: 0x363d, 0xf79: 0x365d, 0xf7a: 0x367d, 0xf7b: 0x369d, + 0xf7c: 0x38c9, 0xf7d: 0x3901, 0xf7e: 0x36bd, 0xf7f: 0x0018, // Block 0x3e, offset 0xf80 - 0xf80: 0x3cc9, 0xf81: 0x3d31, 0xf82: 0x3d99, 0xf83: 0x3e01, 0xf84: 0x3e51, 0xf85: 0x3eb9, - 0xf86: 0x3f09, 0xf87: 0x3f59, 0xf88: 0x3fd9, 0xf89: 0x4041, 0xf8a: 0x4091, 0xf8b: 0x40e1, - 0xf8c: 0x4131, 0xf8d: 0x4199, 0xf8e: 0x4201, 0xf8f: 0x4251, 0xf90: 0x42a1, 0xf91: 0x42d9, - 0xf92: 0x4329, 0xf93: 0x4391, 0xf94: 0x43f9, 0xf95: 0x4431, 0xf96: 0x44b1, 0xf97: 0x4549, - 0xf98: 0x45c9, 0xf99: 0x4619, 0xf9a: 0x4699, 0xf9b: 0x4719, 0xf9c: 0x4781, 0xf9d: 0x47d1, - 0xf9e: 0x4821, 0xf9f: 0x4871, 0xfa0: 0x48d9, 0xfa1: 0x4959, 0xfa2: 0x49c1, 0xfa3: 0x4a11, - 0xfa4: 0x4a61, 0xfa5: 0x4ab1, 0xfa6: 0x4ae9, 0xfa7: 0x4b21, 0xfa8: 0x4b59, 0xfa9: 0x4b91, - 0xfaa: 0x4be1, 0xfab: 0x4c31, 0xfac: 0x4cb1, 0xfad: 0x4d01, 0xfae: 0x4d69, 0xfaf: 0x4de9, - 0xfb0: 0x4e39, 0xfb1: 0x4e71, 0xfb2: 0x4ea9, 0xfb3: 0x4f29, 0xfb4: 0x4f91, 0xfb5: 0x5011, - 0xfb6: 0x5061, 0xfb7: 0x50e1, 0xfb8: 0x5119, 0xfb9: 0x5169, 0xfba: 0x51b9, 0xfbb: 0x5209, - 0xfbc: 0x5259, 0xfbd: 0x52a9, 0xfbe: 0x5311, 0xfbf: 0x5361, + 0xf80: 0x36dd, 0xf81: 0x36fd, 0xf82: 0x371d, 0xf83: 0x373d, 0xf84: 0x375d, 0xf85: 0x377d, + 0xf86: 0x379d, 0xf87: 0x37bd, 0xf88: 0x37dd, 0xf89: 0x37fd, 0xf8a: 0x381d, 0xf8b: 0x383d, + 0xf8c: 0x385d, 0xf8d: 0x387d, 0xf8e: 0x389d, 0xf8f: 0x38bd, 0xf90: 0x38dd, 0xf91: 0x38fd, + 0xf92: 0x391d, 0xf93: 0x393d, 0xf94: 0x395d, 0xf95: 0x397d, 0xf96: 0x399d, 0xf97: 0x39bd, + 0xf98: 0x39dd, 0xf99: 0x39fd, 0xf9a: 0x3a1d, 0xf9b: 0x3a3d, 0xf9c: 0x3a5d, 0xf9d: 0x3a7d, + 0xf9e: 0x3a9d, 0xf9f: 0x3abd, 0xfa0: 0x3add, 0xfa1: 0x3afd, 0xfa2: 0x3b1d, 0xfa3: 0x3b3d, + 0xfa4: 0x3b5d, 0xfa5: 0x3b7d, 0xfa6: 0x127d, 0xfa7: 0x3b9d, 0xfa8: 0x3bbd, 0xfa9: 0x3bdd, + 0xfaa: 0x3bfd, 0xfab: 0x3c1d, 0xfac: 0x3c3d, 0xfad: 0x3c5d, 0xfae: 0x239d, 0xfaf: 0x3c7d, + 0xfb0: 0x3c9d, 0xfb1: 0x3939, 0xfb2: 0x3951, 0xfb3: 0x3969, 0xfb4: 0x3981, 0xfb5: 0x3999, + 0xfb6: 0x39b1, 0xfb7: 0x39c9, 0xfb8: 0x39e1, 0xfb9: 0x39f9, 0xfba: 0x3a11, 0xfbb: 0x3a29, + 0xfbc: 0x3a41, 0xfbd: 0x3a59, 0xfbe: 0x3a71, 0xfbf: 0x3a89, // Block 0x3f, offset 0xfc0 - 0xfc0: 0x5399, 0xfc1: 0x53e9, 0xfc2: 0x5439, 0xfc3: 0x5489, 0xfc4: 0x54f1, 0xfc5: 0x5541, - 0xfc6: 0x5591, 0xfc7: 0x55e1, 0xfc8: 0x5661, 0xfc9: 0x56c9, 0xfca: 0x5701, 0xfcb: 0x5781, - 0xfcc: 0x57b9, 0xfcd: 0x5821, 0xfce: 0x5889, 0xfcf: 0x58d9, 0xfd0: 0x5929, 0xfd1: 0x5979, - 0xfd2: 0x59e1, 0xfd3: 0x5a19, 0xfd4: 0x5a69, 0xfd5: 0x5ad1, 0xfd6: 0x5b09, 0xfd7: 0x5b89, - 0xfd8: 0x5bd9, 0xfd9: 0x5c01, 0xfda: 0x5c29, 0xfdb: 0x5c51, 0xfdc: 0x5c79, 0xfdd: 0x5ca1, - 0xfde: 0x5cc9, 0xfdf: 0x5cf1, 0xfe0: 0x5d19, 0xfe1: 0x5d41, 0xfe2: 0x5d69, 0xfe3: 0x5d99, - 0xfe4: 0x5dc9, 0xfe5: 0x5df9, 0xfe6: 0x5e29, 0xfe7: 0x5e59, 0xfe8: 0x5e89, 0xfe9: 0x5eb9, - 0xfea: 0x5ee9, 0xfeb: 0x5f19, 0xfec: 0x5f49, 0xfed: 0x5f79, 0xfee: 0x5fa9, 0xfef: 0x5fd9, - 0xff0: 0x6009, 0xff1: 0x402d, 0xff2: 0x6039, 0xff3: 0x6051, 0xff4: 0x404d, 0xff5: 0x6069, - 0xff6: 0x6081, 0xff7: 0x6099, 0xff8: 0x406d, 0xff9: 0x406d, 0xffa: 0x60b1, 0xffb: 0x60c9, - 0xffc: 0x6101, 0xffd: 0x6139, 0xffe: 0x6171, 0xfff: 0x61a9, + 0xfc0: 0x3aa1, 0xfc1: 0x3ac9, 0xfc2: 0x3af1, 0xfc3: 0x3b19, 0xfc4: 0x3b41, 0xfc5: 0x3b69, + 0xfc6: 0x3b91, 0xfc7: 0x3bb9, 0xfc8: 0x3be1, 0xfc9: 0x3c09, 0xfca: 0x3c39, 0xfcb: 0x3c69, + 0xfcc: 0x3c99, 0xfcd: 0x3cbd, 0xfce: 0x3cb1, 0xfcf: 0x3cdd, 0xfd0: 0x3cfd, 0xfd1: 
0x3d15, + 0xfd2: 0x3d2d, 0xfd3: 0x3d45, 0xfd4: 0x3d5d, 0xfd5: 0x3d5d, 0xfd6: 0x3d45, 0xfd7: 0x3d75, + 0xfd8: 0x07bd, 0xfd9: 0x3d8d, 0xfda: 0x3da5, 0xfdb: 0x3dbd, 0xfdc: 0x3dd5, 0xfdd: 0x3ded, + 0xfde: 0x3e05, 0xfdf: 0x3e1d, 0xfe0: 0x3e35, 0xfe1: 0x3e4d, 0xfe2: 0x3e65, 0xfe3: 0x3e7d, + 0xfe4: 0x3e95, 0xfe5: 0x3e95, 0xfe6: 0x3ead, 0xfe7: 0x3ead, 0xfe8: 0x3ec5, 0xfe9: 0x3ec5, + 0xfea: 0x3edd, 0xfeb: 0x3ef5, 0xfec: 0x3f0d, 0xfed: 0x3f25, 0xfee: 0x3f3d, 0xfef: 0x3f3d, + 0xff0: 0x3f55, 0xff1: 0x3f55, 0xff2: 0x3f55, 0xff3: 0x3f6d, 0xff4: 0x3f85, 0xff5: 0x3f9d, + 0xff6: 0x3fb5, 0xff7: 0x3f9d, 0xff8: 0x3fcd, 0xff9: 0x3fe5, 0xffa: 0x3f6d, 0xffb: 0x3ffd, + 0xffc: 0x4015, 0xffd: 0x4015, 0xffe: 0x4015, 0xfff: 0x0040, // Block 0x40, offset 0x1000 - 0x1000: 0x6211, 0x1001: 0x6229, 0x1002: 0x408d, 0x1003: 0x6241, 0x1004: 0x6259, 0x1005: 0x6271, - 0x1006: 0x6289, 0x1007: 0x62a1, 0x1008: 0x40ad, 0x1009: 0x62b9, 0x100a: 0x62e1, 0x100b: 0x62f9, - 0x100c: 0x40cd, 0x100d: 0x40cd, 0x100e: 0x6311, 0x100f: 0x6329, 0x1010: 0x6341, 0x1011: 0x40ed, - 0x1012: 0x410d, 0x1013: 0x412d, 0x1014: 0x414d, 0x1015: 0x416d, 0x1016: 0x6359, 0x1017: 0x6371, - 0x1018: 0x6389, 0x1019: 0x63a1, 0x101a: 0x63b9, 0x101b: 0x418d, 0x101c: 0x63d1, 0x101d: 0x63e9, - 0x101e: 0x6401, 0x101f: 0x41ad, 0x1020: 0x41cd, 0x1021: 0x6419, 0x1022: 0x41ed, 0x1023: 0x420d, - 0x1024: 0x422d, 0x1025: 0x6431, 0x1026: 0x424d, 0x1027: 0x6449, 0x1028: 0x6479, 0x1029: 0x6211, - 0x102a: 0x426d, 0x102b: 0x428d, 0x102c: 0x42ad, 0x102d: 0x42cd, 0x102e: 0x64b1, 0x102f: 0x64f1, - 0x1030: 0x6539, 0x1031: 0x6551, 0x1032: 0x42ed, 0x1033: 0x6569, 0x1034: 0x6581, 0x1035: 0x6599, - 0x1036: 0x430d, 0x1037: 0x65b1, 0x1038: 0x65c9, 0x1039: 0x65b1, 0x103a: 0x65e1, 0x103b: 0x65f9, - 0x103c: 0x432d, 0x103d: 0x6611, 0x103e: 0x6629, 0x103f: 0x6611, + 0x1000: 0x3cc9, 0x1001: 0x3d31, 0x1002: 0x3d99, 0x1003: 0x3e01, 0x1004: 0x3e51, 0x1005: 0x3eb9, + 0x1006: 0x3f09, 0x1007: 0x3f59, 0x1008: 0x3fd9, 0x1009: 0x4041, 0x100a: 0x4091, 0x100b: 0x40e1, + 0x100c: 0x4131, 0x100d: 0x4199, 0x100e: 0x4201, 0x100f: 0x4251, 0x1010: 0x42a1, 0x1011: 0x42d9, + 0x1012: 0x4329, 0x1013: 0x4391, 0x1014: 0x43f9, 0x1015: 0x4431, 0x1016: 0x44b1, 0x1017: 0x4549, + 0x1018: 0x45c9, 0x1019: 0x4619, 0x101a: 0x4699, 0x101b: 0x4719, 0x101c: 0x4781, 0x101d: 0x47d1, + 0x101e: 0x4821, 0x101f: 0x4871, 0x1020: 0x48d9, 0x1021: 0x4959, 0x1022: 0x49c1, 0x1023: 0x4a11, + 0x1024: 0x4a61, 0x1025: 0x4ab1, 0x1026: 0x4ae9, 0x1027: 0x4b21, 0x1028: 0x4b59, 0x1029: 0x4b91, + 0x102a: 0x4be1, 0x102b: 0x4c31, 0x102c: 0x4cb1, 0x102d: 0x4d01, 0x102e: 0x4d69, 0x102f: 0x4de9, + 0x1030: 0x4e39, 0x1031: 0x4e71, 0x1032: 0x4ea9, 0x1033: 0x4f29, 0x1034: 0x4f91, 0x1035: 0x5011, + 0x1036: 0x5061, 0x1037: 0x50e1, 0x1038: 0x5119, 0x1039: 0x5169, 0x103a: 0x51b9, 0x103b: 0x5209, + 0x103c: 0x5259, 0x103d: 0x52a9, 0x103e: 0x5311, 0x103f: 0x5361, // Block 0x41, offset 0x1040 - 0x1040: 0x434d, 0x1041: 0x436d, 0x1042: 0x0040, 0x1043: 0x6641, 0x1044: 0x6659, 0x1045: 0x6671, - 0x1046: 0x6689, 0x1047: 0x0040, 0x1048: 0x66c1, 0x1049: 0x66d9, 0x104a: 0x66f1, 0x104b: 0x6709, - 0x104c: 0x6721, 0x104d: 0x6739, 0x104e: 0x6401, 0x104f: 0x6751, 0x1050: 0x6769, 0x1051: 0x6781, - 0x1052: 0x438d, 0x1053: 0x6799, 0x1054: 0x6289, 0x1055: 0x43ad, 0x1056: 0x43cd, 0x1057: 0x67b1, - 0x1058: 0x0040, 0x1059: 0x43ed, 0x105a: 0x67c9, 0x105b: 0x67e1, 0x105c: 0x67f9, 0x105d: 0x6811, - 0x105e: 0x6829, 0x105f: 0x6859, 0x1060: 0x6889, 0x1061: 0x68b1, 0x1062: 0x68d9, 0x1063: 0x6901, - 0x1064: 0x6929, 0x1065: 0x6951, 0x1066: 0x6979, 0x1067: 0x69a1, 0x1068: 0x69c9, 0x1069: 0x69f1, - 
0x106a: 0x6a21, 0x106b: 0x6a51, 0x106c: 0x6a81, 0x106d: 0x6ab1, 0x106e: 0x6ae1, 0x106f: 0x6b11, - 0x1070: 0x6b41, 0x1071: 0x6b71, 0x1072: 0x6ba1, 0x1073: 0x6bd1, 0x1074: 0x6c01, 0x1075: 0x6c31, - 0x1076: 0x6c61, 0x1077: 0x6c91, 0x1078: 0x6cc1, 0x1079: 0x6cf1, 0x107a: 0x6d21, 0x107b: 0x6d51, - 0x107c: 0x6d81, 0x107d: 0x6db1, 0x107e: 0x6de1, 0x107f: 0x440d, + 0x1040: 0x5399, 0x1041: 0x53e9, 0x1042: 0x5439, 0x1043: 0x5489, 0x1044: 0x54f1, 0x1045: 0x5541, + 0x1046: 0x5591, 0x1047: 0x55e1, 0x1048: 0x5661, 0x1049: 0x56c9, 0x104a: 0x5701, 0x104b: 0x5781, + 0x104c: 0x57b9, 0x104d: 0x5821, 0x104e: 0x5889, 0x104f: 0x58d9, 0x1050: 0x5929, 0x1051: 0x5979, + 0x1052: 0x59e1, 0x1053: 0x5a19, 0x1054: 0x5a69, 0x1055: 0x5ad1, 0x1056: 0x5b09, 0x1057: 0x5b89, + 0x1058: 0x5bd9, 0x1059: 0x5c01, 0x105a: 0x5c29, 0x105b: 0x5c51, 0x105c: 0x5c79, 0x105d: 0x5ca1, + 0x105e: 0x5cc9, 0x105f: 0x5cf1, 0x1060: 0x5d19, 0x1061: 0x5d41, 0x1062: 0x5d69, 0x1063: 0x5d99, + 0x1064: 0x5dc9, 0x1065: 0x5df9, 0x1066: 0x5e29, 0x1067: 0x5e59, 0x1068: 0x5e89, 0x1069: 0x5eb9, + 0x106a: 0x5ee9, 0x106b: 0x5f19, 0x106c: 0x5f49, 0x106d: 0x5f79, 0x106e: 0x5fa9, 0x106f: 0x5fd9, + 0x1070: 0x6009, 0x1071: 0x402d, 0x1072: 0x6039, 0x1073: 0x6051, 0x1074: 0x404d, 0x1075: 0x6069, + 0x1076: 0x6081, 0x1077: 0x6099, 0x1078: 0x406d, 0x1079: 0x406d, 0x107a: 0x60b1, 0x107b: 0x60c9, + 0x107c: 0x6101, 0x107d: 0x6139, 0x107e: 0x6171, 0x107f: 0x61a9, // Block 0x42, offset 0x1080 - 0x1080: 0xe00d, 0x1081: 0x0008, 0x1082: 0xe00d, 0x1083: 0x0008, 0x1084: 0xe00d, 0x1085: 0x0008, - 0x1086: 0xe00d, 0x1087: 0x0008, 0x1088: 0xe00d, 0x1089: 0x0008, 0x108a: 0xe00d, 0x108b: 0x0008, - 0x108c: 0xe00d, 0x108d: 0x0008, 0x108e: 0xe00d, 0x108f: 0x0008, 0x1090: 0xe00d, 0x1091: 0x0008, - 0x1092: 0xe00d, 0x1093: 0x0008, 0x1094: 0xe00d, 0x1095: 0x0008, 0x1096: 0xe00d, 0x1097: 0x0008, - 0x1098: 0xe00d, 0x1099: 0x0008, 0x109a: 0xe00d, 0x109b: 0x0008, 0x109c: 0xe00d, 0x109d: 0x0008, - 0x109e: 0xe00d, 0x109f: 0x0008, 0x10a0: 0xe00d, 0x10a1: 0x0008, 0x10a2: 0xe00d, 0x10a3: 0x0008, - 0x10a4: 0xe00d, 0x10a5: 0x0008, 0x10a6: 0xe00d, 0x10a7: 0x0008, 0x10a8: 0xe00d, 0x10a9: 0x0008, - 0x10aa: 0xe00d, 0x10ab: 0x0008, 0x10ac: 0xe00d, 0x10ad: 0x0008, 0x10ae: 0x0008, 0x10af: 0x1308, - 0x10b0: 0x1318, 0x10b1: 0x1318, 0x10b2: 0x1318, 0x10b3: 0x0018, 0x10b4: 0x1308, 0x10b5: 0x1308, - 0x10b6: 0x1308, 0x10b7: 0x1308, 0x10b8: 0x1308, 0x10b9: 0x1308, 0x10ba: 0x1308, 0x10bb: 0x1308, - 0x10bc: 0x1308, 0x10bd: 0x1308, 0x10be: 0x0018, 0x10bf: 0x0008, + 0x1080: 0x6211, 0x1081: 0x6229, 0x1082: 0x408d, 0x1083: 0x6241, 0x1084: 0x6259, 0x1085: 0x6271, + 0x1086: 0x6289, 0x1087: 0x62a1, 0x1088: 0x40ad, 0x1089: 0x62b9, 0x108a: 0x62e1, 0x108b: 0x62f9, + 0x108c: 0x40cd, 0x108d: 0x40cd, 0x108e: 0x6311, 0x108f: 0x6329, 0x1090: 0x6341, 0x1091: 0x40ed, + 0x1092: 0x410d, 0x1093: 0x412d, 0x1094: 0x414d, 0x1095: 0x416d, 0x1096: 0x6359, 0x1097: 0x6371, + 0x1098: 0x6389, 0x1099: 0x63a1, 0x109a: 0x63b9, 0x109b: 0x418d, 0x109c: 0x63d1, 0x109d: 0x63e9, + 0x109e: 0x6401, 0x109f: 0x41ad, 0x10a0: 0x41cd, 0x10a1: 0x6419, 0x10a2: 0x41ed, 0x10a3: 0x420d, + 0x10a4: 0x422d, 0x10a5: 0x6431, 0x10a6: 0x424d, 0x10a7: 0x6449, 0x10a8: 0x6479, 0x10a9: 0x6211, + 0x10aa: 0x426d, 0x10ab: 0x428d, 0x10ac: 0x42ad, 0x10ad: 0x42cd, 0x10ae: 0x64b1, 0x10af: 0x64f1, + 0x10b0: 0x6539, 0x10b1: 0x6551, 0x10b2: 0x42ed, 0x10b3: 0x6569, 0x10b4: 0x6581, 0x10b5: 0x6599, + 0x10b6: 0x430d, 0x10b7: 0x65b1, 0x10b8: 0x65c9, 0x10b9: 0x65b1, 0x10ba: 0x65e1, 0x10bb: 0x65f9, + 0x10bc: 0x432d, 0x10bd: 0x6611, 0x10be: 0x6629, 0x10bf: 0x6611, // Block 0x43, offset 0x10c0 
- 0x10c0: 0xe00d, 0x10c1: 0x0008, 0x10c2: 0xe00d, 0x10c3: 0x0008, 0x10c4: 0xe00d, 0x10c5: 0x0008, - 0x10c6: 0xe00d, 0x10c7: 0x0008, 0x10c8: 0xe00d, 0x10c9: 0x0008, 0x10ca: 0xe00d, 0x10cb: 0x0008, - 0x10cc: 0xe00d, 0x10cd: 0x0008, 0x10ce: 0xe00d, 0x10cf: 0x0008, 0x10d0: 0xe00d, 0x10d1: 0x0008, - 0x10d2: 0xe00d, 0x10d3: 0x0008, 0x10d4: 0xe00d, 0x10d5: 0x0008, 0x10d6: 0xe00d, 0x10d7: 0x0008, - 0x10d8: 0xe00d, 0x10d9: 0x0008, 0x10da: 0xe00d, 0x10db: 0x0008, 0x10dc: 0x0ea1, 0x10dd: 0x6e11, - 0x10de: 0x1308, 0x10df: 0x1308, 0x10e0: 0x0008, 0x10e1: 0x0008, 0x10e2: 0x0008, 0x10e3: 0x0008, - 0x10e4: 0x0008, 0x10e5: 0x0008, 0x10e6: 0x0008, 0x10e7: 0x0008, 0x10e8: 0x0008, 0x10e9: 0x0008, - 0x10ea: 0x0008, 0x10eb: 0x0008, 0x10ec: 0x0008, 0x10ed: 0x0008, 0x10ee: 0x0008, 0x10ef: 0x0008, - 0x10f0: 0x0008, 0x10f1: 0x0008, 0x10f2: 0x0008, 0x10f3: 0x0008, 0x10f4: 0x0008, 0x10f5: 0x0008, - 0x10f6: 0x0008, 0x10f7: 0x0008, 0x10f8: 0x0008, 0x10f9: 0x0008, 0x10fa: 0x0008, 0x10fb: 0x0008, - 0x10fc: 0x0008, 0x10fd: 0x0008, 0x10fe: 0x0008, 0x10ff: 0x0008, + 0x10c0: 0x434d, 0x10c1: 0x436d, 0x10c2: 0x0040, 0x10c3: 0x6641, 0x10c4: 0x6659, 0x10c5: 0x6671, + 0x10c6: 0x6689, 0x10c7: 0x0040, 0x10c8: 0x66c1, 0x10c9: 0x66d9, 0x10ca: 0x66f1, 0x10cb: 0x6709, + 0x10cc: 0x6721, 0x10cd: 0x6739, 0x10ce: 0x6401, 0x10cf: 0x6751, 0x10d0: 0x6769, 0x10d1: 0x6781, + 0x10d2: 0x438d, 0x10d3: 0x6799, 0x10d4: 0x6289, 0x10d5: 0x43ad, 0x10d6: 0x43cd, 0x10d7: 0x67b1, + 0x10d8: 0x0040, 0x10d9: 0x43ed, 0x10da: 0x67c9, 0x10db: 0x67e1, 0x10dc: 0x67f9, 0x10dd: 0x6811, + 0x10de: 0x6829, 0x10df: 0x6859, 0x10e0: 0x6889, 0x10e1: 0x68b1, 0x10e2: 0x68d9, 0x10e3: 0x6901, + 0x10e4: 0x6929, 0x10e5: 0x6951, 0x10e6: 0x6979, 0x10e7: 0x69a1, 0x10e8: 0x69c9, 0x10e9: 0x69f1, + 0x10ea: 0x6a21, 0x10eb: 0x6a51, 0x10ec: 0x6a81, 0x10ed: 0x6ab1, 0x10ee: 0x6ae1, 0x10ef: 0x6b11, + 0x10f0: 0x6b41, 0x10f1: 0x6b71, 0x10f2: 0x6ba1, 0x10f3: 0x6bd1, 0x10f4: 0x6c01, 0x10f5: 0x6c31, + 0x10f6: 0x6c61, 0x10f7: 0x6c91, 0x10f8: 0x6cc1, 0x10f9: 0x6cf1, 0x10fa: 0x6d21, 0x10fb: 0x6d51, + 0x10fc: 0x6d81, 0x10fd: 0x6db1, 0x10fe: 0x6de1, 0x10ff: 0x440d, // Block 0x44, offset 0x1100 - 0x1100: 0x0018, 0x1101: 0x0018, 0x1102: 0x0018, 0x1103: 0x0018, 0x1104: 0x0018, 0x1105: 0x0018, - 0x1106: 0x0018, 0x1107: 0x0018, 0x1108: 0x0018, 0x1109: 0x0018, 0x110a: 0x0018, 0x110b: 0x0018, - 0x110c: 0x0018, 0x110d: 0x0018, 0x110e: 0x0018, 0x110f: 0x0018, 0x1110: 0x0018, 0x1111: 0x0018, - 0x1112: 0x0018, 0x1113: 0x0018, 0x1114: 0x0018, 0x1115: 0x0018, 0x1116: 0x0018, 0x1117: 0x0008, - 0x1118: 0x0008, 0x1119: 0x0008, 0x111a: 0x0008, 0x111b: 0x0008, 0x111c: 0x0008, 0x111d: 0x0008, - 0x111e: 0x0008, 0x111f: 0x0008, 0x1120: 0x0018, 0x1121: 0x0018, 0x1122: 0xe00d, 0x1123: 0x0008, + 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008, + 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008, + 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008, + 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008, + 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0xe00d, 0x111d: 0x0008, + 0x111e: 0xe00d, 0x111f: 0x0008, 0x1120: 0xe00d, 0x1121: 0x0008, 0x1122: 0xe00d, 0x1123: 0x0008, 0x1124: 0xe00d, 0x1125: 0x0008, 0x1126: 0xe00d, 0x1127: 0x0008, 0x1128: 0xe00d, 0x1129: 0x0008, - 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0xe00d, 0x112f: 0x0008, - 0x1130: 0x0008, 0x1131: 0x0008, 0x1132: 0xe00d, 0x1133: 
0x0008, 0x1134: 0xe00d, 0x1135: 0x0008, - 0x1136: 0xe00d, 0x1137: 0x0008, 0x1138: 0xe00d, 0x1139: 0x0008, 0x113a: 0xe00d, 0x113b: 0x0008, - 0x113c: 0xe00d, 0x113d: 0x0008, 0x113e: 0xe00d, 0x113f: 0x0008, + 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x3308, + 0x1130: 0x3318, 0x1131: 0x3318, 0x1132: 0x3318, 0x1133: 0x0018, 0x1134: 0x3308, 0x1135: 0x3308, + 0x1136: 0x3308, 0x1137: 0x3308, 0x1138: 0x3308, 0x1139: 0x3308, 0x113a: 0x3308, 0x113b: 0x3308, + 0x113c: 0x3308, 0x113d: 0x3308, 0x113e: 0x0018, 0x113f: 0x0008, // Block 0x45, offset 0x1140 0x1140: 0xe00d, 0x1141: 0x0008, 0x1142: 0xe00d, 0x1143: 0x0008, 0x1144: 0xe00d, 0x1145: 0x0008, 0x1146: 0xe00d, 0x1147: 0x0008, 0x1148: 0xe00d, 0x1149: 0x0008, 0x114a: 0xe00d, 0x114b: 0x0008, 0x114c: 0xe00d, 0x114d: 0x0008, 0x114e: 0xe00d, 0x114f: 0x0008, 0x1150: 0xe00d, 0x1151: 0x0008, 0x1152: 0xe00d, 0x1153: 0x0008, 0x1154: 0xe00d, 0x1155: 0x0008, 0x1156: 0xe00d, 0x1157: 0x0008, - 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0xe00d, 0x115d: 0x0008, - 0x115e: 0xe00d, 0x115f: 0x0008, 0x1160: 0xe00d, 0x1161: 0x0008, 0x1162: 0xe00d, 0x1163: 0x0008, - 0x1164: 0xe00d, 0x1165: 0x0008, 0x1166: 0xe00d, 0x1167: 0x0008, 0x1168: 0xe00d, 0x1169: 0x0008, - 0x116a: 0xe00d, 0x116b: 0x0008, 0x116c: 0xe00d, 0x116d: 0x0008, 0x116e: 0xe00d, 0x116f: 0x0008, - 0x1170: 0xe0fd, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008, - 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0xe01d, 0x117a: 0x0008, 0x117b: 0xe03d, - 0x117c: 0x0008, 0x117d: 0x442d, 0x117e: 0xe00d, 0x117f: 0x0008, + 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0x0ea1, 0x115d: 0x6e11, + 0x115e: 0x3308, 0x115f: 0x3308, 0x1160: 0x0008, 0x1161: 0x0008, 0x1162: 0x0008, 0x1163: 0x0008, + 0x1164: 0x0008, 0x1165: 0x0008, 0x1166: 0x0008, 0x1167: 0x0008, 0x1168: 0x0008, 0x1169: 0x0008, + 0x116a: 0x0008, 0x116b: 0x0008, 0x116c: 0x0008, 0x116d: 0x0008, 0x116e: 0x0008, 0x116f: 0x0008, + 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008, + 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0x0008, 0x117a: 0x0008, 0x117b: 0x0008, + 0x117c: 0x0008, 0x117d: 0x0008, 0x117e: 0x0008, 0x117f: 0x0008, // Block 0x46, offset 0x1180 - 0x1180: 0xe00d, 0x1181: 0x0008, 0x1182: 0xe00d, 0x1183: 0x0008, 0x1184: 0xe00d, 0x1185: 0x0008, - 0x1186: 0xe00d, 0x1187: 0x0008, 0x1188: 0x0008, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0xe03d, - 0x118c: 0x0008, 0x118d: 0x11d9, 0x118e: 0x0008, 0x118f: 0x0008, 0x1190: 0xe00d, 0x1191: 0x0008, - 0x1192: 0xe00d, 0x1193: 0x0008, 0x1194: 0x0008, 0x1195: 0x0008, 0x1196: 0xe00d, 0x1197: 0x0008, - 0x1198: 0xe00d, 0x1199: 0x0008, 0x119a: 0xe00d, 0x119b: 0x0008, 0x119c: 0xe00d, 0x119d: 0x0008, - 0x119e: 0xe00d, 0x119f: 0x0008, 0x11a0: 0xe00d, 0x11a1: 0x0008, 0x11a2: 0xe00d, 0x11a3: 0x0008, + 0x1180: 0x0018, 0x1181: 0x0018, 0x1182: 0x0018, 0x1183: 0x0018, 0x1184: 0x0018, 0x1185: 0x0018, + 0x1186: 0x0018, 0x1187: 0x0018, 0x1188: 0x0018, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0x0018, + 0x118c: 0x0018, 0x118d: 0x0018, 0x118e: 0x0018, 0x118f: 0x0018, 0x1190: 0x0018, 0x1191: 0x0018, + 0x1192: 0x0018, 0x1193: 0x0018, 0x1194: 0x0018, 0x1195: 0x0018, 0x1196: 0x0018, 0x1197: 0x0008, + 0x1198: 0x0008, 0x1199: 0x0008, 0x119a: 0x0008, 0x119b: 0x0008, 0x119c: 0x0008, 0x119d: 0x0008, + 0x119e: 0x0008, 0x119f: 0x0008, 0x11a0: 0x0018, 0x11a1: 0x0018, 0x11a2: 0xe00d, 0x11a3: 0x0008, 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 
0x11a8: 0xe00d, 0x11a9: 0x0008, - 0x11aa: 0x6e29, 0x11ab: 0x1029, 0x11ac: 0x11c1, 0x11ad: 0x6e41, 0x11ae: 0x1221, 0x11af: 0x0040, - 0x11b0: 0x6e59, 0x11b1: 0x6e71, 0x11b2: 0x1239, 0x11b3: 0x444d, 0x11b4: 0xe00d, 0x11b5: 0x0008, - 0x11b6: 0xe00d, 0x11b7: 0x0008, 0x11b8: 0x0040, 0x11b9: 0x0040, 0x11ba: 0x0040, 0x11bb: 0x0040, - 0x11bc: 0x0040, 0x11bd: 0x0040, 0x11be: 0x0040, 0x11bf: 0x0040, + 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008, + 0x11b0: 0x0008, 0x11b1: 0x0008, 0x11b2: 0xe00d, 0x11b3: 0x0008, 0x11b4: 0xe00d, 0x11b5: 0x0008, + 0x11b6: 0xe00d, 0x11b7: 0x0008, 0x11b8: 0xe00d, 0x11b9: 0x0008, 0x11ba: 0xe00d, 0x11bb: 0x0008, + 0x11bc: 0xe00d, 0x11bd: 0x0008, 0x11be: 0xe00d, 0x11bf: 0x0008, // Block 0x47, offset 0x11c0 - 0x11c0: 0x64d5, 0x11c1: 0x64f5, 0x11c2: 0x6515, 0x11c3: 0x6535, 0x11c4: 0x6555, 0x11c5: 0x6575, - 0x11c6: 0x6595, 0x11c7: 0x65b5, 0x11c8: 0x65d5, 0x11c9: 0x65f5, 0x11ca: 0x6615, 0x11cb: 0x6635, - 0x11cc: 0x6655, 0x11cd: 0x6675, 0x11ce: 0x0008, 0x11cf: 0x0008, 0x11d0: 0x6695, 0x11d1: 0x0008, - 0x11d2: 0x66b5, 0x11d3: 0x0008, 0x11d4: 0x0008, 0x11d5: 0x66d5, 0x11d6: 0x66f5, 0x11d7: 0x6715, - 0x11d8: 0x6735, 0x11d9: 0x6755, 0x11da: 0x6775, 0x11db: 0x6795, 0x11dc: 0x67b5, 0x11dd: 0x67d5, - 0x11de: 0x67f5, 0x11df: 0x0008, 0x11e0: 0x6815, 0x11e1: 0x0008, 0x11e2: 0x6835, 0x11e3: 0x0008, - 0x11e4: 0x0008, 0x11e5: 0x6855, 0x11e6: 0x6875, 0x11e7: 0x0008, 0x11e8: 0x0008, 0x11e9: 0x0008, - 0x11ea: 0x6895, 0x11eb: 0x68b5, 0x11ec: 0x68d5, 0x11ed: 0x68f5, 0x11ee: 0x6915, 0x11ef: 0x6935, - 0x11f0: 0x6955, 0x11f1: 0x6975, 0x11f2: 0x6995, 0x11f3: 0x69b5, 0x11f4: 0x69d5, 0x11f5: 0x69f5, - 0x11f6: 0x6a15, 0x11f7: 0x6a35, 0x11f8: 0x6a55, 0x11f9: 0x6a75, 0x11fa: 0x6a95, 0x11fb: 0x6ab5, - 0x11fc: 0x6ad5, 0x11fd: 0x6af5, 0x11fe: 0x6b15, 0x11ff: 0x6b35, + 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008, + 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0xe00d, 0x11c9: 0x0008, 0x11ca: 0xe00d, 0x11cb: 0x0008, + 0x11cc: 0xe00d, 0x11cd: 0x0008, 0x11ce: 0xe00d, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008, + 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0xe00d, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008, + 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008, + 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008, + 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008, + 0x11ea: 0xe00d, 0x11eb: 0x0008, 0x11ec: 0xe00d, 0x11ed: 0x0008, 0x11ee: 0xe00d, 0x11ef: 0x0008, + 0x11f0: 0xe0fd, 0x11f1: 0x0008, 0x11f2: 0x0008, 0x11f3: 0x0008, 0x11f4: 0x0008, 0x11f5: 0x0008, + 0x11f6: 0x0008, 0x11f7: 0x0008, 0x11f8: 0x0008, 0x11f9: 0xe01d, 0x11fa: 0x0008, 0x11fb: 0xe03d, + 0x11fc: 0x0008, 0x11fd: 0x442d, 0x11fe: 0xe00d, 0x11ff: 0x0008, // Block 0x48, offset 0x1200 - 0x1200: 0x7a95, 0x1201: 0x7ab5, 0x1202: 0x7ad5, 0x1203: 0x7af5, 0x1204: 0x7b15, 0x1205: 0x7b35, - 0x1206: 0x7b55, 0x1207: 0x7b75, 0x1208: 0x7b95, 0x1209: 0x7bb5, 0x120a: 0x7bd5, 0x120b: 0x7bf5, - 0x120c: 0x7c15, 0x120d: 0x7c35, 0x120e: 0x7c55, 0x120f: 0x6ec9, 0x1210: 0x6ef1, 0x1211: 0x6f19, - 0x1212: 0x7c75, 0x1213: 0x7c95, 0x1214: 0x7cb5, 0x1215: 0x6f41, 0x1216: 0x6f69, 0x1217: 0x6f91, - 0x1218: 0x7cd5, 0x1219: 0x7cf5, 0x121a: 0x0040, 0x121b: 0x0040, 0x121c: 0x0040, 0x121d: 0x0040, - 0x121e: 0x0040, 0x121f: 0x0040, 0x1220: 0x0040, 0x1221: 0x0040, 0x1222: 0x0040, 0x1223: 0x0040, - 0x1224: 0x0040, 0x1225: 0x0040, 0x1226: 0x0040, 0x1227: 
0x0040, 0x1228: 0x0040, 0x1229: 0x0040, - 0x122a: 0x0040, 0x122b: 0x0040, 0x122c: 0x0040, 0x122d: 0x0040, 0x122e: 0x0040, 0x122f: 0x0040, - 0x1230: 0x0040, 0x1231: 0x0040, 0x1232: 0x0040, 0x1233: 0x0040, 0x1234: 0x0040, 0x1235: 0x0040, - 0x1236: 0x0040, 0x1237: 0x0040, 0x1238: 0x0040, 0x1239: 0x0040, 0x123a: 0x0040, 0x123b: 0x0040, + 0x1200: 0xe00d, 0x1201: 0x0008, 0x1202: 0xe00d, 0x1203: 0x0008, 0x1204: 0xe00d, 0x1205: 0x0008, + 0x1206: 0xe00d, 0x1207: 0x0008, 0x1208: 0x0008, 0x1209: 0x0018, 0x120a: 0x0018, 0x120b: 0xe03d, + 0x120c: 0x0008, 0x120d: 0x11d9, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0xe00d, 0x1211: 0x0008, + 0x1212: 0xe00d, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x0008, 0x1216: 0xe00d, 0x1217: 0x0008, + 0x1218: 0xe00d, 0x1219: 0x0008, 0x121a: 0xe00d, 0x121b: 0x0008, 0x121c: 0xe00d, 0x121d: 0x0008, + 0x121e: 0xe00d, 0x121f: 0x0008, 0x1220: 0xe00d, 0x1221: 0x0008, 0x1222: 0xe00d, 0x1223: 0x0008, + 0x1224: 0xe00d, 0x1225: 0x0008, 0x1226: 0xe00d, 0x1227: 0x0008, 0x1228: 0xe00d, 0x1229: 0x0008, + 0x122a: 0x6e29, 0x122b: 0x1029, 0x122c: 0x11c1, 0x122d: 0x6e41, 0x122e: 0x1221, 0x122f: 0x0040, + 0x1230: 0x6e59, 0x1231: 0x6e71, 0x1232: 0x1239, 0x1233: 0x444d, 0x1234: 0xe00d, 0x1235: 0x0008, + 0x1236: 0xe00d, 0x1237: 0x0008, 0x1238: 0x0040, 0x1239: 0x0040, 0x123a: 0x0040, 0x123b: 0x0040, 0x123c: 0x0040, 0x123d: 0x0040, 0x123e: 0x0040, 0x123f: 0x0040, // Block 0x49, offset 0x1240 - 0x1240: 0x6fb9, 0x1241: 0x6fd1, 0x1242: 0x6fe9, 0x1243: 0x7d15, 0x1244: 0x7d35, 0x1245: 0x7001, - 0x1246: 0x7001, 0x1247: 0x0040, 0x1248: 0x0040, 0x1249: 0x0040, 0x124a: 0x0040, 0x124b: 0x0040, - 0x124c: 0x0040, 0x124d: 0x0040, 0x124e: 0x0040, 0x124f: 0x0040, 0x1250: 0x0040, 0x1251: 0x0040, - 0x1252: 0x0040, 0x1253: 0x7019, 0x1254: 0x7041, 0x1255: 0x7069, 0x1256: 0x7091, 0x1257: 0x70b9, - 0x1258: 0x0040, 0x1259: 0x0040, 0x125a: 0x0040, 0x125b: 0x0040, 0x125c: 0x0040, 0x125d: 0x70e1, - 0x125e: 0x1308, 0x125f: 0x7109, 0x1260: 0x7131, 0x1261: 0x20a9, 0x1262: 0x20f1, 0x1263: 0x7149, - 0x1264: 0x7161, 0x1265: 0x7179, 0x1266: 0x7191, 0x1267: 0x71a9, 0x1268: 0x71c1, 0x1269: 0x1fb2, - 0x126a: 0x71d9, 0x126b: 0x7201, 0x126c: 0x7229, 0x126d: 0x7261, 0x126e: 0x7299, 0x126f: 0x72c1, - 0x1270: 0x72e9, 0x1271: 0x7311, 0x1272: 0x7339, 0x1273: 0x7361, 0x1274: 0x7389, 0x1275: 0x73b1, - 0x1276: 0x73d9, 0x1277: 0x0040, 0x1278: 0x7401, 0x1279: 0x7429, 0x127a: 0x7451, 0x127b: 0x7479, - 0x127c: 0x74a1, 0x127d: 0x0040, 0x127e: 0x74c9, 0x127f: 0x0040, + 0x1240: 0x64d5, 0x1241: 0x64f5, 0x1242: 0x6515, 0x1243: 0x6535, 0x1244: 0x6555, 0x1245: 0x6575, + 0x1246: 0x6595, 0x1247: 0x65b5, 0x1248: 0x65d5, 0x1249: 0x65f5, 0x124a: 0x6615, 0x124b: 0x6635, + 0x124c: 0x6655, 0x124d: 0x6675, 0x124e: 0x0008, 0x124f: 0x0008, 0x1250: 0x6695, 0x1251: 0x0008, + 0x1252: 0x66b5, 0x1253: 0x0008, 0x1254: 0x0008, 0x1255: 0x66d5, 0x1256: 0x66f5, 0x1257: 0x6715, + 0x1258: 0x6735, 0x1259: 0x6755, 0x125a: 0x6775, 0x125b: 0x6795, 0x125c: 0x67b5, 0x125d: 0x67d5, + 0x125e: 0x67f5, 0x125f: 0x0008, 0x1260: 0x6815, 0x1261: 0x0008, 0x1262: 0x6835, 0x1263: 0x0008, + 0x1264: 0x0008, 0x1265: 0x6855, 0x1266: 0x6875, 0x1267: 0x0008, 0x1268: 0x0008, 0x1269: 0x0008, + 0x126a: 0x6895, 0x126b: 0x68b5, 0x126c: 0x68d5, 0x126d: 0x68f5, 0x126e: 0x6915, 0x126f: 0x6935, + 0x1270: 0x6955, 0x1271: 0x6975, 0x1272: 0x6995, 0x1273: 0x69b5, 0x1274: 0x69d5, 0x1275: 0x69f5, + 0x1276: 0x6a15, 0x1277: 0x6a35, 0x1278: 0x6a55, 0x1279: 0x6a75, 0x127a: 0x6a95, 0x127b: 0x6ab5, + 0x127c: 0x6ad5, 0x127d: 0x6af5, 0x127e: 0x6b15, 0x127f: 0x6b35, // Block 0x4a, offset 0x1280 - 0x1280: 0x74f1, 0x1281: 
0x7519, 0x1282: 0x0040, 0x1283: 0x7541, 0x1284: 0x7569, 0x1285: 0x0040, - 0x1286: 0x7591, 0x1287: 0x75b9, 0x1288: 0x75e1, 0x1289: 0x7609, 0x128a: 0x7631, 0x128b: 0x7659, - 0x128c: 0x7681, 0x128d: 0x76a9, 0x128e: 0x76d1, 0x128f: 0x76f9, 0x1290: 0x7721, 0x1291: 0x7721, - 0x1292: 0x7739, 0x1293: 0x7739, 0x1294: 0x7739, 0x1295: 0x7739, 0x1296: 0x7751, 0x1297: 0x7751, - 0x1298: 0x7751, 0x1299: 0x7751, 0x129a: 0x7769, 0x129b: 0x7769, 0x129c: 0x7769, 0x129d: 0x7769, - 0x129e: 0x7781, 0x129f: 0x7781, 0x12a0: 0x7781, 0x12a1: 0x7781, 0x12a2: 0x7799, 0x12a3: 0x7799, - 0x12a4: 0x7799, 0x12a5: 0x7799, 0x12a6: 0x77b1, 0x12a7: 0x77b1, 0x12a8: 0x77b1, 0x12a9: 0x77b1, - 0x12aa: 0x77c9, 0x12ab: 0x77c9, 0x12ac: 0x77c9, 0x12ad: 0x77c9, 0x12ae: 0x77e1, 0x12af: 0x77e1, - 0x12b0: 0x77e1, 0x12b1: 0x77e1, 0x12b2: 0x77f9, 0x12b3: 0x77f9, 0x12b4: 0x77f9, 0x12b5: 0x77f9, - 0x12b6: 0x7811, 0x12b7: 0x7811, 0x12b8: 0x7811, 0x12b9: 0x7811, 0x12ba: 0x7829, 0x12bb: 0x7829, - 0x12bc: 0x7829, 0x12bd: 0x7829, 0x12be: 0x7841, 0x12bf: 0x7841, + 0x1280: 0x7a95, 0x1281: 0x7ab5, 0x1282: 0x7ad5, 0x1283: 0x7af5, 0x1284: 0x7b15, 0x1285: 0x7b35, + 0x1286: 0x7b55, 0x1287: 0x7b75, 0x1288: 0x7b95, 0x1289: 0x7bb5, 0x128a: 0x7bd5, 0x128b: 0x7bf5, + 0x128c: 0x7c15, 0x128d: 0x7c35, 0x128e: 0x7c55, 0x128f: 0x6ec9, 0x1290: 0x6ef1, 0x1291: 0x6f19, + 0x1292: 0x7c75, 0x1293: 0x7c95, 0x1294: 0x7cb5, 0x1295: 0x6f41, 0x1296: 0x6f69, 0x1297: 0x6f91, + 0x1298: 0x7cd5, 0x1299: 0x7cf5, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x0040, + 0x129e: 0x0040, 0x129f: 0x0040, 0x12a0: 0x0040, 0x12a1: 0x0040, 0x12a2: 0x0040, 0x12a3: 0x0040, + 0x12a4: 0x0040, 0x12a5: 0x0040, 0x12a6: 0x0040, 0x12a7: 0x0040, 0x12a8: 0x0040, 0x12a9: 0x0040, + 0x12aa: 0x0040, 0x12ab: 0x0040, 0x12ac: 0x0040, 0x12ad: 0x0040, 0x12ae: 0x0040, 0x12af: 0x0040, + 0x12b0: 0x0040, 0x12b1: 0x0040, 0x12b2: 0x0040, 0x12b3: 0x0040, 0x12b4: 0x0040, 0x12b5: 0x0040, + 0x12b6: 0x0040, 0x12b7: 0x0040, 0x12b8: 0x0040, 0x12b9: 0x0040, 0x12ba: 0x0040, 0x12bb: 0x0040, + 0x12bc: 0x0040, 0x12bd: 0x0040, 0x12be: 0x0040, 0x12bf: 0x0040, // Block 0x4b, offset 0x12c0 - 0x12c0: 0x7841, 0x12c1: 0x7841, 0x12c2: 0x7859, 0x12c3: 0x7859, 0x12c4: 0x7871, 0x12c5: 0x7871, - 0x12c6: 0x7889, 0x12c7: 0x7889, 0x12c8: 0x78a1, 0x12c9: 0x78a1, 0x12ca: 0x78b9, 0x12cb: 0x78b9, - 0x12cc: 0x78d1, 0x12cd: 0x78d1, 0x12ce: 0x78e9, 0x12cf: 0x78e9, 0x12d0: 0x78e9, 0x12d1: 0x78e9, - 0x12d2: 0x7901, 0x12d3: 0x7901, 0x12d4: 0x7901, 0x12d5: 0x7901, 0x12d6: 0x7919, 0x12d7: 0x7919, - 0x12d8: 0x7919, 0x12d9: 0x7919, 0x12da: 0x7931, 0x12db: 0x7931, 0x12dc: 0x7931, 0x12dd: 0x7931, - 0x12de: 0x7949, 0x12df: 0x7949, 0x12e0: 0x7961, 0x12e1: 0x7961, 0x12e2: 0x7961, 0x12e3: 0x7961, - 0x12e4: 0x7979, 0x12e5: 0x7979, 0x12e6: 0x7991, 0x12e7: 0x7991, 0x12e8: 0x7991, 0x12e9: 0x7991, - 0x12ea: 0x79a9, 0x12eb: 0x79a9, 0x12ec: 0x79a9, 0x12ed: 0x79a9, 0x12ee: 0x79c1, 0x12ef: 0x79c1, - 0x12f0: 0x79d9, 0x12f1: 0x79d9, 0x12f2: 0x0018, 0x12f3: 0x0018, 0x12f4: 0x0018, 0x12f5: 0x0018, - 0x12f6: 0x0018, 0x12f7: 0x0018, 0x12f8: 0x0018, 0x12f9: 0x0018, 0x12fa: 0x0018, 0x12fb: 0x0018, - 0x12fc: 0x0018, 0x12fd: 0x0018, 0x12fe: 0x0018, 0x12ff: 0x0018, + 0x12c0: 0x6fb9, 0x12c1: 0x6fd1, 0x12c2: 0x6fe9, 0x12c3: 0x7d15, 0x12c4: 0x7d35, 0x12c5: 0x7001, + 0x12c6: 0x7001, 0x12c7: 0x0040, 0x12c8: 0x0040, 0x12c9: 0x0040, 0x12ca: 0x0040, 0x12cb: 0x0040, + 0x12cc: 0x0040, 0x12cd: 0x0040, 0x12ce: 0x0040, 0x12cf: 0x0040, 0x12d0: 0x0040, 0x12d1: 0x0040, + 0x12d2: 0x0040, 0x12d3: 0x7019, 0x12d4: 0x7041, 0x12d5: 0x7069, 0x12d6: 0x7091, 0x12d7: 0x70b9, + 0x12d8: 0x0040, 
0x12d9: 0x0040, 0x12da: 0x0040, 0x12db: 0x0040, 0x12dc: 0x0040, 0x12dd: 0x70e1, + 0x12de: 0x3308, 0x12df: 0x7109, 0x12e0: 0x7131, 0x12e1: 0x20a9, 0x12e2: 0x20f1, 0x12e3: 0x7149, + 0x12e4: 0x7161, 0x12e5: 0x7179, 0x12e6: 0x7191, 0x12e7: 0x71a9, 0x12e8: 0x71c1, 0x12e9: 0x1fb2, + 0x12ea: 0x71d9, 0x12eb: 0x7201, 0x12ec: 0x7229, 0x12ed: 0x7261, 0x12ee: 0x7299, 0x12ef: 0x72c1, + 0x12f0: 0x72e9, 0x12f1: 0x7311, 0x12f2: 0x7339, 0x12f3: 0x7361, 0x12f4: 0x7389, 0x12f5: 0x73b1, + 0x12f6: 0x73d9, 0x12f7: 0x0040, 0x12f8: 0x7401, 0x12f9: 0x7429, 0x12fa: 0x7451, 0x12fb: 0x7479, + 0x12fc: 0x74a1, 0x12fd: 0x0040, 0x12fe: 0x74c9, 0x12ff: 0x0040, // Block 0x4c, offset 0x1300 - 0x1300: 0x0018, 0x1301: 0x0018, 0x1302: 0x0040, 0x1303: 0x0040, 0x1304: 0x0040, 0x1305: 0x0040, - 0x1306: 0x0040, 0x1307: 0x0040, 0x1308: 0x0040, 0x1309: 0x0040, 0x130a: 0x0040, 0x130b: 0x0040, - 0x130c: 0x0040, 0x130d: 0x0040, 0x130e: 0x0040, 0x130f: 0x0040, 0x1310: 0x0040, 0x1311: 0x0040, - 0x1312: 0x0040, 0x1313: 0x79f1, 0x1314: 0x79f1, 0x1315: 0x79f1, 0x1316: 0x79f1, 0x1317: 0x7a09, - 0x1318: 0x7a09, 0x1319: 0x7a21, 0x131a: 0x7a21, 0x131b: 0x7a39, 0x131c: 0x7a39, 0x131d: 0x0479, - 0x131e: 0x7a51, 0x131f: 0x7a51, 0x1320: 0x7a69, 0x1321: 0x7a69, 0x1322: 0x7a81, 0x1323: 0x7a81, - 0x1324: 0x7a99, 0x1325: 0x7a99, 0x1326: 0x7a99, 0x1327: 0x7a99, 0x1328: 0x7ab1, 0x1329: 0x7ab1, - 0x132a: 0x7ac9, 0x132b: 0x7ac9, 0x132c: 0x7af1, 0x132d: 0x7af1, 0x132e: 0x7b19, 0x132f: 0x7b19, - 0x1330: 0x7b41, 0x1331: 0x7b41, 0x1332: 0x7b69, 0x1333: 0x7b69, 0x1334: 0x7b91, 0x1335: 0x7b91, - 0x1336: 0x7bb9, 0x1337: 0x7bb9, 0x1338: 0x7bb9, 0x1339: 0x7be1, 0x133a: 0x7be1, 0x133b: 0x7be1, - 0x133c: 0x7c09, 0x133d: 0x7c09, 0x133e: 0x7c09, 0x133f: 0x7c09, + 0x1300: 0x74f1, 0x1301: 0x7519, 0x1302: 0x0040, 0x1303: 0x7541, 0x1304: 0x7569, 0x1305: 0x0040, + 0x1306: 0x7591, 0x1307: 0x75b9, 0x1308: 0x75e1, 0x1309: 0x7609, 0x130a: 0x7631, 0x130b: 0x7659, + 0x130c: 0x7681, 0x130d: 0x76a9, 0x130e: 0x76d1, 0x130f: 0x76f9, 0x1310: 0x7721, 0x1311: 0x7721, + 0x1312: 0x7739, 0x1313: 0x7739, 0x1314: 0x7739, 0x1315: 0x7739, 0x1316: 0x7751, 0x1317: 0x7751, + 0x1318: 0x7751, 0x1319: 0x7751, 0x131a: 0x7769, 0x131b: 0x7769, 0x131c: 0x7769, 0x131d: 0x7769, + 0x131e: 0x7781, 0x131f: 0x7781, 0x1320: 0x7781, 0x1321: 0x7781, 0x1322: 0x7799, 0x1323: 0x7799, + 0x1324: 0x7799, 0x1325: 0x7799, 0x1326: 0x77b1, 0x1327: 0x77b1, 0x1328: 0x77b1, 0x1329: 0x77b1, + 0x132a: 0x77c9, 0x132b: 0x77c9, 0x132c: 0x77c9, 0x132d: 0x77c9, 0x132e: 0x77e1, 0x132f: 0x77e1, + 0x1330: 0x77e1, 0x1331: 0x77e1, 0x1332: 0x77f9, 0x1333: 0x77f9, 0x1334: 0x77f9, 0x1335: 0x77f9, + 0x1336: 0x7811, 0x1337: 0x7811, 0x1338: 0x7811, 0x1339: 0x7811, 0x133a: 0x7829, 0x133b: 0x7829, + 0x133c: 0x7829, 0x133d: 0x7829, 0x133e: 0x7841, 0x133f: 0x7841, // Block 0x4d, offset 0x1340 - 0x1340: 0x85f9, 0x1341: 0x8621, 0x1342: 0x8649, 0x1343: 0x8671, 0x1344: 0x8699, 0x1345: 0x86c1, - 0x1346: 0x86e9, 0x1347: 0x8711, 0x1348: 0x8739, 0x1349: 0x8761, 0x134a: 0x8789, 0x134b: 0x87b1, - 0x134c: 0x87d9, 0x134d: 0x8801, 0x134e: 0x8829, 0x134f: 0x8851, 0x1350: 0x8879, 0x1351: 0x88a1, - 0x1352: 0x88c9, 0x1353: 0x88f1, 0x1354: 0x8919, 0x1355: 0x8941, 0x1356: 0x8969, 0x1357: 0x8991, - 0x1358: 0x89b9, 0x1359: 0x89e1, 0x135a: 0x8a09, 0x135b: 0x8a31, 0x135c: 0x8a59, 0x135d: 0x8a81, - 0x135e: 0x8aaa, 0x135f: 0x8ada, 0x1360: 0x8b0a, 0x1361: 0x8b3a, 0x1362: 0x8b6a, 0x1363: 0x8b9a, - 0x1364: 0x8bc9, 0x1365: 0x8bf1, 0x1366: 0x7c71, 0x1367: 0x8c19, 0x1368: 0x7be1, 0x1369: 0x7c99, - 0x136a: 0x8c41, 0x136b: 0x8c69, 0x136c: 0x7d39, 0x136d: 0x8c91, 0x136e: 0x7d61, 
0x136f: 0x7d89, - 0x1370: 0x8cb9, 0x1371: 0x8ce1, 0x1372: 0x7e29, 0x1373: 0x8d09, 0x1374: 0x7e51, 0x1375: 0x7e79, - 0x1376: 0x8d31, 0x1377: 0x8d59, 0x1378: 0x7ec9, 0x1379: 0x8d81, 0x137a: 0x7ef1, 0x137b: 0x7f19, - 0x137c: 0x83a1, 0x137d: 0x83c9, 0x137e: 0x8441, 0x137f: 0x8469, + 0x1340: 0x7841, 0x1341: 0x7841, 0x1342: 0x7859, 0x1343: 0x7859, 0x1344: 0x7871, 0x1345: 0x7871, + 0x1346: 0x7889, 0x1347: 0x7889, 0x1348: 0x78a1, 0x1349: 0x78a1, 0x134a: 0x78b9, 0x134b: 0x78b9, + 0x134c: 0x78d1, 0x134d: 0x78d1, 0x134e: 0x78e9, 0x134f: 0x78e9, 0x1350: 0x78e9, 0x1351: 0x78e9, + 0x1352: 0x7901, 0x1353: 0x7901, 0x1354: 0x7901, 0x1355: 0x7901, 0x1356: 0x7919, 0x1357: 0x7919, + 0x1358: 0x7919, 0x1359: 0x7919, 0x135a: 0x7931, 0x135b: 0x7931, 0x135c: 0x7931, 0x135d: 0x7931, + 0x135e: 0x7949, 0x135f: 0x7949, 0x1360: 0x7961, 0x1361: 0x7961, 0x1362: 0x7961, 0x1363: 0x7961, + 0x1364: 0x7979, 0x1365: 0x7979, 0x1366: 0x7991, 0x1367: 0x7991, 0x1368: 0x7991, 0x1369: 0x7991, + 0x136a: 0x79a9, 0x136b: 0x79a9, 0x136c: 0x79a9, 0x136d: 0x79a9, 0x136e: 0x79c1, 0x136f: 0x79c1, + 0x1370: 0x79d9, 0x1371: 0x79d9, 0x1372: 0x0818, 0x1373: 0x0818, 0x1374: 0x0818, 0x1375: 0x0818, + 0x1376: 0x0818, 0x1377: 0x0818, 0x1378: 0x0818, 0x1379: 0x0818, 0x137a: 0x0818, 0x137b: 0x0818, + 0x137c: 0x0818, 0x137d: 0x0818, 0x137e: 0x0818, 0x137f: 0x0818, // Block 0x4e, offset 0x1380 - 0x1380: 0x8491, 0x1381: 0x8531, 0x1382: 0x8559, 0x1383: 0x8581, 0x1384: 0x85a9, 0x1385: 0x8649, - 0x1386: 0x8671, 0x1387: 0x8699, 0x1388: 0x8da9, 0x1389: 0x8739, 0x138a: 0x8dd1, 0x138b: 0x8df9, - 0x138c: 0x8829, 0x138d: 0x8e21, 0x138e: 0x8851, 0x138f: 0x8879, 0x1390: 0x8a81, 0x1391: 0x8e49, - 0x1392: 0x8e71, 0x1393: 0x89b9, 0x1394: 0x8e99, 0x1395: 0x89e1, 0x1396: 0x8a09, 0x1397: 0x7c21, - 0x1398: 0x7c49, 0x1399: 0x8ec1, 0x139a: 0x7c71, 0x139b: 0x8ee9, 0x139c: 0x7cc1, 0x139d: 0x7ce9, - 0x139e: 0x7d11, 0x139f: 0x7d39, 0x13a0: 0x8f11, 0x13a1: 0x7db1, 0x13a2: 0x7dd9, 0x13a3: 0x7e01, - 0x13a4: 0x7e29, 0x13a5: 0x8f39, 0x13a6: 0x7ec9, 0x13a7: 0x7f41, 0x13a8: 0x7f69, 0x13a9: 0x7f91, - 0x13aa: 0x7fb9, 0x13ab: 0x7fe1, 0x13ac: 0x8031, 0x13ad: 0x8059, 0x13ae: 0x8081, 0x13af: 0x80a9, - 0x13b0: 0x80d1, 0x13b1: 0x80f9, 0x13b2: 0x8f61, 0x13b3: 0x8121, 0x13b4: 0x8149, 0x13b5: 0x8171, - 0x13b6: 0x8199, 0x13b7: 0x81c1, 0x13b8: 0x81e9, 0x13b9: 0x8239, 0x13ba: 0x8261, 0x13bb: 0x8289, - 0x13bc: 0x82b1, 0x13bd: 0x82d9, 0x13be: 0x8301, 0x13bf: 0x8329, + 0x1380: 0x0818, 0x1381: 0x0818, 0x1382: 0x0040, 0x1383: 0x0040, 0x1384: 0x0040, 0x1385: 0x0040, + 0x1386: 0x0040, 0x1387: 0x0040, 0x1388: 0x0040, 0x1389: 0x0040, 0x138a: 0x0040, 0x138b: 0x0040, + 0x138c: 0x0040, 0x138d: 0x0040, 0x138e: 0x0040, 0x138f: 0x0040, 0x1390: 0x0040, 0x1391: 0x0040, + 0x1392: 0x0040, 0x1393: 0x79f1, 0x1394: 0x79f1, 0x1395: 0x79f1, 0x1396: 0x79f1, 0x1397: 0x7a09, + 0x1398: 0x7a09, 0x1399: 0x7a21, 0x139a: 0x7a21, 0x139b: 0x7a39, 0x139c: 0x7a39, 0x139d: 0x0479, + 0x139e: 0x7a51, 0x139f: 0x7a51, 0x13a0: 0x7a69, 0x13a1: 0x7a69, 0x13a2: 0x7a81, 0x13a3: 0x7a81, + 0x13a4: 0x7a99, 0x13a5: 0x7a99, 0x13a6: 0x7a99, 0x13a7: 0x7a99, 0x13a8: 0x7ab1, 0x13a9: 0x7ab1, + 0x13aa: 0x7ac9, 0x13ab: 0x7ac9, 0x13ac: 0x7af1, 0x13ad: 0x7af1, 0x13ae: 0x7b19, 0x13af: 0x7b19, + 0x13b0: 0x7b41, 0x13b1: 0x7b41, 0x13b2: 0x7b69, 0x13b3: 0x7b69, 0x13b4: 0x7b91, 0x13b5: 0x7b91, + 0x13b6: 0x7bb9, 0x13b7: 0x7bb9, 0x13b8: 0x7bb9, 0x13b9: 0x7be1, 0x13ba: 0x7be1, 0x13bb: 0x7be1, + 0x13bc: 0x7c09, 0x13bd: 0x7c09, 0x13be: 0x7c09, 0x13bf: 0x7c09, // Block 0x4f, offset 0x13c0 - 0x13c0: 0x8351, 0x13c1: 0x8379, 0x13c2: 0x83f1, 0x13c3: 0x8419, 0x13c4: 
0x84b9, 0x13c5: 0x84e1, - 0x13c6: 0x8509, 0x13c7: 0x8531, 0x13c8: 0x8559, 0x13c9: 0x85d1, 0x13ca: 0x85f9, 0x13cb: 0x8621, - 0x13cc: 0x8649, 0x13cd: 0x8f89, 0x13ce: 0x86c1, 0x13cf: 0x86e9, 0x13d0: 0x8711, 0x13d1: 0x8739, - 0x13d2: 0x87b1, 0x13d3: 0x87d9, 0x13d4: 0x8801, 0x13d5: 0x8829, 0x13d6: 0x8fb1, 0x13d7: 0x88a1, - 0x13d8: 0x88c9, 0x13d9: 0x8fd9, 0x13da: 0x8941, 0x13db: 0x8969, 0x13dc: 0x8991, 0x13dd: 0x89b9, - 0x13de: 0x9001, 0x13df: 0x7c71, 0x13e0: 0x8ee9, 0x13e1: 0x7d39, 0x13e2: 0x8f11, 0x13e3: 0x7e29, - 0x13e4: 0x8f39, 0x13e5: 0x7ec9, 0x13e6: 0x9029, 0x13e7: 0x80d1, 0x13e8: 0x9051, 0x13e9: 0x9079, - 0x13ea: 0x90a1, 0x13eb: 0x8531, 0x13ec: 0x8559, 0x13ed: 0x8649, 0x13ee: 0x8829, 0x13ef: 0x8fb1, - 0x13f0: 0x89b9, 0x13f1: 0x9001, 0x13f2: 0x90c9, 0x13f3: 0x9101, 0x13f4: 0x9139, 0x13f5: 0x9171, - 0x13f6: 0x9199, 0x13f7: 0x91c1, 0x13f8: 0x91e9, 0x13f9: 0x9211, 0x13fa: 0x9239, 0x13fb: 0x9261, - 0x13fc: 0x9289, 0x13fd: 0x92b1, 0x13fe: 0x92d9, 0x13ff: 0x9301, + 0x13c0: 0x85f9, 0x13c1: 0x8621, 0x13c2: 0x8649, 0x13c3: 0x8671, 0x13c4: 0x8699, 0x13c5: 0x86c1, + 0x13c6: 0x86e9, 0x13c7: 0x8711, 0x13c8: 0x8739, 0x13c9: 0x8761, 0x13ca: 0x8789, 0x13cb: 0x87b1, + 0x13cc: 0x87d9, 0x13cd: 0x8801, 0x13ce: 0x8829, 0x13cf: 0x8851, 0x13d0: 0x8879, 0x13d1: 0x88a1, + 0x13d2: 0x88c9, 0x13d3: 0x88f1, 0x13d4: 0x8919, 0x13d5: 0x8941, 0x13d6: 0x8969, 0x13d7: 0x8991, + 0x13d8: 0x89b9, 0x13d9: 0x89e1, 0x13da: 0x8a09, 0x13db: 0x8a31, 0x13dc: 0x8a59, 0x13dd: 0x8a81, + 0x13de: 0x8aaa, 0x13df: 0x8ada, 0x13e0: 0x8b0a, 0x13e1: 0x8b3a, 0x13e2: 0x8b6a, 0x13e3: 0x8b9a, + 0x13e4: 0x8bc9, 0x13e5: 0x8bf1, 0x13e6: 0x7c71, 0x13e7: 0x8c19, 0x13e8: 0x7be1, 0x13e9: 0x7c99, + 0x13ea: 0x8c41, 0x13eb: 0x8c69, 0x13ec: 0x7d39, 0x13ed: 0x8c91, 0x13ee: 0x7d61, 0x13ef: 0x7d89, + 0x13f0: 0x8cb9, 0x13f1: 0x8ce1, 0x13f2: 0x7e29, 0x13f3: 0x8d09, 0x13f4: 0x7e51, 0x13f5: 0x7e79, + 0x13f6: 0x8d31, 0x13f7: 0x8d59, 0x13f8: 0x7ec9, 0x13f9: 0x8d81, 0x13fa: 0x7ef1, 0x13fb: 0x7f19, + 0x13fc: 0x83a1, 0x13fd: 0x83c9, 0x13fe: 0x8441, 0x13ff: 0x8469, // Block 0x50, offset 0x1400 - 0x1400: 0x9329, 0x1401: 0x9351, 0x1402: 0x9379, 0x1403: 0x93a1, 0x1404: 0x93c9, 0x1405: 0x93f1, - 0x1406: 0x9419, 0x1407: 0x9441, 0x1408: 0x9469, 0x1409: 0x9491, 0x140a: 0x94b9, 0x140b: 0x94e1, - 0x140c: 0x9079, 0x140d: 0x9509, 0x140e: 0x9531, 0x140f: 0x9559, 0x1410: 0x9581, 0x1411: 0x9171, - 0x1412: 0x9199, 0x1413: 0x91c1, 0x1414: 0x91e9, 0x1415: 0x9211, 0x1416: 0x9239, 0x1417: 0x9261, - 0x1418: 0x9289, 0x1419: 0x92b1, 0x141a: 0x92d9, 0x141b: 0x9301, 0x141c: 0x9329, 0x141d: 0x9351, - 0x141e: 0x9379, 0x141f: 0x93a1, 0x1420: 0x93c9, 0x1421: 0x93f1, 0x1422: 0x9419, 0x1423: 0x9441, - 0x1424: 0x9469, 0x1425: 0x9491, 0x1426: 0x94b9, 0x1427: 0x94e1, 0x1428: 0x9079, 0x1429: 0x9509, - 0x142a: 0x9531, 0x142b: 0x9559, 0x142c: 0x9581, 0x142d: 0x9491, 0x142e: 0x94b9, 0x142f: 0x94e1, - 0x1430: 0x9079, 0x1431: 0x9051, 0x1432: 0x90a1, 0x1433: 0x8211, 0x1434: 0x8059, 0x1435: 0x8081, - 0x1436: 0x80a9, 0x1437: 0x9491, 0x1438: 0x94b9, 0x1439: 0x94e1, 0x143a: 0x8211, 0x143b: 0x8239, - 0x143c: 0x95a9, 0x143d: 0x95a9, 0x143e: 0x0018, 0x143f: 0x0018, + 0x1400: 0x8491, 0x1401: 0x8531, 0x1402: 0x8559, 0x1403: 0x8581, 0x1404: 0x85a9, 0x1405: 0x8649, + 0x1406: 0x8671, 0x1407: 0x8699, 0x1408: 0x8da9, 0x1409: 0x8739, 0x140a: 0x8dd1, 0x140b: 0x8df9, + 0x140c: 0x8829, 0x140d: 0x8e21, 0x140e: 0x8851, 0x140f: 0x8879, 0x1410: 0x8a81, 0x1411: 0x8e49, + 0x1412: 0x8e71, 0x1413: 0x89b9, 0x1414: 0x8e99, 0x1415: 0x89e1, 0x1416: 0x8a09, 0x1417: 0x7c21, + 0x1418: 0x7c49, 0x1419: 0x8ec1, 0x141a: 0x7c71, 0x141b: 0x8ee9, 
0x141c: 0x7cc1, 0x141d: 0x7ce9, + 0x141e: 0x7d11, 0x141f: 0x7d39, 0x1420: 0x8f11, 0x1421: 0x7db1, 0x1422: 0x7dd9, 0x1423: 0x7e01, + 0x1424: 0x7e29, 0x1425: 0x8f39, 0x1426: 0x7ec9, 0x1427: 0x7f41, 0x1428: 0x7f69, 0x1429: 0x7f91, + 0x142a: 0x7fb9, 0x142b: 0x7fe1, 0x142c: 0x8031, 0x142d: 0x8059, 0x142e: 0x8081, 0x142f: 0x80a9, + 0x1430: 0x80d1, 0x1431: 0x80f9, 0x1432: 0x8f61, 0x1433: 0x8121, 0x1434: 0x8149, 0x1435: 0x8171, + 0x1436: 0x8199, 0x1437: 0x81c1, 0x1438: 0x81e9, 0x1439: 0x8239, 0x143a: 0x8261, 0x143b: 0x8289, + 0x143c: 0x82b1, 0x143d: 0x82d9, 0x143e: 0x8301, 0x143f: 0x8329, // Block 0x51, offset 0x1440 - 0x1440: 0x0040, 0x1441: 0x0040, 0x1442: 0x0040, 0x1443: 0x0040, 0x1444: 0x0040, 0x1445: 0x0040, - 0x1446: 0x0040, 0x1447: 0x0040, 0x1448: 0x0040, 0x1449: 0x0040, 0x144a: 0x0040, 0x144b: 0x0040, - 0x144c: 0x0040, 0x144d: 0x0040, 0x144e: 0x0040, 0x144f: 0x0040, 0x1450: 0x95d1, 0x1451: 0x9609, - 0x1452: 0x9609, 0x1453: 0x9641, 0x1454: 0x9679, 0x1455: 0x96b1, 0x1456: 0x96e9, 0x1457: 0x9721, - 0x1458: 0x9759, 0x1459: 0x9759, 0x145a: 0x9791, 0x145b: 0x97c9, 0x145c: 0x9801, 0x145d: 0x9839, - 0x145e: 0x9871, 0x145f: 0x98a9, 0x1460: 0x98a9, 0x1461: 0x98e1, 0x1462: 0x9919, 0x1463: 0x9919, - 0x1464: 0x9951, 0x1465: 0x9951, 0x1466: 0x9989, 0x1467: 0x99c1, 0x1468: 0x99c1, 0x1469: 0x99f9, - 0x146a: 0x9a31, 0x146b: 0x9a31, 0x146c: 0x9a69, 0x146d: 0x9a69, 0x146e: 0x9aa1, 0x146f: 0x9ad9, - 0x1470: 0x9ad9, 0x1471: 0x9b11, 0x1472: 0x9b11, 0x1473: 0x9b49, 0x1474: 0x9b81, 0x1475: 0x9bb9, - 0x1476: 0x9bf1, 0x1477: 0x9bf1, 0x1478: 0x9c29, 0x1479: 0x9c61, 0x147a: 0x9c99, 0x147b: 0x9cd1, - 0x147c: 0x9d09, 0x147d: 0x9d09, 0x147e: 0x9d41, 0x147f: 0x9d79, + 0x1440: 0x8351, 0x1441: 0x8379, 0x1442: 0x83f1, 0x1443: 0x8419, 0x1444: 0x84b9, 0x1445: 0x84e1, + 0x1446: 0x8509, 0x1447: 0x8531, 0x1448: 0x8559, 0x1449: 0x85d1, 0x144a: 0x85f9, 0x144b: 0x8621, + 0x144c: 0x8649, 0x144d: 0x8f89, 0x144e: 0x86c1, 0x144f: 0x86e9, 0x1450: 0x8711, 0x1451: 0x8739, + 0x1452: 0x87b1, 0x1453: 0x87d9, 0x1454: 0x8801, 0x1455: 0x8829, 0x1456: 0x8fb1, 0x1457: 0x88a1, + 0x1458: 0x88c9, 0x1459: 0x8fd9, 0x145a: 0x8941, 0x145b: 0x8969, 0x145c: 0x8991, 0x145d: 0x89b9, + 0x145e: 0x9001, 0x145f: 0x7c71, 0x1460: 0x8ee9, 0x1461: 0x7d39, 0x1462: 0x8f11, 0x1463: 0x7e29, + 0x1464: 0x8f39, 0x1465: 0x7ec9, 0x1466: 0x9029, 0x1467: 0x80d1, 0x1468: 0x9051, 0x1469: 0x9079, + 0x146a: 0x90a1, 0x146b: 0x8531, 0x146c: 0x8559, 0x146d: 0x8649, 0x146e: 0x8829, 0x146f: 0x8fb1, + 0x1470: 0x89b9, 0x1471: 0x9001, 0x1472: 0x90c9, 0x1473: 0x9101, 0x1474: 0x9139, 0x1475: 0x9171, + 0x1476: 0x9199, 0x1477: 0x91c1, 0x1478: 0x91e9, 0x1479: 0x9211, 0x147a: 0x9239, 0x147b: 0x9261, + 0x147c: 0x9289, 0x147d: 0x92b1, 0x147e: 0x92d9, 0x147f: 0x9301, // Block 0x52, offset 0x1480 - 0x1480: 0xa949, 0x1481: 0xa981, 0x1482: 0xa9b9, 0x1483: 0xa8a1, 0x1484: 0x9bb9, 0x1485: 0x9989, - 0x1486: 0xa9f1, 0x1487: 0xaa29, 0x1488: 0x0040, 0x1489: 0x0040, 0x148a: 0x0040, 0x148b: 0x0040, - 0x148c: 0x0040, 0x148d: 0x0040, 0x148e: 0x0040, 0x148f: 0x0040, 0x1490: 0x0040, 0x1491: 0x0040, - 0x1492: 0x0040, 0x1493: 0x0040, 0x1494: 0x0040, 0x1495: 0x0040, 0x1496: 0x0040, 0x1497: 0x0040, - 0x1498: 0x0040, 0x1499: 0x0040, 0x149a: 0x0040, 0x149b: 0x0040, 0x149c: 0x0040, 0x149d: 0x0040, - 0x149e: 0x0040, 0x149f: 0x0040, 0x14a0: 0x0040, 0x14a1: 0x0040, 0x14a2: 0x0040, 0x14a3: 0x0040, - 0x14a4: 0x0040, 0x14a5: 0x0040, 0x14a6: 0x0040, 0x14a7: 0x0040, 0x14a8: 0x0040, 0x14a9: 0x0040, - 0x14aa: 0x0040, 0x14ab: 0x0040, 0x14ac: 0x0040, 0x14ad: 0x0040, 0x14ae: 0x0040, 0x14af: 0x0040, - 0x14b0: 0xaa61, 0x14b1: 
0xaa99, 0x14b2: 0xaad1, 0x14b3: 0xab19, 0x14b4: 0xab61, 0x14b5: 0xaba9, - 0x14b6: 0xabf1, 0x14b7: 0xac39, 0x14b8: 0xac81, 0x14b9: 0xacc9, 0x14ba: 0xad02, 0x14bb: 0xae12, - 0x14bc: 0xae91, 0x14bd: 0x0018, 0x14be: 0x0040, 0x14bf: 0x0040, + 0x1480: 0x9329, 0x1481: 0x9351, 0x1482: 0x9379, 0x1483: 0x93a1, 0x1484: 0x93c9, 0x1485: 0x93f1, + 0x1486: 0x9419, 0x1487: 0x9441, 0x1488: 0x9469, 0x1489: 0x9491, 0x148a: 0x94b9, 0x148b: 0x94e1, + 0x148c: 0x9079, 0x148d: 0x9509, 0x148e: 0x9531, 0x148f: 0x9559, 0x1490: 0x9581, 0x1491: 0x9171, + 0x1492: 0x9199, 0x1493: 0x91c1, 0x1494: 0x91e9, 0x1495: 0x9211, 0x1496: 0x9239, 0x1497: 0x9261, + 0x1498: 0x9289, 0x1499: 0x92b1, 0x149a: 0x92d9, 0x149b: 0x9301, 0x149c: 0x9329, 0x149d: 0x9351, + 0x149e: 0x9379, 0x149f: 0x93a1, 0x14a0: 0x93c9, 0x14a1: 0x93f1, 0x14a2: 0x9419, 0x14a3: 0x9441, + 0x14a4: 0x9469, 0x14a5: 0x9491, 0x14a6: 0x94b9, 0x14a7: 0x94e1, 0x14a8: 0x9079, 0x14a9: 0x9509, + 0x14aa: 0x9531, 0x14ab: 0x9559, 0x14ac: 0x9581, 0x14ad: 0x9491, 0x14ae: 0x94b9, 0x14af: 0x94e1, + 0x14b0: 0x9079, 0x14b1: 0x9051, 0x14b2: 0x90a1, 0x14b3: 0x8211, 0x14b4: 0x8059, 0x14b5: 0x8081, + 0x14b6: 0x80a9, 0x14b7: 0x9491, 0x14b8: 0x94b9, 0x14b9: 0x94e1, 0x14ba: 0x8211, 0x14bb: 0x8239, + 0x14bc: 0x95a9, 0x14bd: 0x95a9, 0x14be: 0x0018, 0x14bf: 0x0018, // Block 0x53, offset 0x14c0 - 0x14c0: 0x13c0, 0x14c1: 0x13c0, 0x14c2: 0x13c0, 0x14c3: 0x13c0, 0x14c4: 0x13c0, 0x14c5: 0x13c0, - 0x14c6: 0x13c0, 0x14c7: 0x13c0, 0x14c8: 0x13c0, 0x14c9: 0x13c0, 0x14ca: 0x13c0, 0x14cb: 0x13c0, - 0x14cc: 0x13c0, 0x14cd: 0x13c0, 0x14ce: 0x13c0, 0x14cf: 0x13c0, 0x14d0: 0xaeda, 0x14d1: 0x7d55, - 0x14d2: 0x0040, 0x14d3: 0xaeea, 0x14d4: 0x03c2, 0x14d5: 0xaefa, 0x14d6: 0xaf0a, 0x14d7: 0x7d75, - 0x14d8: 0x7d95, 0x14d9: 0x0040, 0x14da: 0x0040, 0x14db: 0x0040, 0x14dc: 0x0040, 0x14dd: 0x0040, - 0x14de: 0x0040, 0x14df: 0x0040, 0x14e0: 0x1308, 0x14e1: 0x1308, 0x14e2: 0x1308, 0x14e3: 0x1308, - 0x14e4: 0x1308, 0x14e5: 0x1308, 0x14e6: 0x1308, 0x14e7: 0x1308, 0x14e8: 0x1308, 0x14e9: 0x1308, - 0x14ea: 0x1308, 0x14eb: 0x1308, 0x14ec: 0x1308, 0x14ed: 0x1308, 0x14ee: 0x1308, 0x14ef: 0x1308, - 0x14f0: 0x0040, 0x14f1: 0x7db5, 0x14f2: 0x7dd5, 0x14f3: 0xaf1a, 0x14f4: 0xaf1a, 0x14f5: 0x1fd2, - 0x14f6: 0x1fe2, 0x14f7: 0xaf2a, 0x14f8: 0xaf3a, 0x14f9: 0x7df5, 0x14fa: 0x7e15, 0x14fb: 0x7e35, - 0x14fc: 0x7df5, 0x14fd: 0x7e55, 0x14fe: 0x7e75, 0x14ff: 0x7e55, + 0x14c0: 0x0040, 0x14c1: 0x0040, 0x14c2: 0x0040, 0x14c3: 0x0040, 0x14c4: 0x0040, 0x14c5: 0x0040, + 0x14c6: 0x0040, 0x14c7: 0x0040, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040, + 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x95d1, 0x14d1: 0x9609, + 0x14d2: 0x9609, 0x14d3: 0x9641, 0x14d4: 0x9679, 0x14d5: 0x96b1, 0x14d6: 0x96e9, 0x14d7: 0x9721, + 0x14d8: 0x9759, 0x14d9: 0x9759, 0x14da: 0x9791, 0x14db: 0x97c9, 0x14dc: 0x9801, 0x14dd: 0x9839, + 0x14de: 0x9871, 0x14df: 0x98a9, 0x14e0: 0x98a9, 0x14e1: 0x98e1, 0x14e2: 0x9919, 0x14e3: 0x9919, + 0x14e4: 0x9951, 0x14e5: 0x9951, 0x14e6: 0x9989, 0x14e7: 0x99c1, 0x14e8: 0x99c1, 0x14e9: 0x99f9, + 0x14ea: 0x9a31, 0x14eb: 0x9a31, 0x14ec: 0x9a69, 0x14ed: 0x9a69, 0x14ee: 0x9aa1, 0x14ef: 0x9ad9, + 0x14f0: 0x9ad9, 0x14f1: 0x9b11, 0x14f2: 0x9b11, 0x14f3: 0x9b49, 0x14f4: 0x9b81, 0x14f5: 0x9bb9, + 0x14f6: 0x9bf1, 0x14f7: 0x9bf1, 0x14f8: 0x9c29, 0x14f9: 0x9c61, 0x14fa: 0x9c99, 0x14fb: 0x9cd1, + 0x14fc: 0x9d09, 0x14fd: 0x9d09, 0x14fe: 0x9d41, 0x14ff: 0x9d79, // Block 0x54, offset 0x1500 - 0x1500: 0x7e95, 0x1501: 0x7eb5, 0x1502: 0x7ed5, 0x1503: 0x7eb5, 0x1504: 0x7ef5, 0x1505: 0x0018, - 0x1506: 0x0018, 
0x1507: 0xaf4a, 0x1508: 0xaf5a, 0x1509: 0x7f16, 0x150a: 0x7f36, 0x150b: 0x7f56, - 0x150c: 0x7f76, 0x150d: 0xaf1a, 0x150e: 0xaf1a, 0x150f: 0xaf1a, 0x1510: 0xaeda, 0x1511: 0x7f95, - 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x03c2, 0x1515: 0xaeea, 0x1516: 0xaf0a, 0x1517: 0xaefa, - 0x1518: 0x7fb5, 0x1519: 0x1fd2, 0x151a: 0x1fe2, 0x151b: 0xaf2a, 0x151c: 0xaf3a, 0x151d: 0x7e95, - 0x151e: 0x7ef5, 0x151f: 0xaf6a, 0x1520: 0xaf7a, 0x1521: 0xaf8a, 0x1522: 0x1fb2, 0x1523: 0xaf99, - 0x1524: 0xafaa, 0x1525: 0xafba, 0x1526: 0x1fc2, 0x1527: 0x0040, 0x1528: 0xafca, 0x1529: 0xafda, - 0x152a: 0xafea, 0x152b: 0xaffa, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040, - 0x1530: 0x7fd6, 0x1531: 0xb009, 0x1532: 0x7ff6, 0x1533: 0x0008, 0x1534: 0x8016, 0x1535: 0x0040, - 0x1536: 0x8036, 0x1537: 0xb031, 0x1538: 0x8056, 0x1539: 0xb059, 0x153a: 0x8076, 0x153b: 0xb081, - 0x153c: 0x8096, 0x153d: 0xb0a9, 0x153e: 0x80b6, 0x153f: 0xb0d1, + 0x1500: 0xa949, 0x1501: 0xa981, 0x1502: 0xa9b9, 0x1503: 0xa8a1, 0x1504: 0x9bb9, 0x1505: 0x9989, + 0x1506: 0xa9f1, 0x1507: 0xaa29, 0x1508: 0x0040, 0x1509: 0x0040, 0x150a: 0x0040, 0x150b: 0x0040, + 0x150c: 0x0040, 0x150d: 0x0040, 0x150e: 0x0040, 0x150f: 0x0040, 0x1510: 0x0040, 0x1511: 0x0040, + 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x0040, 0x1515: 0x0040, 0x1516: 0x0040, 0x1517: 0x0040, + 0x1518: 0x0040, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040, + 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x0040, 0x1521: 0x0040, 0x1522: 0x0040, 0x1523: 0x0040, + 0x1524: 0x0040, 0x1525: 0x0040, 0x1526: 0x0040, 0x1527: 0x0040, 0x1528: 0x0040, 0x1529: 0x0040, + 0x152a: 0x0040, 0x152b: 0x0040, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040, + 0x1530: 0xaa61, 0x1531: 0xaa99, 0x1532: 0xaad1, 0x1533: 0xab19, 0x1534: 0xab61, 0x1535: 0xaba9, + 0x1536: 0xabf1, 0x1537: 0xac39, 0x1538: 0xac81, 0x1539: 0xacc9, 0x153a: 0xad02, 0x153b: 0xae12, + 0x153c: 0xae91, 0x153d: 0x0018, 0x153e: 0x0040, 0x153f: 0x0040, // Block 0x55, offset 0x1540 - 0x1540: 0xb0f9, 0x1541: 0xb111, 0x1542: 0xb111, 0x1543: 0xb129, 0x1544: 0xb129, 0x1545: 0xb141, - 0x1546: 0xb141, 0x1547: 0xb159, 0x1548: 0xb159, 0x1549: 0xb171, 0x154a: 0xb171, 0x154b: 0xb171, - 0x154c: 0xb171, 0x154d: 0xb189, 0x154e: 0xb189, 0x154f: 0xb1a1, 0x1550: 0xb1a1, 0x1551: 0xb1a1, - 0x1552: 0xb1a1, 0x1553: 0xb1b9, 0x1554: 0xb1b9, 0x1555: 0xb1d1, 0x1556: 0xb1d1, 0x1557: 0xb1d1, - 0x1558: 0xb1d1, 0x1559: 0xb1e9, 0x155a: 0xb1e9, 0x155b: 0xb1e9, 0x155c: 0xb1e9, 0x155d: 0xb201, - 0x155e: 0xb201, 0x155f: 0xb201, 0x1560: 0xb201, 0x1561: 0xb219, 0x1562: 0xb219, 0x1563: 0xb219, - 0x1564: 0xb219, 0x1565: 0xb231, 0x1566: 0xb231, 0x1567: 0xb231, 0x1568: 0xb231, 0x1569: 0xb249, - 0x156a: 0xb249, 0x156b: 0xb261, 0x156c: 0xb261, 0x156d: 0xb279, 0x156e: 0xb279, 0x156f: 0xb291, - 0x1570: 0xb291, 0x1571: 0xb2a9, 0x1572: 0xb2a9, 0x1573: 0xb2a9, 0x1574: 0xb2a9, 0x1575: 0xb2c1, - 0x1576: 0xb2c1, 0x1577: 0xb2c1, 0x1578: 0xb2c1, 0x1579: 0xb2d9, 0x157a: 0xb2d9, 0x157b: 0xb2d9, - 0x157c: 0xb2d9, 0x157d: 0xb2f1, 0x157e: 0xb2f1, 0x157f: 0xb2f1, + 0x1540: 0x33c0, 0x1541: 0x33c0, 0x1542: 0x33c0, 0x1543: 0x33c0, 0x1544: 0x33c0, 0x1545: 0x33c0, + 0x1546: 0x33c0, 0x1547: 0x33c0, 0x1548: 0x33c0, 0x1549: 0x33c0, 0x154a: 0x33c0, 0x154b: 0x33c0, + 0x154c: 0x33c0, 0x154d: 0x33c0, 0x154e: 0x33c0, 0x154f: 0x33c0, 0x1550: 0xaeda, 0x1551: 0x7d55, + 0x1552: 0x0040, 0x1553: 0xaeea, 0x1554: 0x03c2, 0x1555: 0xaefa, 0x1556: 0xaf0a, 0x1557: 0x7d75, + 0x1558: 0x7d95, 0x1559: 0x0040, 0x155a: 0x0040, 0x155b: 0x0040, 0x155c: 0x0040, 0x155d: 0x0040, + 0x155e: 
0x0040, 0x155f: 0x0040, 0x1560: 0x3308, 0x1561: 0x3308, 0x1562: 0x3308, 0x1563: 0x3308, + 0x1564: 0x3308, 0x1565: 0x3308, 0x1566: 0x3308, 0x1567: 0x3308, 0x1568: 0x3308, 0x1569: 0x3308, + 0x156a: 0x3308, 0x156b: 0x3308, 0x156c: 0x3308, 0x156d: 0x3308, 0x156e: 0x3308, 0x156f: 0x3308, + 0x1570: 0x0040, 0x1571: 0x7db5, 0x1572: 0x7dd5, 0x1573: 0xaf1a, 0x1574: 0xaf1a, 0x1575: 0x1fd2, + 0x1576: 0x1fe2, 0x1577: 0xaf2a, 0x1578: 0xaf3a, 0x1579: 0x7df5, 0x157a: 0x7e15, 0x157b: 0x7e35, + 0x157c: 0x7df5, 0x157d: 0x7e55, 0x157e: 0x7e75, 0x157f: 0x7e55, // Block 0x56, offset 0x1580 - 0x1580: 0xb2f1, 0x1581: 0xb309, 0x1582: 0xb309, 0x1583: 0xb309, 0x1584: 0xb309, 0x1585: 0xb321, - 0x1586: 0xb321, 0x1587: 0xb321, 0x1588: 0xb321, 0x1589: 0xb339, 0x158a: 0xb339, 0x158b: 0xb339, - 0x158c: 0xb339, 0x158d: 0xb351, 0x158e: 0xb351, 0x158f: 0xb351, 0x1590: 0xb351, 0x1591: 0xb369, - 0x1592: 0xb369, 0x1593: 0xb369, 0x1594: 0xb369, 0x1595: 0xb381, 0x1596: 0xb381, 0x1597: 0xb381, - 0x1598: 0xb381, 0x1599: 0xb399, 0x159a: 0xb399, 0x159b: 0xb399, 0x159c: 0xb399, 0x159d: 0xb3b1, - 0x159e: 0xb3b1, 0x159f: 0xb3b1, 0x15a0: 0xb3b1, 0x15a1: 0xb3c9, 0x15a2: 0xb3c9, 0x15a3: 0xb3c9, - 0x15a4: 0xb3c9, 0x15a5: 0xb3e1, 0x15a6: 0xb3e1, 0x15a7: 0xb3e1, 0x15a8: 0xb3e1, 0x15a9: 0xb3f9, - 0x15aa: 0xb3f9, 0x15ab: 0xb3f9, 0x15ac: 0xb3f9, 0x15ad: 0xb411, 0x15ae: 0xb411, 0x15af: 0x7ab1, - 0x15b0: 0x7ab1, 0x15b1: 0xb429, 0x15b2: 0xb429, 0x15b3: 0xb429, 0x15b4: 0xb429, 0x15b5: 0xb441, - 0x15b6: 0xb441, 0x15b7: 0xb469, 0x15b8: 0xb469, 0x15b9: 0xb491, 0x15ba: 0xb491, 0x15bb: 0xb4b9, - 0x15bc: 0xb4b9, 0x15bd: 0x0040, 0x15be: 0x0040, 0x15bf: 0x03c0, + 0x1580: 0x7e95, 0x1581: 0x7eb5, 0x1582: 0x7ed5, 0x1583: 0x7eb5, 0x1584: 0x7ef5, 0x1585: 0x0018, + 0x1586: 0x0018, 0x1587: 0xaf4a, 0x1588: 0xaf5a, 0x1589: 0x7f16, 0x158a: 0x7f36, 0x158b: 0x7f56, + 0x158c: 0x7f76, 0x158d: 0xaf1a, 0x158e: 0xaf1a, 0x158f: 0xaf1a, 0x1590: 0xaeda, 0x1591: 0x7f95, + 0x1592: 0x0040, 0x1593: 0x0040, 0x1594: 0x03c2, 0x1595: 0xaeea, 0x1596: 0xaf0a, 0x1597: 0xaefa, + 0x1598: 0x7fb5, 0x1599: 0x1fd2, 0x159a: 0x1fe2, 0x159b: 0xaf2a, 0x159c: 0xaf3a, 0x159d: 0x7e95, + 0x159e: 0x7ef5, 0x159f: 0xaf6a, 0x15a0: 0xaf7a, 0x15a1: 0xaf8a, 0x15a2: 0x1fb2, 0x15a3: 0xaf99, + 0x15a4: 0xafaa, 0x15a5: 0xafba, 0x15a6: 0x1fc2, 0x15a7: 0x0040, 0x15a8: 0xafca, 0x15a9: 0xafda, + 0x15aa: 0xafea, 0x15ab: 0xaffa, 0x15ac: 0x0040, 0x15ad: 0x0040, 0x15ae: 0x0040, 0x15af: 0x0040, + 0x15b0: 0x7fd6, 0x15b1: 0xb009, 0x15b2: 0x7ff6, 0x15b3: 0x0808, 0x15b4: 0x8016, 0x15b5: 0x0040, + 0x15b6: 0x8036, 0x15b7: 0xb031, 0x15b8: 0x8056, 0x15b9: 0xb059, 0x15ba: 0x8076, 0x15bb: 0xb081, + 0x15bc: 0x8096, 0x15bd: 0xb0a9, 0x15be: 0x80b6, 0x15bf: 0xb0d1, // Block 0x57, offset 0x15c0 - 0x15c0: 0x0040, 0x15c1: 0xaefa, 0x15c2: 0xb4e2, 0x15c3: 0xaf6a, 0x15c4: 0xafda, 0x15c5: 0xafea, - 0x15c6: 0xaf7a, 0x15c7: 0xb4f2, 0x15c8: 0x1fd2, 0x15c9: 0x1fe2, 0x15ca: 0xaf8a, 0x15cb: 0x1fb2, - 0x15cc: 0xaeda, 0x15cd: 0xaf99, 0x15ce: 0x29d1, 0x15cf: 0xb502, 0x15d0: 0x1f41, 0x15d1: 0x00c9, - 0x15d2: 0x0069, 0x15d3: 0x0079, 0x15d4: 0x1f51, 0x15d5: 0x1f61, 0x15d6: 0x1f71, 0x15d7: 0x1f81, - 0x15d8: 0x1f91, 0x15d9: 0x1fa1, 0x15da: 0xaeea, 0x15db: 0x03c2, 0x15dc: 0xafaa, 0x15dd: 0x1fc2, - 0x15de: 0xafba, 0x15df: 0xaf0a, 0x15e0: 0xaffa, 0x15e1: 0x0039, 0x15e2: 0x0ee9, 0x15e3: 0x1159, - 0x15e4: 0x0ef9, 0x15e5: 0x0f09, 0x15e6: 0x1199, 0x15e7: 0x0f31, 0x15e8: 0x0249, 0x15e9: 0x0f41, - 0x15ea: 0x0259, 0x15eb: 0x0f51, 0x15ec: 0x0359, 0x15ed: 0x0f61, 0x15ee: 0x0f71, 0x15ef: 0x00d9, - 0x15f0: 0x0f99, 0x15f1: 0x2039, 0x15f2: 0x0269, 0x15f3: 0x01d9, 0x15f4: 
0x0fa9, 0x15f5: 0x0fb9, - 0x15f6: 0x1089, 0x15f7: 0x0279, 0x15f8: 0x0369, 0x15f9: 0x0289, 0x15fa: 0x13d1, 0x15fb: 0xaf4a, - 0x15fc: 0xafca, 0x15fd: 0xaf5a, 0x15fe: 0xb512, 0x15ff: 0xaf1a, + 0x15c0: 0xb0f9, 0x15c1: 0xb111, 0x15c2: 0xb111, 0x15c3: 0xb129, 0x15c4: 0xb129, 0x15c5: 0xb141, + 0x15c6: 0xb141, 0x15c7: 0xb159, 0x15c8: 0xb159, 0x15c9: 0xb171, 0x15ca: 0xb171, 0x15cb: 0xb171, + 0x15cc: 0xb171, 0x15cd: 0xb189, 0x15ce: 0xb189, 0x15cf: 0xb1a1, 0x15d0: 0xb1a1, 0x15d1: 0xb1a1, + 0x15d2: 0xb1a1, 0x15d3: 0xb1b9, 0x15d4: 0xb1b9, 0x15d5: 0xb1d1, 0x15d6: 0xb1d1, 0x15d7: 0xb1d1, + 0x15d8: 0xb1d1, 0x15d9: 0xb1e9, 0x15da: 0xb1e9, 0x15db: 0xb1e9, 0x15dc: 0xb1e9, 0x15dd: 0xb201, + 0x15de: 0xb201, 0x15df: 0xb201, 0x15e0: 0xb201, 0x15e1: 0xb219, 0x15e2: 0xb219, 0x15e3: 0xb219, + 0x15e4: 0xb219, 0x15e5: 0xb231, 0x15e6: 0xb231, 0x15e7: 0xb231, 0x15e8: 0xb231, 0x15e9: 0xb249, + 0x15ea: 0xb249, 0x15eb: 0xb261, 0x15ec: 0xb261, 0x15ed: 0xb279, 0x15ee: 0xb279, 0x15ef: 0xb291, + 0x15f0: 0xb291, 0x15f1: 0xb2a9, 0x15f2: 0xb2a9, 0x15f3: 0xb2a9, 0x15f4: 0xb2a9, 0x15f5: 0xb2c1, + 0x15f6: 0xb2c1, 0x15f7: 0xb2c1, 0x15f8: 0xb2c1, 0x15f9: 0xb2d9, 0x15fa: 0xb2d9, 0x15fb: 0xb2d9, + 0x15fc: 0xb2d9, 0x15fd: 0xb2f1, 0x15fe: 0xb2f1, 0x15ff: 0xb2f1, // Block 0x58, offset 0x1600 - 0x1600: 0x1caa, 0x1601: 0x0039, 0x1602: 0x0ee9, 0x1603: 0x1159, 0x1604: 0x0ef9, 0x1605: 0x0f09, - 0x1606: 0x1199, 0x1607: 0x0f31, 0x1608: 0x0249, 0x1609: 0x0f41, 0x160a: 0x0259, 0x160b: 0x0f51, - 0x160c: 0x0359, 0x160d: 0x0f61, 0x160e: 0x0f71, 0x160f: 0x00d9, 0x1610: 0x0f99, 0x1611: 0x2039, - 0x1612: 0x0269, 0x1613: 0x01d9, 0x1614: 0x0fa9, 0x1615: 0x0fb9, 0x1616: 0x1089, 0x1617: 0x0279, - 0x1618: 0x0369, 0x1619: 0x0289, 0x161a: 0x13d1, 0x161b: 0xaf2a, 0x161c: 0xb522, 0x161d: 0xaf3a, - 0x161e: 0xb532, 0x161f: 0x80d5, 0x1620: 0x80f5, 0x1621: 0x29d1, 0x1622: 0x8115, 0x1623: 0x8115, - 0x1624: 0x8135, 0x1625: 0x8155, 0x1626: 0x8175, 0x1627: 0x8195, 0x1628: 0x81b5, 0x1629: 0x81d5, - 0x162a: 0x81f5, 0x162b: 0x8215, 0x162c: 0x8235, 0x162d: 0x8255, 0x162e: 0x8275, 0x162f: 0x8295, - 0x1630: 0x82b5, 0x1631: 0x82d5, 0x1632: 0x82f5, 0x1633: 0x8315, 0x1634: 0x8335, 0x1635: 0x8355, - 0x1636: 0x8375, 0x1637: 0x8395, 0x1638: 0x83b5, 0x1639: 0x83d5, 0x163a: 0x83f5, 0x163b: 0x8415, - 0x163c: 0x81b5, 0x163d: 0x8435, 0x163e: 0x8455, 0x163f: 0x8215, + 0x1600: 0xb2f1, 0x1601: 0xb309, 0x1602: 0xb309, 0x1603: 0xb309, 0x1604: 0xb309, 0x1605: 0xb321, + 0x1606: 0xb321, 0x1607: 0xb321, 0x1608: 0xb321, 0x1609: 0xb339, 0x160a: 0xb339, 0x160b: 0xb339, + 0x160c: 0xb339, 0x160d: 0xb351, 0x160e: 0xb351, 0x160f: 0xb351, 0x1610: 0xb351, 0x1611: 0xb369, + 0x1612: 0xb369, 0x1613: 0xb369, 0x1614: 0xb369, 0x1615: 0xb381, 0x1616: 0xb381, 0x1617: 0xb381, + 0x1618: 0xb381, 0x1619: 0xb399, 0x161a: 0xb399, 0x161b: 0xb399, 0x161c: 0xb399, 0x161d: 0xb3b1, + 0x161e: 0xb3b1, 0x161f: 0xb3b1, 0x1620: 0xb3b1, 0x1621: 0xb3c9, 0x1622: 0xb3c9, 0x1623: 0xb3c9, + 0x1624: 0xb3c9, 0x1625: 0xb3e1, 0x1626: 0xb3e1, 0x1627: 0xb3e1, 0x1628: 0xb3e1, 0x1629: 0xb3f9, + 0x162a: 0xb3f9, 0x162b: 0xb3f9, 0x162c: 0xb3f9, 0x162d: 0xb411, 0x162e: 0xb411, 0x162f: 0x7ab1, + 0x1630: 0x7ab1, 0x1631: 0xb429, 0x1632: 0xb429, 0x1633: 0xb429, 0x1634: 0xb429, 0x1635: 0xb441, + 0x1636: 0xb441, 0x1637: 0xb469, 0x1638: 0xb469, 0x1639: 0xb491, 0x163a: 0xb491, 0x163b: 0xb4b9, + 0x163c: 0xb4b9, 0x163d: 0x0040, 0x163e: 0x0040, 0x163f: 0x03c0, // Block 0x59, offset 0x1640 - 0x1640: 0x8475, 0x1641: 0x8495, 0x1642: 0x84b5, 0x1643: 0x84d5, 0x1644: 0x84f5, 0x1645: 0x8515, - 0x1646: 0x8535, 0x1647: 0x8555, 0x1648: 0x84d5, 0x1649: 0x8575, 
0x164a: 0x84d5, 0x164b: 0x8595, - 0x164c: 0x8595, 0x164d: 0x85b5, 0x164e: 0x85b5, 0x164f: 0x85d5, 0x1650: 0x8515, 0x1651: 0x85f5, - 0x1652: 0x8615, 0x1653: 0x85f5, 0x1654: 0x8635, 0x1655: 0x8615, 0x1656: 0x8655, 0x1657: 0x8655, - 0x1658: 0x8675, 0x1659: 0x8675, 0x165a: 0x8695, 0x165b: 0x8695, 0x165c: 0x8615, 0x165d: 0x8115, - 0x165e: 0x86b5, 0x165f: 0x86d5, 0x1660: 0x0040, 0x1661: 0x86f5, 0x1662: 0x8715, 0x1663: 0x8735, - 0x1664: 0x8755, 0x1665: 0x8735, 0x1666: 0x8775, 0x1667: 0x8795, 0x1668: 0x87b5, 0x1669: 0x87b5, - 0x166a: 0x87d5, 0x166b: 0x87d5, 0x166c: 0x87f5, 0x166d: 0x87f5, 0x166e: 0x87d5, 0x166f: 0x87d5, - 0x1670: 0x8815, 0x1671: 0x8835, 0x1672: 0x8855, 0x1673: 0x8875, 0x1674: 0x8895, 0x1675: 0x88b5, - 0x1676: 0x88b5, 0x1677: 0x88b5, 0x1678: 0x88d5, 0x1679: 0x88d5, 0x167a: 0x88d5, 0x167b: 0x88d5, - 0x167c: 0x87b5, 0x167d: 0x87b5, 0x167e: 0x87b5, 0x167f: 0x0040, + 0x1640: 0x0040, 0x1641: 0xaefa, 0x1642: 0xb4e2, 0x1643: 0xaf6a, 0x1644: 0xafda, 0x1645: 0xafea, + 0x1646: 0xaf7a, 0x1647: 0xb4f2, 0x1648: 0x1fd2, 0x1649: 0x1fe2, 0x164a: 0xaf8a, 0x164b: 0x1fb2, + 0x164c: 0xaeda, 0x164d: 0xaf99, 0x164e: 0x29d1, 0x164f: 0xb502, 0x1650: 0x1f41, 0x1651: 0x00c9, + 0x1652: 0x0069, 0x1653: 0x0079, 0x1654: 0x1f51, 0x1655: 0x1f61, 0x1656: 0x1f71, 0x1657: 0x1f81, + 0x1658: 0x1f91, 0x1659: 0x1fa1, 0x165a: 0xaeea, 0x165b: 0x03c2, 0x165c: 0xafaa, 0x165d: 0x1fc2, + 0x165e: 0xafba, 0x165f: 0xaf0a, 0x1660: 0xaffa, 0x1661: 0x0039, 0x1662: 0x0ee9, 0x1663: 0x1159, + 0x1664: 0x0ef9, 0x1665: 0x0f09, 0x1666: 0x1199, 0x1667: 0x0f31, 0x1668: 0x0249, 0x1669: 0x0f41, + 0x166a: 0x0259, 0x166b: 0x0f51, 0x166c: 0x0359, 0x166d: 0x0f61, 0x166e: 0x0f71, 0x166f: 0x00d9, + 0x1670: 0x0f99, 0x1671: 0x2039, 0x1672: 0x0269, 0x1673: 0x01d9, 0x1674: 0x0fa9, 0x1675: 0x0fb9, + 0x1676: 0x1089, 0x1677: 0x0279, 0x1678: 0x0369, 0x1679: 0x0289, 0x167a: 0x13d1, 0x167b: 0xaf4a, + 0x167c: 0xafca, 0x167d: 0xaf5a, 0x167e: 0xb512, 0x167f: 0xaf1a, // Block 0x5a, offset 0x1680 - 0x1680: 0x0040, 0x1681: 0x0040, 0x1682: 0x8715, 0x1683: 0x86f5, 0x1684: 0x88f5, 0x1685: 0x86f5, - 0x1686: 0x8715, 0x1687: 0x86f5, 0x1688: 0x0040, 0x1689: 0x0040, 0x168a: 0x8915, 0x168b: 0x8715, - 0x168c: 0x8935, 0x168d: 0x88f5, 0x168e: 0x8935, 0x168f: 0x8715, 0x1690: 0x0040, 0x1691: 0x0040, - 0x1692: 0x8955, 0x1693: 0x8975, 0x1694: 0x8875, 0x1695: 0x8935, 0x1696: 0x88f5, 0x1697: 0x8935, - 0x1698: 0x0040, 0x1699: 0x0040, 0x169a: 0x8995, 0x169b: 0x89b5, 0x169c: 0x8995, 0x169d: 0x0040, - 0x169e: 0x0040, 0x169f: 0x0040, 0x16a0: 0xb541, 0x16a1: 0xb559, 0x16a2: 0xb571, 0x16a3: 0x89d6, - 0x16a4: 0xb589, 0x16a5: 0xb5a1, 0x16a6: 0x89f5, 0x16a7: 0x0040, 0x16a8: 0x8a15, 0x16a9: 0x8a35, - 0x16aa: 0x8a55, 0x16ab: 0x8a35, 0x16ac: 0x8a75, 0x16ad: 0x8a95, 0x16ae: 0x8ab5, 0x16af: 0x0040, - 0x16b0: 0x0040, 0x16b1: 0x0040, 0x16b2: 0x0040, 0x16b3: 0x0040, 0x16b4: 0x0040, 0x16b5: 0x0040, - 0x16b6: 0x0040, 0x16b7: 0x0040, 0x16b8: 0x0040, 0x16b9: 0x0340, 0x16ba: 0x0340, 0x16bb: 0x0340, - 0x16bc: 0x0040, 0x16bd: 0x0040, 0x16be: 0x0040, 0x16bf: 0x0040, + 0x1680: 0x1caa, 0x1681: 0x0039, 0x1682: 0x0ee9, 0x1683: 0x1159, 0x1684: 0x0ef9, 0x1685: 0x0f09, + 0x1686: 0x1199, 0x1687: 0x0f31, 0x1688: 0x0249, 0x1689: 0x0f41, 0x168a: 0x0259, 0x168b: 0x0f51, + 0x168c: 0x0359, 0x168d: 0x0f61, 0x168e: 0x0f71, 0x168f: 0x00d9, 0x1690: 0x0f99, 0x1691: 0x2039, + 0x1692: 0x0269, 0x1693: 0x01d9, 0x1694: 0x0fa9, 0x1695: 0x0fb9, 0x1696: 0x1089, 0x1697: 0x0279, + 0x1698: 0x0369, 0x1699: 0x0289, 0x169a: 0x13d1, 0x169b: 0xaf2a, 0x169c: 0xb522, 0x169d: 0xaf3a, + 0x169e: 0xb532, 0x169f: 0x80d5, 0x16a0: 0x80f5, 0x16a1: 
0x29d1, 0x16a2: 0x8115, 0x16a3: 0x8115, + 0x16a4: 0x8135, 0x16a5: 0x8155, 0x16a6: 0x8175, 0x16a7: 0x8195, 0x16a8: 0x81b5, 0x16a9: 0x81d5, + 0x16aa: 0x81f5, 0x16ab: 0x8215, 0x16ac: 0x8235, 0x16ad: 0x8255, 0x16ae: 0x8275, 0x16af: 0x8295, + 0x16b0: 0x82b5, 0x16b1: 0x82d5, 0x16b2: 0x82f5, 0x16b3: 0x8315, 0x16b4: 0x8335, 0x16b5: 0x8355, + 0x16b6: 0x8375, 0x16b7: 0x8395, 0x16b8: 0x83b5, 0x16b9: 0x83d5, 0x16ba: 0x83f5, 0x16bb: 0x8415, + 0x16bc: 0x81b5, 0x16bd: 0x8435, 0x16be: 0x8455, 0x16bf: 0x8215, // Block 0x5b, offset 0x16c0 - 0x16c0: 0x0208, 0x16c1: 0x0208, 0x16c2: 0x0208, 0x16c3: 0x0208, 0x16c4: 0x0208, 0x16c5: 0x0408, - 0x16c6: 0x0008, 0x16c7: 0x0408, 0x16c8: 0x0018, 0x16c9: 0x0408, 0x16ca: 0x0408, 0x16cb: 0x0008, - 0x16cc: 0x0008, 0x16cd: 0x0108, 0x16ce: 0x0408, 0x16cf: 0x0408, 0x16d0: 0x0408, 0x16d1: 0x0408, - 0x16d2: 0x0408, 0x16d3: 0x0208, 0x16d4: 0x0208, 0x16d5: 0x0208, 0x16d6: 0x0208, 0x16d7: 0x0108, - 0x16d8: 0x0208, 0x16d9: 0x0208, 0x16da: 0x0208, 0x16db: 0x0208, 0x16dc: 0x0208, 0x16dd: 0x0408, - 0x16de: 0x0208, 0x16df: 0x0208, 0x16e0: 0x0208, 0x16e1: 0x0408, 0x16e2: 0x0008, 0x16e3: 0x0008, - 0x16e4: 0x0408, 0x16e5: 0x1308, 0x16e6: 0x1308, 0x16e7: 0x0040, 0x16e8: 0x0040, 0x16e9: 0x0040, - 0x16ea: 0x0040, 0x16eb: 0x0218, 0x16ec: 0x0218, 0x16ed: 0x0218, 0x16ee: 0x0218, 0x16ef: 0x0418, - 0x16f0: 0x0018, 0x16f1: 0x0018, 0x16f2: 0x0018, 0x16f3: 0x0018, 0x16f4: 0x0018, 0x16f5: 0x0018, - 0x16f6: 0x0018, 0x16f7: 0x0040, 0x16f8: 0x0040, 0x16f9: 0x0040, 0x16fa: 0x0040, 0x16fb: 0x0040, - 0x16fc: 0x0040, 0x16fd: 0x0040, 0x16fe: 0x0040, 0x16ff: 0x0040, + 0x16c0: 0x8475, 0x16c1: 0x8495, 0x16c2: 0x84b5, 0x16c3: 0x84d5, 0x16c4: 0x84f5, 0x16c5: 0x8515, + 0x16c6: 0x8535, 0x16c7: 0x8555, 0x16c8: 0x84d5, 0x16c9: 0x8575, 0x16ca: 0x84d5, 0x16cb: 0x8595, + 0x16cc: 0x8595, 0x16cd: 0x85b5, 0x16ce: 0x85b5, 0x16cf: 0x85d5, 0x16d0: 0x8515, 0x16d1: 0x85f5, + 0x16d2: 0x8615, 0x16d3: 0x85f5, 0x16d4: 0x8635, 0x16d5: 0x8615, 0x16d6: 0x8655, 0x16d7: 0x8655, + 0x16d8: 0x8675, 0x16d9: 0x8675, 0x16da: 0x8695, 0x16db: 0x8695, 0x16dc: 0x8615, 0x16dd: 0x8115, + 0x16de: 0x86b5, 0x16df: 0x86d5, 0x16e0: 0x0040, 0x16e1: 0x86f5, 0x16e2: 0x8715, 0x16e3: 0x8735, + 0x16e4: 0x8755, 0x16e5: 0x8735, 0x16e6: 0x8775, 0x16e7: 0x8795, 0x16e8: 0x87b5, 0x16e9: 0x87b5, + 0x16ea: 0x87d5, 0x16eb: 0x87d5, 0x16ec: 0x87f5, 0x16ed: 0x87f5, 0x16ee: 0x87d5, 0x16ef: 0x87d5, + 0x16f0: 0x8815, 0x16f1: 0x8835, 0x16f2: 0x8855, 0x16f3: 0x8875, 0x16f4: 0x8895, 0x16f5: 0x88b5, + 0x16f6: 0x88b5, 0x16f7: 0x88b5, 0x16f8: 0x88d5, 0x16f9: 0x88d5, 0x16fa: 0x88d5, 0x16fb: 0x88d5, + 0x16fc: 0x87b5, 0x16fd: 0x87b5, 0x16fe: 0x87b5, 0x16ff: 0x0040, // Block 0x5c, offset 0x1700 - 0x1700: 0x0208, 0x1701: 0x0408, 0x1702: 0x0208, 0x1703: 0x0408, 0x1704: 0x0408, 0x1705: 0x0408, - 0x1706: 0x0208, 0x1707: 0x0208, 0x1708: 0x0208, 0x1709: 0x0408, 0x170a: 0x0208, 0x170b: 0x0208, - 0x170c: 0x0408, 0x170d: 0x0208, 0x170e: 0x0408, 0x170f: 0x0408, 0x1710: 0x0208, 0x1711: 0x0408, - 0x1712: 0x0040, 0x1713: 0x0040, 0x1714: 0x0040, 0x1715: 0x0040, 0x1716: 0x0040, 0x1717: 0x0040, - 0x1718: 0x0040, 0x1719: 0x0018, 0x171a: 0x0018, 0x171b: 0x0018, 0x171c: 0x0018, 0x171d: 0x0040, - 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0x0040, 0x1721: 0x0040, 0x1722: 0x0040, 0x1723: 0x0040, - 0x1724: 0x0040, 0x1725: 0x0040, 0x1726: 0x0040, 0x1727: 0x0040, 0x1728: 0x0040, 0x1729: 0x0418, - 0x172a: 0x0418, 0x172b: 0x0418, 0x172c: 0x0418, 0x172d: 0x0218, 0x172e: 0x0218, 0x172f: 0x0018, + 0x1700: 0x0040, 0x1701: 0x0040, 0x1702: 0x8715, 0x1703: 0x86f5, 0x1704: 0x88f5, 0x1705: 0x86f5, + 0x1706: 0x8715, 
0x1707: 0x86f5, 0x1708: 0x0040, 0x1709: 0x0040, 0x170a: 0x8915, 0x170b: 0x8715, + 0x170c: 0x8935, 0x170d: 0x88f5, 0x170e: 0x8935, 0x170f: 0x8715, 0x1710: 0x0040, 0x1711: 0x0040, + 0x1712: 0x8955, 0x1713: 0x8975, 0x1714: 0x8875, 0x1715: 0x8935, 0x1716: 0x88f5, 0x1717: 0x8935, + 0x1718: 0x0040, 0x1719: 0x0040, 0x171a: 0x8995, 0x171b: 0x89b5, 0x171c: 0x8995, 0x171d: 0x0040, + 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0xb541, 0x1721: 0xb559, 0x1722: 0xb571, 0x1723: 0x89d6, + 0x1724: 0xb589, 0x1725: 0xb5a1, 0x1726: 0x89f5, 0x1727: 0x0040, 0x1728: 0x8a15, 0x1729: 0x8a35, + 0x172a: 0x8a55, 0x172b: 0x8a35, 0x172c: 0x8a75, 0x172d: 0x8a95, 0x172e: 0x8ab5, 0x172f: 0x0040, 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040, - 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0040, 0x173a: 0x0040, 0x173b: 0x0040, + 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0340, 0x173a: 0x0340, 0x173b: 0x0340, 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040, // Block 0x5d, offset 0x1740 - 0x1740: 0x1308, 0x1741: 0x1308, 0x1742: 0x1008, 0x1743: 0x1008, 0x1744: 0x0040, 0x1745: 0x0008, - 0x1746: 0x0008, 0x1747: 0x0008, 0x1748: 0x0008, 0x1749: 0x0008, 0x174a: 0x0008, 0x174b: 0x0008, - 0x174c: 0x0008, 0x174d: 0x0040, 0x174e: 0x0040, 0x174f: 0x0008, 0x1750: 0x0008, 0x1751: 0x0040, - 0x1752: 0x0040, 0x1753: 0x0008, 0x1754: 0x0008, 0x1755: 0x0008, 0x1756: 0x0008, 0x1757: 0x0008, - 0x1758: 0x0008, 0x1759: 0x0008, 0x175a: 0x0008, 0x175b: 0x0008, 0x175c: 0x0008, 0x175d: 0x0008, - 0x175e: 0x0008, 0x175f: 0x0008, 0x1760: 0x0008, 0x1761: 0x0008, 0x1762: 0x0008, 0x1763: 0x0008, - 0x1764: 0x0008, 0x1765: 0x0008, 0x1766: 0x0008, 0x1767: 0x0008, 0x1768: 0x0008, 0x1769: 0x0040, - 0x176a: 0x0008, 0x176b: 0x0008, 0x176c: 0x0008, 0x176d: 0x0008, 0x176e: 0x0008, 0x176f: 0x0008, - 0x1770: 0x0008, 0x1771: 0x0040, 0x1772: 0x0008, 0x1773: 0x0008, 0x1774: 0x0040, 0x1775: 0x0008, - 0x1776: 0x0008, 0x1777: 0x0008, 0x1778: 0x0008, 0x1779: 0x0008, 0x177a: 0x0040, 0x177b: 0x0040, - 0x177c: 0x1308, 0x177d: 0x0008, 0x177e: 0x1008, 0x177f: 0x1008, + 0x1740: 0x0a08, 0x1741: 0x0a08, 0x1742: 0x0a08, 0x1743: 0x0a08, 0x1744: 0x0a08, 0x1745: 0x0c08, + 0x1746: 0x0808, 0x1747: 0x0c08, 0x1748: 0x0818, 0x1749: 0x0c08, 0x174a: 0x0c08, 0x174b: 0x0808, + 0x174c: 0x0808, 0x174d: 0x0908, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0c08, 0x1751: 0x0c08, + 0x1752: 0x0c08, 0x1753: 0x0a08, 0x1754: 0x0a08, 0x1755: 0x0a08, 0x1756: 0x0a08, 0x1757: 0x0908, + 0x1758: 0x0a08, 0x1759: 0x0a08, 0x175a: 0x0a08, 0x175b: 0x0a08, 0x175c: 0x0a08, 0x175d: 0x0c08, + 0x175e: 0x0a08, 0x175f: 0x0a08, 0x1760: 0x0a08, 0x1761: 0x0c08, 0x1762: 0x0808, 0x1763: 0x0808, + 0x1764: 0x0c08, 0x1765: 0x3308, 0x1766: 0x3308, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0040, + 0x176a: 0x0040, 0x176b: 0x0a18, 0x176c: 0x0a18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0c18, + 0x1770: 0x0818, 0x1771: 0x0818, 0x1772: 0x0818, 0x1773: 0x0818, 0x1774: 0x0818, 0x1775: 0x0818, + 0x1776: 0x0818, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040, + 0x177c: 0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040, // Block 0x5e, offset 0x1780 - 0x1780: 0x1308, 0x1781: 0x1008, 0x1782: 0x1008, 0x1783: 0x1008, 0x1784: 0x1008, 0x1785: 0x0040, - 0x1786: 0x0040, 0x1787: 0x1008, 0x1788: 0x1008, 0x1789: 0x0040, 0x178a: 0x0040, 0x178b: 0x1008, - 0x178c: 0x1008, 0x178d: 0x1808, 0x178e: 0x0040, 0x178f: 0x0040, 0x1790: 0x0008, 0x1791: 0x0040, - 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 
0x1797: 0x1008, - 0x1798: 0x0040, 0x1799: 0x0040, 0x179a: 0x0040, 0x179b: 0x0040, 0x179c: 0x0040, 0x179d: 0x0008, - 0x179e: 0x0008, 0x179f: 0x0008, 0x17a0: 0x0008, 0x17a1: 0x0008, 0x17a2: 0x1008, 0x17a3: 0x1008, - 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x1308, 0x17a7: 0x1308, 0x17a8: 0x1308, 0x17a9: 0x1308, - 0x17aa: 0x1308, 0x17ab: 0x1308, 0x17ac: 0x1308, 0x17ad: 0x0040, 0x17ae: 0x0040, 0x17af: 0x0040, - 0x17b0: 0x1308, 0x17b1: 0x1308, 0x17b2: 0x1308, 0x17b3: 0x1308, 0x17b4: 0x1308, 0x17b5: 0x0040, + 0x1780: 0x0a08, 0x1781: 0x0c08, 0x1782: 0x0a08, 0x1783: 0x0c08, 0x1784: 0x0c08, 0x1785: 0x0c08, + 0x1786: 0x0a08, 0x1787: 0x0a08, 0x1788: 0x0a08, 0x1789: 0x0c08, 0x178a: 0x0a08, 0x178b: 0x0a08, + 0x178c: 0x0c08, 0x178d: 0x0a08, 0x178e: 0x0c08, 0x178f: 0x0c08, 0x1790: 0x0a08, 0x1791: 0x0c08, + 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x0040, + 0x1798: 0x0040, 0x1799: 0x0818, 0x179a: 0x0818, 0x179b: 0x0818, 0x179c: 0x0818, 0x179d: 0x0040, + 0x179e: 0x0040, 0x179f: 0x0040, 0x17a0: 0x0040, 0x17a1: 0x0040, 0x17a2: 0x0040, 0x17a3: 0x0040, + 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x0040, 0x17a7: 0x0040, 0x17a8: 0x0040, 0x17a9: 0x0c18, + 0x17aa: 0x0c18, 0x17ab: 0x0c18, 0x17ac: 0x0c18, 0x17ad: 0x0a18, 0x17ae: 0x0a18, 0x17af: 0x0818, + 0x17b0: 0x0040, 0x17b1: 0x0040, 0x17b2: 0x0040, 0x17b3: 0x0040, 0x17b4: 0x0040, 0x17b5: 0x0040, 0x17b6: 0x0040, 0x17b7: 0x0040, 0x17b8: 0x0040, 0x17b9: 0x0040, 0x17ba: 0x0040, 0x17bb: 0x0040, 0x17bc: 0x0040, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040, // Block 0x5f, offset 0x17c0 - 0x17c0: 0x0039, 0x17c1: 0x0ee9, 0x17c2: 0x1159, 0x17c3: 0x0ef9, 0x17c4: 0x0f09, 0x17c5: 0x1199, - 0x17c6: 0x0f31, 0x17c7: 0x0249, 0x17c8: 0x0f41, 0x17c9: 0x0259, 0x17ca: 0x0f51, 0x17cb: 0x0359, - 0x17cc: 0x0f61, 0x17cd: 0x0f71, 0x17ce: 0x00d9, 0x17cf: 0x0f99, 0x17d0: 0x2039, 0x17d1: 0x0269, - 0x17d2: 0x01d9, 0x17d3: 0x0fa9, 0x17d4: 0x0fb9, 0x17d5: 0x1089, 0x17d6: 0x0279, 0x17d7: 0x0369, - 0x17d8: 0x0289, 0x17d9: 0x13d1, 0x17da: 0x0039, 0x17db: 0x0ee9, 0x17dc: 0x1159, 0x17dd: 0x0ef9, - 0x17de: 0x0f09, 0x17df: 0x1199, 0x17e0: 0x0f31, 0x17e1: 0x0249, 0x17e2: 0x0f41, 0x17e3: 0x0259, - 0x17e4: 0x0f51, 0x17e5: 0x0359, 0x17e6: 0x0f61, 0x17e7: 0x0f71, 0x17e8: 0x00d9, 0x17e9: 0x0f99, - 0x17ea: 0x2039, 0x17eb: 0x0269, 0x17ec: 0x01d9, 0x17ed: 0x0fa9, 0x17ee: 0x0fb9, 0x17ef: 0x1089, - 0x17f0: 0x0279, 0x17f1: 0x0369, 0x17f2: 0x0289, 0x17f3: 0x13d1, 0x17f4: 0x0039, 0x17f5: 0x0ee9, - 0x17f6: 0x1159, 0x17f7: 0x0ef9, 0x17f8: 0x0f09, 0x17f9: 0x1199, 0x17fa: 0x0f31, 0x17fb: 0x0249, - 0x17fc: 0x0f41, 0x17fd: 0x0259, 0x17fe: 0x0f51, 0x17ff: 0x0359, + 0x17c0: 0x3308, 0x17c1: 0x3308, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x0040, 0x17c5: 0x0008, + 0x17c6: 0x0008, 0x17c7: 0x0008, 0x17c8: 0x0008, 0x17c9: 0x0008, 0x17ca: 0x0008, 0x17cb: 0x0008, + 0x17cc: 0x0008, 0x17cd: 0x0040, 0x17ce: 0x0040, 0x17cf: 0x0008, 0x17d0: 0x0008, 0x17d1: 0x0040, + 0x17d2: 0x0040, 0x17d3: 0x0008, 0x17d4: 0x0008, 0x17d5: 0x0008, 0x17d6: 0x0008, 0x17d7: 0x0008, + 0x17d8: 0x0008, 0x17d9: 0x0008, 0x17da: 0x0008, 0x17db: 0x0008, 0x17dc: 0x0008, 0x17dd: 0x0008, + 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x0008, 0x17e3: 0x0008, + 0x17e4: 0x0008, 0x17e5: 0x0008, 0x17e6: 0x0008, 0x17e7: 0x0008, 0x17e8: 0x0008, 0x17e9: 0x0040, + 0x17ea: 0x0008, 0x17eb: 0x0008, 0x17ec: 0x0008, 0x17ed: 0x0008, 0x17ee: 0x0008, 0x17ef: 0x0008, + 0x17f0: 0x0008, 0x17f1: 0x0040, 0x17f2: 0x0008, 0x17f3: 0x0008, 0x17f4: 0x0040, 0x17f5: 0x0008, + 0x17f6: 0x0008, 0x17f7: 0x0008, 0x17f8: 0x0008, 
0x17f9: 0x0008, 0x17fa: 0x0040, 0x17fb: 0x0040, + 0x17fc: 0x3308, 0x17fd: 0x0008, 0x17fe: 0x3008, 0x17ff: 0x3008, // Block 0x60, offset 0x1800 - 0x1800: 0x0f61, 0x1801: 0x0f71, 0x1802: 0x00d9, 0x1803: 0x0f99, 0x1804: 0x2039, 0x1805: 0x0269, - 0x1806: 0x01d9, 0x1807: 0x0fa9, 0x1808: 0x0fb9, 0x1809: 0x1089, 0x180a: 0x0279, 0x180b: 0x0369, - 0x180c: 0x0289, 0x180d: 0x13d1, 0x180e: 0x0039, 0x180f: 0x0ee9, 0x1810: 0x1159, 0x1811: 0x0ef9, - 0x1812: 0x0f09, 0x1813: 0x1199, 0x1814: 0x0f31, 0x1815: 0x0040, 0x1816: 0x0f41, 0x1817: 0x0259, - 0x1818: 0x0f51, 0x1819: 0x0359, 0x181a: 0x0f61, 0x181b: 0x0f71, 0x181c: 0x00d9, 0x181d: 0x0f99, - 0x181e: 0x2039, 0x181f: 0x0269, 0x1820: 0x01d9, 0x1821: 0x0fa9, 0x1822: 0x0fb9, 0x1823: 0x1089, - 0x1824: 0x0279, 0x1825: 0x0369, 0x1826: 0x0289, 0x1827: 0x13d1, 0x1828: 0x0039, 0x1829: 0x0ee9, - 0x182a: 0x1159, 0x182b: 0x0ef9, 0x182c: 0x0f09, 0x182d: 0x1199, 0x182e: 0x0f31, 0x182f: 0x0249, - 0x1830: 0x0f41, 0x1831: 0x0259, 0x1832: 0x0f51, 0x1833: 0x0359, 0x1834: 0x0f61, 0x1835: 0x0f71, - 0x1836: 0x00d9, 0x1837: 0x0f99, 0x1838: 0x2039, 0x1839: 0x0269, 0x183a: 0x01d9, 0x183b: 0x0fa9, - 0x183c: 0x0fb9, 0x183d: 0x1089, 0x183e: 0x0279, 0x183f: 0x0369, + 0x1800: 0x3308, 0x1801: 0x3008, 0x1802: 0x3008, 0x1803: 0x3008, 0x1804: 0x3008, 0x1805: 0x0040, + 0x1806: 0x0040, 0x1807: 0x3008, 0x1808: 0x3008, 0x1809: 0x0040, 0x180a: 0x0040, 0x180b: 0x3008, + 0x180c: 0x3008, 0x180d: 0x3808, 0x180e: 0x0040, 0x180f: 0x0040, 0x1810: 0x0008, 0x1811: 0x0040, + 0x1812: 0x0040, 0x1813: 0x0040, 0x1814: 0x0040, 0x1815: 0x0040, 0x1816: 0x0040, 0x1817: 0x3008, + 0x1818: 0x0040, 0x1819: 0x0040, 0x181a: 0x0040, 0x181b: 0x0040, 0x181c: 0x0040, 0x181d: 0x0008, + 0x181e: 0x0008, 0x181f: 0x0008, 0x1820: 0x0008, 0x1821: 0x0008, 0x1822: 0x3008, 0x1823: 0x3008, + 0x1824: 0x0040, 0x1825: 0x0040, 0x1826: 0x3308, 0x1827: 0x3308, 0x1828: 0x3308, 0x1829: 0x3308, + 0x182a: 0x3308, 0x182b: 0x3308, 0x182c: 0x3308, 0x182d: 0x0040, 0x182e: 0x0040, 0x182f: 0x0040, + 0x1830: 0x3308, 0x1831: 0x3308, 0x1832: 0x3308, 0x1833: 0x3308, 0x1834: 0x3308, 0x1835: 0x0040, + 0x1836: 0x0040, 0x1837: 0x0040, 0x1838: 0x0040, 0x1839: 0x0040, 0x183a: 0x0040, 0x183b: 0x0040, + 0x183c: 0x0040, 0x183d: 0x0040, 0x183e: 0x0040, 0x183f: 0x0040, // Block 0x61, offset 0x1840 - 0x1840: 0x0289, 0x1841: 0x13d1, 0x1842: 0x0039, 0x1843: 0x0ee9, 0x1844: 0x1159, 0x1845: 0x0ef9, - 0x1846: 0x0f09, 0x1847: 0x1199, 0x1848: 0x0f31, 0x1849: 0x0249, 0x184a: 0x0f41, 0x184b: 0x0259, - 0x184c: 0x0f51, 0x184d: 0x0359, 0x184e: 0x0f61, 0x184f: 0x0f71, 0x1850: 0x00d9, 0x1851: 0x0f99, - 0x1852: 0x2039, 0x1853: 0x0269, 0x1854: 0x01d9, 0x1855: 0x0fa9, 0x1856: 0x0fb9, 0x1857: 0x1089, - 0x1858: 0x0279, 0x1859: 0x0369, 0x185a: 0x0289, 0x185b: 0x13d1, 0x185c: 0x0039, 0x185d: 0x0040, - 0x185e: 0x1159, 0x185f: 0x0ef9, 0x1860: 0x0040, 0x1861: 0x0040, 0x1862: 0x0f31, 0x1863: 0x0040, - 0x1864: 0x0040, 0x1865: 0x0259, 0x1866: 0x0f51, 0x1867: 0x0040, 0x1868: 0x0040, 0x1869: 0x0f71, - 0x186a: 0x00d9, 0x186b: 0x0f99, 0x186c: 0x2039, 0x186d: 0x0040, 0x186e: 0x01d9, 0x186f: 0x0fa9, - 0x1870: 0x0fb9, 0x1871: 0x1089, 0x1872: 0x0279, 0x1873: 0x0369, 0x1874: 0x0289, 0x1875: 0x13d1, - 0x1876: 0x0039, 0x1877: 0x0ee9, 0x1878: 0x1159, 0x1879: 0x0ef9, 0x187a: 0x0040, 0x187b: 0x1199, - 0x187c: 0x0040, 0x187d: 0x0249, 0x187e: 0x0f41, 0x187f: 0x0259, + 0x1840: 0x0039, 0x1841: 0x0ee9, 0x1842: 0x1159, 0x1843: 0x0ef9, 0x1844: 0x0f09, 0x1845: 0x1199, + 0x1846: 0x0f31, 0x1847: 0x0249, 0x1848: 0x0f41, 0x1849: 0x0259, 0x184a: 0x0f51, 0x184b: 0x0359, + 0x184c: 0x0f61, 0x184d: 0x0f71, 0x184e: 
0x00d9, 0x184f: 0x0f99, 0x1850: 0x2039, 0x1851: 0x0269, + 0x1852: 0x01d9, 0x1853: 0x0fa9, 0x1854: 0x0fb9, 0x1855: 0x1089, 0x1856: 0x0279, 0x1857: 0x0369, + 0x1858: 0x0289, 0x1859: 0x13d1, 0x185a: 0x0039, 0x185b: 0x0ee9, 0x185c: 0x1159, 0x185d: 0x0ef9, + 0x185e: 0x0f09, 0x185f: 0x1199, 0x1860: 0x0f31, 0x1861: 0x0249, 0x1862: 0x0f41, 0x1863: 0x0259, + 0x1864: 0x0f51, 0x1865: 0x0359, 0x1866: 0x0f61, 0x1867: 0x0f71, 0x1868: 0x00d9, 0x1869: 0x0f99, + 0x186a: 0x2039, 0x186b: 0x0269, 0x186c: 0x01d9, 0x186d: 0x0fa9, 0x186e: 0x0fb9, 0x186f: 0x1089, + 0x1870: 0x0279, 0x1871: 0x0369, 0x1872: 0x0289, 0x1873: 0x13d1, 0x1874: 0x0039, 0x1875: 0x0ee9, + 0x1876: 0x1159, 0x1877: 0x0ef9, 0x1878: 0x0f09, 0x1879: 0x1199, 0x187a: 0x0f31, 0x187b: 0x0249, + 0x187c: 0x0f41, 0x187d: 0x0259, 0x187e: 0x0f51, 0x187f: 0x0359, // Block 0x62, offset 0x1880 - 0x1880: 0x0f51, 0x1881: 0x0359, 0x1882: 0x0f61, 0x1883: 0x0f71, 0x1884: 0x0040, 0x1885: 0x0f99, - 0x1886: 0x2039, 0x1887: 0x0269, 0x1888: 0x01d9, 0x1889: 0x0fa9, 0x188a: 0x0fb9, 0x188b: 0x1089, - 0x188c: 0x0279, 0x188d: 0x0369, 0x188e: 0x0289, 0x188f: 0x13d1, 0x1890: 0x0039, 0x1891: 0x0ee9, - 0x1892: 0x1159, 0x1893: 0x0ef9, 0x1894: 0x0f09, 0x1895: 0x1199, 0x1896: 0x0f31, 0x1897: 0x0249, - 0x1898: 0x0f41, 0x1899: 0x0259, 0x189a: 0x0f51, 0x189b: 0x0359, 0x189c: 0x0f61, 0x189d: 0x0f71, - 0x189e: 0x00d9, 0x189f: 0x0f99, 0x18a0: 0x2039, 0x18a1: 0x0269, 0x18a2: 0x01d9, 0x18a3: 0x0fa9, - 0x18a4: 0x0fb9, 0x18a5: 0x1089, 0x18a6: 0x0279, 0x18a7: 0x0369, 0x18a8: 0x0289, 0x18a9: 0x13d1, - 0x18aa: 0x0039, 0x18ab: 0x0ee9, 0x18ac: 0x1159, 0x18ad: 0x0ef9, 0x18ae: 0x0f09, 0x18af: 0x1199, - 0x18b0: 0x0f31, 0x18b1: 0x0249, 0x18b2: 0x0f41, 0x18b3: 0x0259, 0x18b4: 0x0f51, 0x18b5: 0x0359, - 0x18b6: 0x0f61, 0x18b7: 0x0f71, 0x18b8: 0x00d9, 0x18b9: 0x0f99, 0x18ba: 0x2039, 0x18bb: 0x0269, - 0x18bc: 0x01d9, 0x18bd: 0x0fa9, 0x18be: 0x0fb9, 0x18bf: 0x1089, + 0x1880: 0x0f61, 0x1881: 0x0f71, 0x1882: 0x00d9, 0x1883: 0x0f99, 0x1884: 0x2039, 0x1885: 0x0269, + 0x1886: 0x01d9, 0x1887: 0x0fa9, 0x1888: 0x0fb9, 0x1889: 0x1089, 0x188a: 0x0279, 0x188b: 0x0369, + 0x188c: 0x0289, 0x188d: 0x13d1, 0x188e: 0x0039, 0x188f: 0x0ee9, 0x1890: 0x1159, 0x1891: 0x0ef9, + 0x1892: 0x0f09, 0x1893: 0x1199, 0x1894: 0x0f31, 0x1895: 0x0040, 0x1896: 0x0f41, 0x1897: 0x0259, + 0x1898: 0x0f51, 0x1899: 0x0359, 0x189a: 0x0f61, 0x189b: 0x0f71, 0x189c: 0x00d9, 0x189d: 0x0f99, + 0x189e: 0x2039, 0x189f: 0x0269, 0x18a0: 0x01d9, 0x18a1: 0x0fa9, 0x18a2: 0x0fb9, 0x18a3: 0x1089, + 0x18a4: 0x0279, 0x18a5: 0x0369, 0x18a6: 0x0289, 0x18a7: 0x13d1, 0x18a8: 0x0039, 0x18a9: 0x0ee9, + 0x18aa: 0x1159, 0x18ab: 0x0ef9, 0x18ac: 0x0f09, 0x18ad: 0x1199, 0x18ae: 0x0f31, 0x18af: 0x0249, + 0x18b0: 0x0f41, 0x18b1: 0x0259, 0x18b2: 0x0f51, 0x18b3: 0x0359, 0x18b4: 0x0f61, 0x18b5: 0x0f71, + 0x18b6: 0x00d9, 0x18b7: 0x0f99, 0x18b8: 0x2039, 0x18b9: 0x0269, 0x18ba: 0x01d9, 0x18bb: 0x0fa9, + 0x18bc: 0x0fb9, 0x18bd: 0x1089, 0x18be: 0x0279, 0x18bf: 0x0369, // Block 0x63, offset 0x18c0 - 0x18c0: 0x0279, 0x18c1: 0x0369, 0x18c2: 0x0289, 0x18c3: 0x13d1, 0x18c4: 0x0039, 0x18c5: 0x0ee9, - 0x18c6: 0x0040, 0x18c7: 0x0ef9, 0x18c8: 0x0f09, 0x18c9: 0x1199, 0x18ca: 0x0f31, 0x18cb: 0x0040, - 0x18cc: 0x0040, 0x18cd: 0x0259, 0x18ce: 0x0f51, 0x18cf: 0x0359, 0x18d0: 0x0f61, 0x18d1: 0x0f71, - 0x18d2: 0x00d9, 0x18d3: 0x0f99, 0x18d4: 0x2039, 0x18d5: 0x0040, 0x18d6: 0x01d9, 0x18d7: 0x0fa9, - 0x18d8: 0x0fb9, 0x18d9: 0x1089, 0x18da: 0x0279, 0x18db: 0x0369, 0x18dc: 0x0289, 0x18dd: 0x0040, - 0x18de: 0x0039, 0x18df: 0x0ee9, 0x18e0: 0x1159, 0x18e1: 0x0ef9, 0x18e2: 0x0f09, 0x18e3: 0x1199, - 
0x18e4: 0x0f31, 0x18e5: 0x0249, 0x18e6: 0x0f41, 0x18e7: 0x0259, 0x18e8: 0x0f51, 0x18e9: 0x0359, - 0x18ea: 0x0f61, 0x18eb: 0x0f71, 0x18ec: 0x00d9, 0x18ed: 0x0f99, 0x18ee: 0x2039, 0x18ef: 0x0269, - 0x18f0: 0x01d9, 0x18f1: 0x0fa9, 0x18f2: 0x0fb9, 0x18f3: 0x1089, 0x18f4: 0x0279, 0x18f5: 0x0369, - 0x18f6: 0x0289, 0x18f7: 0x13d1, 0x18f8: 0x0039, 0x18f9: 0x0ee9, 0x18fa: 0x0040, 0x18fb: 0x0ef9, - 0x18fc: 0x0f09, 0x18fd: 0x1199, 0x18fe: 0x0f31, 0x18ff: 0x0040, + 0x18c0: 0x0289, 0x18c1: 0x13d1, 0x18c2: 0x0039, 0x18c3: 0x0ee9, 0x18c4: 0x1159, 0x18c5: 0x0ef9, + 0x18c6: 0x0f09, 0x18c7: 0x1199, 0x18c8: 0x0f31, 0x18c9: 0x0249, 0x18ca: 0x0f41, 0x18cb: 0x0259, + 0x18cc: 0x0f51, 0x18cd: 0x0359, 0x18ce: 0x0f61, 0x18cf: 0x0f71, 0x18d0: 0x00d9, 0x18d1: 0x0f99, + 0x18d2: 0x2039, 0x18d3: 0x0269, 0x18d4: 0x01d9, 0x18d5: 0x0fa9, 0x18d6: 0x0fb9, 0x18d7: 0x1089, + 0x18d8: 0x0279, 0x18d9: 0x0369, 0x18da: 0x0289, 0x18db: 0x13d1, 0x18dc: 0x0039, 0x18dd: 0x0040, + 0x18de: 0x1159, 0x18df: 0x0ef9, 0x18e0: 0x0040, 0x18e1: 0x0040, 0x18e2: 0x0f31, 0x18e3: 0x0040, + 0x18e4: 0x0040, 0x18e5: 0x0259, 0x18e6: 0x0f51, 0x18e7: 0x0040, 0x18e8: 0x0040, 0x18e9: 0x0f71, + 0x18ea: 0x00d9, 0x18eb: 0x0f99, 0x18ec: 0x2039, 0x18ed: 0x0040, 0x18ee: 0x01d9, 0x18ef: 0x0fa9, + 0x18f0: 0x0fb9, 0x18f1: 0x1089, 0x18f2: 0x0279, 0x18f3: 0x0369, 0x18f4: 0x0289, 0x18f5: 0x13d1, + 0x18f6: 0x0039, 0x18f7: 0x0ee9, 0x18f8: 0x1159, 0x18f9: 0x0ef9, 0x18fa: 0x0040, 0x18fb: 0x1199, + 0x18fc: 0x0040, 0x18fd: 0x0249, 0x18fe: 0x0f41, 0x18ff: 0x0259, // Block 0x64, offset 0x1900 - 0x1900: 0x0f41, 0x1901: 0x0259, 0x1902: 0x0f51, 0x1903: 0x0359, 0x1904: 0x0f61, 0x1905: 0x0040, - 0x1906: 0x00d9, 0x1907: 0x0040, 0x1908: 0x0040, 0x1909: 0x0040, 0x190a: 0x01d9, 0x190b: 0x0fa9, - 0x190c: 0x0fb9, 0x190d: 0x1089, 0x190e: 0x0279, 0x190f: 0x0369, 0x1910: 0x0289, 0x1911: 0x0040, - 0x1912: 0x0039, 0x1913: 0x0ee9, 0x1914: 0x1159, 0x1915: 0x0ef9, 0x1916: 0x0f09, 0x1917: 0x1199, - 0x1918: 0x0f31, 0x1919: 0x0249, 0x191a: 0x0f41, 0x191b: 0x0259, 0x191c: 0x0f51, 0x191d: 0x0359, - 0x191e: 0x0f61, 0x191f: 0x0f71, 0x1920: 0x00d9, 0x1921: 0x0f99, 0x1922: 0x2039, 0x1923: 0x0269, - 0x1924: 0x01d9, 0x1925: 0x0fa9, 0x1926: 0x0fb9, 0x1927: 0x1089, 0x1928: 0x0279, 0x1929: 0x0369, - 0x192a: 0x0289, 0x192b: 0x13d1, 0x192c: 0x0039, 0x192d: 0x0ee9, 0x192e: 0x1159, 0x192f: 0x0ef9, - 0x1930: 0x0f09, 0x1931: 0x1199, 0x1932: 0x0f31, 0x1933: 0x0249, 0x1934: 0x0f41, 0x1935: 0x0259, - 0x1936: 0x0f51, 0x1937: 0x0359, 0x1938: 0x0f61, 0x1939: 0x0f71, 0x193a: 0x00d9, 0x193b: 0x0f99, - 0x193c: 0x2039, 0x193d: 0x0269, 0x193e: 0x01d9, 0x193f: 0x0fa9, + 0x1900: 0x0f51, 0x1901: 0x0359, 0x1902: 0x0f61, 0x1903: 0x0f71, 0x1904: 0x0040, 0x1905: 0x0f99, + 0x1906: 0x2039, 0x1907: 0x0269, 0x1908: 0x01d9, 0x1909: 0x0fa9, 0x190a: 0x0fb9, 0x190b: 0x1089, + 0x190c: 0x0279, 0x190d: 0x0369, 0x190e: 0x0289, 0x190f: 0x13d1, 0x1910: 0x0039, 0x1911: 0x0ee9, + 0x1912: 0x1159, 0x1913: 0x0ef9, 0x1914: 0x0f09, 0x1915: 0x1199, 0x1916: 0x0f31, 0x1917: 0x0249, + 0x1918: 0x0f41, 0x1919: 0x0259, 0x191a: 0x0f51, 0x191b: 0x0359, 0x191c: 0x0f61, 0x191d: 0x0f71, + 0x191e: 0x00d9, 0x191f: 0x0f99, 0x1920: 0x2039, 0x1921: 0x0269, 0x1922: 0x01d9, 0x1923: 0x0fa9, + 0x1924: 0x0fb9, 0x1925: 0x1089, 0x1926: 0x0279, 0x1927: 0x0369, 0x1928: 0x0289, 0x1929: 0x13d1, + 0x192a: 0x0039, 0x192b: 0x0ee9, 0x192c: 0x1159, 0x192d: 0x0ef9, 0x192e: 0x0f09, 0x192f: 0x1199, + 0x1930: 0x0f31, 0x1931: 0x0249, 0x1932: 0x0f41, 0x1933: 0x0259, 0x1934: 0x0f51, 0x1935: 0x0359, + 0x1936: 0x0f61, 0x1937: 0x0f71, 0x1938: 0x00d9, 0x1939: 0x0f99, 0x193a: 0x2039, 0x193b: 
0x0269, + 0x193c: 0x01d9, 0x193d: 0x0fa9, 0x193e: 0x0fb9, 0x193f: 0x1089, // Block 0x65, offset 0x1940 - 0x1940: 0x0fb9, 0x1941: 0x1089, 0x1942: 0x0279, 0x1943: 0x0369, 0x1944: 0x0289, 0x1945: 0x13d1, - 0x1946: 0x0039, 0x1947: 0x0ee9, 0x1948: 0x1159, 0x1949: 0x0ef9, 0x194a: 0x0f09, 0x194b: 0x1199, - 0x194c: 0x0f31, 0x194d: 0x0249, 0x194e: 0x0f41, 0x194f: 0x0259, 0x1950: 0x0f51, 0x1951: 0x0359, - 0x1952: 0x0f61, 0x1953: 0x0f71, 0x1954: 0x00d9, 0x1955: 0x0f99, 0x1956: 0x2039, 0x1957: 0x0269, - 0x1958: 0x01d9, 0x1959: 0x0fa9, 0x195a: 0x0fb9, 0x195b: 0x1089, 0x195c: 0x0279, 0x195d: 0x0369, - 0x195e: 0x0289, 0x195f: 0x13d1, 0x1960: 0x0039, 0x1961: 0x0ee9, 0x1962: 0x1159, 0x1963: 0x0ef9, - 0x1964: 0x0f09, 0x1965: 0x1199, 0x1966: 0x0f31, 0x1967: 0x0249, 0x1968: 0x0f41, 0x1969: 0x0259, - 0x196a: 0x0f51, 0x196b: 0x0359, 0x196c: 0x0f61, 0x196d: 0x0f71, 0x196e: 0x00d9, 0x196f: 0x0f99, - 0x1970: 0x2039, 0x1971: 0x0269, 0x1972: 0x01d9, 0x1973: 0x0fa9, 0x1974: 0x0fb9, 0x1975: 0x1089, - 0x1976: 0x0279, 0x1977: 0x0369, 0x1978: 0x0289, 0x1979: 0x13d1, 0x197a: 0x0039, 0x197b: 0x0ee9, - 0x197c: 0x1159, 0x197d: 0x0ef9, 0x197e: 0x0f09, 0x197f: 0x1199, + 0x1940: 0x0279, 0x1941: 0x0369, 0x1942: 0x0289, 0x1943: 0x13d1, 0x1944: 0x0039, 0x1945: 0x0ee9, + 0x1946: 0x0040, 0x1947: 0x0ef9, 0x1948: 0x0f09, 0x1949: 0x1199, 0x194a: 0x0f31, 0x194b: 0x0040, + 0x194c: 0x0040, 0x194d: 0x0259, 0x194e: 0x0f51, 0x194f: 0x0359, 0x1950: 0x0f61, 0x1951: 0x0f71, + 0x1952: 0x00d9, 0x1953: 0x0f99, 0x1954: 0x2039, 0x1955: 0x0040, 0x1956: 0x01d9, 0x1957: 0x0fa9, + 0x1958: 0x0fb9, 0x1959: 0x1089, 0x195a: 0x0279, 0x195b: 0x0369, 0x195c: 0x0289, 0x195d: 0x0040, + 0x195e: 0x0039, 0x195f: 0x0ee9, 0x1960: 0x1159, 0x1961: 0x0ef9, 0x1962: 0x0f09, 0x1963: 0x1199, + 0x1964: 0x0f31, 0x1965: 0x0249, 0x1966: 0x0f41, 0x1967: 0x0259, 0x1968: 0x0f51, 0x1969: 0x0359, + 0x196a: 0x0f61, 0x196b: 0x0f71, 0x196c: 0x00d9, 0x196d: 0x0f99, 0x196e: 0x2039, 0x196f: 0x0269, + 0x1970: 0x01d9, 0x1971: 0x0fa9, 0x1972: 0x0fb9, 0x1973: 0x1089, 0x1974: 0x0279, 0x1975: 0x0369, + 0x1976: 0x0289, 0x1977: 0x13d1, 0x1978: 0x0039, 0x1979: 0x0ee9, 0x197a: 0x0040, 0x197b: 0x0ef9, + 0x197c: 0x0f09, 0x197d: 0x1199, 0x197e: 0x0f31, 0x197f: 0x0040, // Block 0x66, offset 0x1980 - 0x1980: 0x0f31, 0x1981: 0x0249, 0x1982: 0x0f41, 0x1983: 0x0259, 0x1984: 0x0f51, 0x1985: 0x0359, - 0x1986: 0x0f61, 0x1987: 0x0f71, 0x1988: 0x00d9, 0x1989: 0x0f99, 0x198a: 0x2039, 0x198b: 0x0269, - 0x198c: 0x01d9, 0x198d: 0x0fa9, 0x198e: 0x0fb9, 0x198f: 0x1089, 0x1990: 0x0279, 0x1991: 0x0369, - 0x1992: 0x0289, 0x1993: 0x13d1, 0x1994: 0x0039, 0x1995: 0x0ee9, 0x1996: 0x1159, 0x1997: 0x0ef9, - 0x1998: 0x0f09, 0x1999: 0x1199, 0x199a: 0x0f31, 0x199b: 0x0249, 0x199c: 0x0f41, 0x199d: 0x0259, - 0x199e: 0x0f51, 0x199f: 0x0359, 0x19a0: 0x0f61, 0x19a1: 0x0f71, 0x19a2: 0x00d9, 0x19a3: 0x0f99, - 0x19a4: 0x2039, 0x19a5: 0x0269, 0x19a6: 0x01d9, 0x19a7: 0x0fa9, 0x19a8: 0x0fb9, 0x19a9: 0x1089, - 0x19aa: 0x0279, 0x19ab: 0x0369, 0x19ac: 0x0289, 0x19ad: 0x13d1, 0x19ae: 0x0039, 0x19af: 0x0ee9, - 0x19b0: 0x1159, 0x19b1: 0x0ef9, 0x19b2: 0x0f09, 0x19b3: 0x1199, 0x19b4: 0x0f31, 0x19b5: 0x0249, - 0x19b6: 0x0f41, 0x19b7: 0x0259, 0x19b8: 0x0f51, 0x19b9: 0x0359, 0x19ba: 0x0f61, 0x19bb: 0x0f71, - 0x19bc: 0x00d9, 0x19bd: 0x0f99, 0x19be: 0x2039, 0x19bf: 0x0269, + 0x1980: 0x0f41, 0x1981: 0x0259, 0x1982: 0x0f51, 0x1983: 0x0359, 0x1984: 0x0f61, 0x1985: 0x0040, + 0x1986: 0x00d9, 0x1987: 0x0040, 0x1988: 0x0040, 0x1989: 0x0040, 0x198a: 0x01d9, 0x198b: 0x0fa9, + 0x198c: 0x0fb9, 0x198d: 0x1089, 0x198e: 0x0279, 0x198f: 0x0369, 0x1990: 0x0289, 
0x1991: 0x0040, + 0x1992: 0x0039, 0x1993: 0x0ee9, 0x1994: 0x1159, 0x1995: 0x0ef9, 0x1996: 0x0f09, 0x1997: 0x1199, + 0x1998: 0x0f31, 0x1999: 0x0249, 0x199a: 0x0f41, 0x199b: 0x0259, 0x199c: 0x0f51, 0x199d: 0x0359, + 0x199e: 0x0f61, 0x199f: 0x0f71, 0x19a0: 0x00d9, 0x19a1: 0x0f99, 0x19a2: 0x2039, 0x19a3: 0x0269, + 0x19a4: 0x01d9, 0x19a5: 0x0fa9, 0x19a6: 0x0fb9, 0x19a7: 0x1089, 0x19a8: 0x0279, 0x19a9: 0x0369, + 0x19aa: 0x0289, 0x19ab: 0x13d1, 0x19ac: 0x0039, 0x19ad: 0x0ee9, 0x19ae: 0x1159, 0x19af: 0x0ef9, + 0x19b0: 0x0f09, 0x19b1: 0x1199, 0x19b2: 0x0f31, 0x19b3: 0x0249, 0x19b4: 0x0f41, 0x19b5: 0x0259, + 0x19b6: 0x0f51, 0x19b7: 0x0359, 0x19b8: 0x0f61, 0x19b9: 0x0f71, 0x19ba: 0x00d9, 0x19bb: 0x0f99, + 0x19bc: 0x2039, 0x19bd: 0x0269, 0x19be: 0x01d9, 0x19bf: 0x0fa9, // Block 0x67, offset 0x19c0 - 0x19c0: 0x01d9, 0x19c1: 0x0fa9, 0x19c2: 0x0fb9, 0x19c3: 0x1089, 0x19c4: 0x0279, 0x19c5: 0x0369, - 0x19c6: 0x0289, 0x19c7: 0x13d1, 0x19c8: 0x0039, 0x19c9: 0x0ee9, 0x19ca: 0x1159, 0x19cb: 0x0ef9, - 0x19cc: 0x0f09, 0x19cd: 0x1199, 0x19ce: 0x0f31, 0x19cf: 0x0249, 0x19d0: 0x0f41, 0x19d1: 0x0259, - 0x19d2: 0x0f51, 0x19d3: 0x0359, 0x19d4: 0x0f61, 0x19d5: 0x0f71, 0x19d6: 0x00d9, 0x19d7: 0x0f99, - 0x19d8: 0x2039, 0x19d9: 0x0269, 0x19da: 0x01d9, 0x19db: 0x0fa9, 0x19dc: 0x0fb9, 0x19dd: 0x1089, - 0x19de: 0x0279, 0x19df: 0x0369, 0x19e0: 0x0289, 0x19e1: 0x13d1, 0x19e2: 0x0039, 0x19e3: 0x0ee9, - 0x19e4: 0x1159, 0x19e5: 0x0ef9, 0x19e6: 0x0f09, 0x19e7: 0x1199, 0x19e8: 0x0f31, 0x19e9: 0x0249, - 0x19ea: 0x0f41, 0x19eb: 0x0259, 0x19ec: 0x0f51, 0x19ed: 0x0359, 0x19ee: 0x0f61, 0x19ef: 0x0f71, - 0x19f0: 0x00d9, 0x19f1: 0x0f99, 0x19f2: 0x2039, 0x19f3: 0x0269, 0x19f4: 0x01d9, 0x19f5: 0x0fa9, - 0x19f6: 0x0fb9, 0x19f7: 0x1089, 0x19f8: 0x0279, 0x19f9: 0x0369, 0x19fa: 0x0289, 0x19fb: 0x13d1, - 0x19fc: 0x0039, 0x19fd: 0x0ee9, 0x19fe: 0x1159, 0x19ff: 0x0ef9, + 0x19c0: 0x0fb9, 0x19c1: 0x1089, 0x19c2: 0x0279, 0x19c3: 0x0369, 0x19c4: 0x0289, 0x19c5: 0x13d1, + 0x19c6: 0x0039, 0x19c7: 0x0ee9, 0x19c8: 0x1159, 0x19c9: 0x0ef9, 0x19ca: 0x0f09, 0x19cb: 0x1199, + 0x19cc: 0x0f31, 0x19cd: 0x0249, 0x19ce: 0x0f41, 0x19cf: 0x0259, 0x19d0: 0x0f51, 0x19d1: 0x0359, + 0x19d2: 0x0f61, 0x19d3: 0x0f71, 0x19d4: 0x00d9, 0x19d5: 0x0f99, 0x19d6: 0x2039, 0x19d7: 0x0269, + 0x19d8: 0x01d9, 0x19d9: 0x0fa9, 0x19da: 0x0fb9, 0x19db: 0x1089, 0x19dc: 0x0279, 0x19dd: 0x0369, + 0x19de: 0x0289, 0x19df: 0x13d1, 0x19e0: 0x0039, 0x19e1: 0x0ee9, 0x19e2: 0x1159, 0x19e3: 0x0ef9, + 0x19e4: 0x0f09, 0x19e5: 0x1199, 0x19e6: 0x0f31, 0x19e7: 0x0249, 0x19e8: 0x0f41, 0x19e9: 0x0259, + 0x19ea: 0x0f51, 0x19eb: 0x0359, 0x19ec: 0x0f61, 0x19ed: 0x0f71, 0x19ee: 0x00d9, 0x19ef: 0x0f99, + 0x19f0: 0x2039, 0x19f1: 0x0269, 0x19f2: 0x01d9, 0x19f3: 0x0fa9, 0x19f4: 0x0fb9, 0x19f5: 0x1089, + 0x19f6: 0x0279, 0x19f7: 0x0369, 0x19f8: 0x0289, 0x19f9: 0x13d1, 0x19fa: 0x0039, 0x19fb: 0x0ee9, + 0x19fc: 0x1159, 0x19fd: 0x0ef9, 0x19fe: 0x0f09, 0x19ff: 0x1199, // Block 0x68, offset 0x1a00 - 0x1a00: 0x0f09, 0x1a01: 0x1199, 0x1a02: 0x0f31, 0x1a03: 0x0249, 0x1a04: 0x0f41, 0x1a05: 0x0259, - 0x1a06: 0x0f51, 0x1a07: 0x0359, 0x1a08: 0x0f61, 0x1a09: 0x0f71, 0x1a0a: 0x00d9, 0x1a0b: 0x0f99, - 0x1a0c: 0x2039, 0x1a0d: 0x0269, 0x1a0e: 0x01d9, 0x1a0f: 0x0fa9, 0x1a10: 0x0fb9, 0x1a11: 0x1089, - 0x1a12: 0x0279, 0x1a13: 0x0369, 0x1a14: 0x0289, 0x1a15: 0x13d1, 0x1a16: 0x0039, 0x1a17: 0x0ee9, - 0x1a18: 0x1159, 0x1a19: 0x0ef9, 0x1a1a: 0x0f09, 0x1a1b: 0x1199, 0x1a1c: 0x0f31, 0x1a1d: 0x0249, - 0x1a1e: 0x0f41, 0x1a1f: 0x0259, 0x1a20: 0x0f51, 0x1a21: 0x0359, 0x1a22: 0x0f61, 0x1a23: 0x0f71, - 0x1a24: 0x00d9, 0x1a25: 0x0f99, 0x1a26: 
0x2039, 0x1a27: 0x0269, 0x1a28: 0x01d9, 0x1a29: 0x0fa9, - 0x1a2a: 0x0fb9, 0x1a2b: 0x1089, 0x1a2c: 0x0279, 0x1a2d: 0x0369, 0x1a2e: 0x0289, 0x1a2f: 0x13d1, - 0x1a30: 0x0039, 0x1a31: 0x0ee9, 0x1a32: 0x1159, 0x1a33: 0x0ef9, 0x1a34: 0x0f09, 0x1a35: 0x1199, - 0x1a36: 0x0f31, 0x1a37: 0x0249, 0x1a38: 0x0f41, 0x1a39: 0x0259, 0x1a3a: 0x0f51, 0x1a3b: 0x0359, - 0x1a3c: 0x0f61, 0x1a3d: 0x0f71, 0x1a3e: 0x00d9, 0x1a3f: 0x0f99, + 0x1a00: 0x0f31, 0x1a01: 0x0249, 0x1a02: 0x0f41, 0x1a03: 0x0259, 0x1a04: 0x0f51, 0x1a05: 0x0359, + 0x1a06: 0x0f61, 0x1a07: 0x0f71, 0x1a08: 0x00d9, 0x1a09: 0x0f99, 0x1a0a: 0x2039, 0x1a0b: 0x0269, + 0x1a0c: 0x01d9, 0x1a0d: 0x0fa9, 0x1a0e: 0x0fb9, 0x1a0f: 0x1089, 0x1a10: 0x0279, 0x1a11: 0x0369, + 0x1a12: 0x0289, 0x1a13: 0x13d1, 0x1a14: 0x0039, 0x1a15: 0x0ee9, 0x1a16: 0x1159, 0x1a17: 0x0ef9, + 0x1a18: 0x0f09, 0x1a19: 0x1199, 0x1a1a: 0x0f31, 0x1a1b: 0x0249, 0x1a1c: 0x0f41, 0x1a1d: 0x0259, + 0x1a1e: 0x0f51, 0x1a1f: 0x0359, 0x1a20: 0x0f61, 0x1a21: 0x0f71, 0x1a22: 0x00d9, 0x1a23: 0x0f99, + 0x1a24: 0x2039, 0x1a25: 0x0269, 0x1a26: 0x01d9, 0x1a27: 0x0fa9, 0x1a28: 0x0fb9, 0x1a29: 0x1089, + 0x1a2a: 0x0279, 0x1a2b: 0x0369, 0x1a2c: 0x0289, 0x1a2d: 0x13d1, 0x1a2e: 0x0039, 0x1a2f: 0x0ee9, + 0x1a30: 0x1159, 0x1a31: 0x0ef9, 0x1a32: 0x0f09, 0x1a33: 0x1199, 0x1a34: 0x0f31, 0x1a35: 0x0249, + 0x1a36: 0x0f41, 0x1a37: 0x0259, 0x1a38: 0x0f51, 0x1a39: 0x0359, 0x1a3a: 0x0f61, 0x1a3b: 0x0f71, + 0x1a3c: 0x00d9, 0x1a3d: 0x0f99, 0x1a3e: 0x2039, 0x1a3f: 0x0269, // Block 0x69, offset 0x1a40 - 0x1a40: 0x2039, 0x1a41: 0x0269, 0x1a42: 0x01d9, 0x1a43: 0x0fa9, 0x1a44: 0x0fb9, 0x1a45: 0x1089, - 0x1a46: 0x0279, 0x1a47: 0x0369, 0x1a48: 0x0289, 0x1a49: 0x13d1, 0x1a4a: 0x0039, 0x1a4b: 0x0ee9, - 0x1a4c: 0x1159, 0x1a4d: 0x0ef9, 0x1a4e: 0x0f09, 0x1a4f: 0x1199, 0x1a50: 0x0f31, 0x1a51: 0x0249, - 0x1a52: 0x0f41, 0x1a53: 0x0259, 0x1a54: 0x0f51, 0x1a55: 0x0359, 0x1a56: 0x0f61, 0x1a57: 0x0f71, - 0x1a58: 0x00d9, 0x1a59: 0x0f99, 0x1a5a: 0x2039, 0x1a5b: 0x0269, 0x1a5c: 0x01d9, 0x1a5d: 0x0fa9, - 0x1a5e: 0x0fb9, 0x1a5f: 0x1089, 0x1a60: 0x0279, 0x1a61: 0x0369, 0x1a62: 0x0289, 0x1a63: 0x13d1, - 0x1a64: 0xba81, 0x1a65: 0xba99, 0x1a66: 0x0040, 0x1a67: 0x0040, 0x1a68: 0xbab1, 0x1a69: 0x1099, - 0x1a6a: 0x10b1, 0x1a6b: 0x10c9, 0x1a6c: 0xbac9, 0x1a6d: 0xbae1, 0x1a6e: 0xbaf9, 0x1a6f: 0x1429, - 0x1a70: 0x1a31, 0x1a71: 0xbb11, 0x1a72: 0xbb29, 0x1a73: 0xbb41, 0x1a74: 0xbb59, 0x1a75: 0xbb71, - 0x1a76: 0xbb89, 0x1a77: 0x2109, 0x1a78: 0x1111, 0x1a79: 0x1429, 0x1a7a: 0xbba1, 0x1a7b: 0xbbb9, - 0x1a7c: 0xbbd1, 0x1a7d: 0x10e1, 0x1a7e: 0x10f9, 0x1a7f: 0xbbe9, + 0x1a40: 0x01d9, 0x1a41: 0x0fa9, 0x1a42: 0x0fb9, 0x1a43: 0x1089, 0x1a44: 0x0279, 0x1a45: 0x0369, + 0x1a46: 0x0289, 0x1a47: 0x13d1, 0x1a48: 0x0039, 0x1a49: 0x0ee9, 0x1a4a: 0x1159, 0x1a4b: 0x0ef9, + 0x1a4c: 0x0f09, 0x1a4d: 0x1199, 0x1a4e: 0x0f31, 0x1a4f: 0x0249, 0x1a50: 0x0f41, 0x1a51: 0x0259, + 0x1a52: 0x0f51, 0x1a53: 0x0359, 0x1a54: 0x0f61, 0x1a55: 0x0f71, 0x1a56: 0x00d9, 0x1a57: 0x0f99, + 0x1a58: 0x2039, 0x1a59: 0x0269, 0x1a5a: 0x01d9, 0x1a5b: 0x0fa9, 0x1a5c: 0x0fb9, 0x1a5d: 0x1089, + 0x1a5e: 0x0279, 0x1a5f: 0x0369, 0x1a60: 0x0289, 0x1a61: 0x13d1, 0x1a62: 0x0039, 0x1a63: 0x0ee9, + 0x1a64: 0x1159, 0x1a65: 0x0ef9, 0x1a66: 0x0f09, 0x1a67: 0x1199, 0x1a68: 0x0f31, 0x1a69: 0x0249, + 0x1a6a: 0x0f41, 0x1a6b: 0x0259, 0x1a6c: 0x0f51, 0x1a6d: 0x0359, 0x1a6e: 0x0f61, 0x1a6f: 0x0f71, + 0x1a70: 0x00d9, 0x1a71: 0x0f99, 0x1a72: 0x2039, 0x1a73: 0x0269, 0x1a74: 0x01d9, 0x1a75: 0x0fa9, + 0x1a76: 0x0fb9, 0x1a77: 0x1089, 0x1a78: 0x0279, 0x1a79: 0x0369, 0x1a7a: 0x0289, 0x1a7b: 0x13d1, + 0x1a7c: 0x0039, 0x1a7d: 0x0ee9, 
0x1a7e: 0x1159, 0x1a7f: 0x0ef9, // Block 0x6a, offset 0x1a80 - 0x1a80: 0x2079, 0x1a81: 0xbc01, 0x1a82: 0xbab1, 0x1a83: 0x1099, 0x1a84: 0x10b1, 0x1a85: 0x10c9, - 0x1a86: 0xbac9, 0x1a87: 0xbae1, 0x1a88: 0xbaf9, 0x1a89: 0x1429, 0x1a8a: 0x1a31, 0x1a8b: 0xbb11, - 0x1a8c: 0xbb29, 0x1a8d: 0xbb41, 0x1a8e: 0xbb59, 0x1a8f: 0xbb71, 0x1a90: 0xbb89, 0x1a91: 0x2109, - 0x1a92: 0x1111, 0x1a93: 0xbba1, 0x1a94: 0xbba1, 0x1a95: 0xbbb9, 0x1a96: 0xbbd1, 0x1a97: 0x10e1, - 0x1a98: 0x10f9, 0x1a99: 0xbbe9, 0x1a9a: 0x2079, 0x1a9b: 0xbc21, 0x1a9c: 0xbac9, 0x1a9d: 0x1429, - 0x1a9e: 0xbb11, 0x1a9f: 0x10e1, 0x1aa0: 0x1111, 0x1aa1: 0x2109, 0x1aa2: 0xbab1, 0x1aa3: 0x1099, - 0x1aa4: 0x10b1, 0x1aa5: 0x10c9, 0x1aa6: 0xbac9, 0x1aa7: 0xbae1, 0x1aa8: 0xbaf9, 0x1aa9: 0x1429, - 0x1aaa: 0x1a31, 0x1aab: 0xbb11, 0x1aac: 0xbb29, 0x1aad: 0xbb41, 0x1aae: 0xbb59, 0x1aaf: 0xbb71, - 0x1ab0: 0xbb89, 0x1ab1: 0x2109, 0x1ab2: 0x1111, 0x1ab3: 0x1429, 0x1ab4: 0xbba1, 0x1ab5: 0xbbb9, - 0x1ab6: 0xbbd1, 0x1ab7: 0x10e1, 0x1ab8: 0x10f9, 0x1ab9: 0xbbe9, 0x1aba: 0x2079, 0x1abb: 0xbc01, - 0x1abc: 0xbab1, 0x1abd: 0x1099, 0x1abe: 0x10b1, 0x1abf: 0x10c9, + 0x1a80: 0x0f09, 0x1a81: 0x1199, 0x1a82: 0x0f31, 0x1a83: 0x0249, 0x1a84: 0x0f41, 0x1a85: 0x0259, + 0x1a86: 0x0f51, 0x1a87: 0x0359, 0x1a88: 0x0f61, 0x1a89: 0x0f71, 0x1a8a: 0x00d9, 0x1a8b: 0x0f99, + 0x1a8c: 0x2039, 0x1a8d: 0x0269, 0x1a8e: 0x01d9, 0x1a8f: 0x0fa9, 0x1a90: 0x0fb9, 0x1a91: 0x1089, + 0x1a92: 0x0279, 0x1a93: 0x0369, 0x1a94: 0x0289, 0x1a95: 0x13d1, 0x1a96: 0x0039, 0x1a97: 0x0ee9, + 0x1a98: 0x1159, 0x1a99: 0x0ef9, 0x1a9a: 0x0f09, 0x1a9b: 0x1199, 0x1a9c: 0x0f31, 0x1a9d: 0x0249, + 0x1a9e: 0x0f41, 0x1a9f: 0x0259, 0x1aa0: 0x0f51, 0x1aa1: 0x0359, 0x1aa2: 0x0f61, 0x1aa3: 0x0f71, + 0x1aa4: 0x00d9, 0x1aa5: 0x0f99, 0x1aa6: 0x2039, 0x1aa7: 0x0269, 0x1aa8: 0x01d9, 0x1aa9: 0x0fa9, + 0x1aaa: 0x0fb9, 0x1aab: 0x1089, 0x1aac: 0x0279, 0x1aad: 0x0369, 0x1aae: 0x0289, 0x1aaf: 0x13d1, + 0x1ab0: 0x0039, 0x1ab1: 0x0ee9, 0x1ab2: 0x1159, 0x1ab3: 0x0ef9, 0x1ab4: 0x0f09, 0x1ab5: 0x1199, + 0x1ab6: 0x0f31, 0x1ab7: 0x0249, 0x1ab8: 0x0f41, 0x1ab9: 0x0259, 0x1aba: 0x0f51, 0x1abb: 0x0359, + 0x1abc: 0x0f61, 0x1abd: 0x0f71, 0x1abe: 0x00d9, 0x1abf: 0x0f99, // Block 0x6b, offset 0x1ac0 - 0x1ac0: 0xbac9, 0x1ac1: 0xbae1, 0x1ac2: 0xbaf9, 0x1ac3: 0x1429, 0x1ac4: 0x1a31, 0x1ac5: 0xbb11, - 0x1ac6: 0xbb29, 0x1ac7: 0xbb41, 0x1ac8: 0xbb59, 0x1ac9: 0xbb71, 0x1aca: 0xbb89, 0x1acb: 0x2109, - 0x1acc: 0x1111, 0x1acd: 0xbba1, 0x1ace: 0xbba1, 0x1acf: 0xbbb9, 0x1ad0: 0xbbd1, 0x1ad1: 0x10e1, - 0x1ad2: 0x10f9, 0x1ad3: 0xbbe9, 0x1ad4: 0x2079, 0x1ad5: 0xbc21, 0x1ad6: 0xbac9, 0x1ad7: 0x1429, - 0x1ad8: 0xbb11, 0x1ad9: 0x10e1, 0x1ada: 0x1111, 0x1adb: 0x2109, 0x1adc: 0xbab1, 0x1add: 0x1099, - 0x1ade: 0x10b1, 0x1adf: 0x10c9, 0x1ae0: 0xbac9, 0x1ae1: 0xbae1, 0x1ae2: 0xbaf9, 0x1ae3: 0x1429, - 0x1ae4: 0x1a31, 0x1ae5: 0xbb11, 0x1ae6: 0xbb29, 0x1ae7: 0xbb41, 0x1ae8: 0xbb59, 0x1ae9: 0xbb71, - 0x1aea: 0xbb89, 0x1aeb: 0x2109, 0x1aec: 0x1111, 0x1aed: 0x1429, 0x1aee: 0xbba1, 0x1aef: 0xbbb9, - 0x1af0: 0xbbd1, 0x1af1: 0x10e1, 0x1af2: 0x10f9, 0x1af3: 0xbbe9, 0x1af4: 0x2079, 0x1af5: 0xbc01, - 0x1af6: 0xbab1, 0x1af7: 0x1099, 0x1af8: 0x10b1, 0x1af9: 0x10c9, 0x1afa: 0xbac9, 0x1afb: 0xbae1, - 0x1afc: 0xbaf9, 0x1afd: 0x1429, 0x1afe: 0x1a31, 0x1aff: 0xbb11, + 0x1ac0: 0x2039, 0x1ac1: 0x0269, 0x1ac2: 0x01d9, 0x1ac3: 0x0fa9, 0x1ac4: 0x0fb9, 0x1ac5: 0x1089, + 0x1ac6: 0x0279, 0x1ac7: 0x0369, 0x1ac8: 0x0289, 0x1ac9: 0x13d1, 0x1aca: 0x0039, 0x1acb: 0x0ee9, + 0x1acc: 0x1159, 0x1acd: 0x0ef9, 0x1ace: 0x0f09, 0x1acf: 0x1199, 0x1ad0: 0x0f31, 0x1ad1: 0x0249, + 0x1ad2: 0x0f41, 0x1ad3: 
0x0259, 0x1ad4: 0x0f51, 0x1ad5: 0x0359, 0x1ad6: 0x0f61, 0x1ad7: 0x0f71, + 0x1ad8: 0x00d9, 0x1ad9: 0x0f99, 0x1ada: 0x2039, 0x1adb: 0x0269, 0x1adc: 0x01d9, 0x1add: 0x0fa9, + 0x1ade: 0x0fb9, 0x1adf: 0x1089, 0x1ae0: 0x0279, 0x1ae1: 0x0369, 0x1ae2: 0x0289, 0x1ae3: 0x13d1, + 0x1ae4: 0xba81, 0x1ae5: 0xba99, 0x1ae6: 0x0040, 0x1ae7: 0x0040, 0x1ae8: 0xbab1, 0x1ae9: 0x1099, + 0x1aea: 0x10b1, 0x1aeb: 0x10c9, 0x1aec: 0xbac9, 0x1aed: 0xbae1, 0x1aee: 0xbaf9, 0x1aef: 0x1429, + 0x1af0: 0x1a31, 0x1af1: 0xbb11, 0x1af2: 0xbb29, 0x1af3: 0xbb41, 0x1af4: 0xbb59, 0x1af5: 0xbb71, + 0x1af6: 0xbb89, 0x1af7: 0x2109, 0x1af8: 0x1111, 0x1af9: 0x1429, 0x1afa: 0xbba1, 0x1afb: 0xbbb9, + 0x1afc: 0xbbd1, 0x1afd: 0x10e1, 0x1afe: 0x10f9, 0x1aff: 0xbbe9, // Block 0x6c, offset 0x1b00 - 0x1b00: 0xbb29, 0x1b01: 0xbb41, 0x1b02: 0xbb59, 0x1b03: 0xbb71, 0x1b04: 0xbb89, 0x1b05: 0x2109, - 0x1b06: 0x1111, 0x1b07: 0xbba1, 0x1b08: 0xbba1, 0x1b09: 0xbbb9, 0x1b0a: 0xbbd1, 0x1b0b: 0x10e1, - 0x1b0c: 0x10f9, 0x1b0d: 0xbbe9, 0x1b0e: 0x2079, 0x1b0f: 0xbc21, 0x1b10: 0xbac9, 0x1b11: 0x1429, - 0x1b12: 0xbb11, 0x1b13: 0x10e1, 0x1b14: 0x1111, 0x1b15: 0x2109, 0x1b16: 0xbab1, 0x1b17: 0x1099, - 0x1b18: 0x10b1, 0x1b19: 0x10c9, 0x1b1a: 0xbac9, 0x1b1b: 0xbae1, 0x1b1c: 0xbaf9, 0x1b1d: 0x1429, - 0x1b1e: 0x1a31, 0x1b1f: 0xbb11, 0x1b20: 0xbb29, 0x1b21: 0xbb41, 0x1b22: 0xbb59, 0x1b23: 0xbb71, - 0x1b24: 0xbb89, 0x1b25: 0x2109, 0x1b26: 0x1111, 0x1b27: 0x1429, 0x1b28: 0xbba1, 0x1b29: 0xbbb9, - 0x1b2a: 0xbbd1, 0x1b2b: 0x10e1, 0x1b2c: 0x10f9, 0x1b2d: 0xbbe9, 0x1b2e: 0x2079, 0x1b2f: 0xbc01, - 0x1b30: 0xbab1, 0x1b31: 0x1099, 0x1b32: 0x10b1, 0x1b33: 0x10c9, 0x1b34: 0xbac9, 0x1b35: 0xbae1, - 0x1b36: 0xbaf9, 0x1b37: 0x1429, 0x1b38: 0x1a31, 0x1b39: 0xbb11, 0x1b3a: 0xbb29, 0x1b3b: 0xbb41, - 0x1b3c: 0xbb59, 0x1b3d: 0xbb71, 0x1b3e: 0xbb89, 0x1b3f: 0x2109, + 0x1b00: 0x2079, 0x1b01: 0xbc01, 0x1b02: 0xbab1, 0x1b03: 0x1099, 0x1b04: 0x10b1, 0x1b05: 0x10c9, + 0x1b06: 0xbac9, 0x1b07: 0xbae1, 0x1b08: 0xbaf9, 0x1b09: 0x1429, 0x1b0a: 0x1a31, 0x1b0b: 0xbb11, + 0x1b0c: 0xbb29, 0x1b0d: 0xbb41, 0x1b0e: 0xbb59, 0x1b0f: 0xbb71, 0x1b10: 0xbb89, 0x1b11: 0x2109, + 0x1b12: 0x1111, 0x1b13: 0xbba1, 0x1b14: 0xbba1, 0x1b15: 0xbbb9, 0x1b16: 0xbbd1, 0x1b17: 0x10e1, + 0x1b18: 0x10f9, 0x1b19: 0xbbe9, 0x1b1a: 0x2079, 0x1b1b: 0xbc21, 0x1b1c: 0xbac9, 0x1b1d: 0x1429, + 0x1b1e: 0xbb11, 0x1b1f: 0x10e1, 0x1b20: 0x1111, 0x1b21: 0x2109, 0x1b22: 0xbab1, 0x1b23: 0x1099, + 0x1b24: 0x10b1, 0x1b25: 0x10c9, 0x1b26: 0xbac9, 0x1b27: 0xbae1, 0x1b28: 0xbaf9, 0x1b29: 0x1429, + 0x1b2a: 0x1a31, 0x1b2b: 0xbb11, 0x1b2c: 0xbb29, 0x1b2d: 0xbb41, 0x1b2e: 0xbb59, 0x1b2f: 0xbb71, + 0x1b30: 0xbb89, 0x1b31: 0x2109, 0x1b32: 0x1111, 0x1b33: 0x1429, 0x1b34: 0xbba1, 0x1b35: 0xbbb9, + 0x1b36: 0xbbd1, 0x1b37: 0x10e1, 0x1b38: 0x10f9, 0x1b39: 0xbbe9, 0x1b3a: 0x2079, 0x1b3b: 0xbc01, + 0x1b3c: 0xbab1, 0x1b3d: 0x1099, 0x1b3e: 0x10b1, 0x1b3f: 0x10c9, // Block 0x6d, offset 0x1b40 - 0x1b40: 0x1111, 0x1b41: 0xbba1, 0x1b42: 0xbba1, 0x1b43: 0xbbb9, 0x1b44: 0xbbd1, 0x1b45: 0x10e1, - 0x1b46: 0x10f9, 0x1b47: 0xbbe9, 0x1b48: 0x2079, 0x1b49: 0xbc21, 0x1b4a: 0xbac9, 0x1b4b: 0x1429, - 0x1b4c: 0xbb11, 0x1b4d: 0x10e1, 0x1b4e: 0x1111, 0x1b4f: 0x2109, 0x1b50: 0xbab1, 0x1b51: 0x1099, - 0x1b52: 0x10b1, 0x1b53: 0x10c9, 0x1b54: 0xbac9, 0x1b55: 0xbae1, 0x1b56: 0xbaf9, 0x1b57: 0x1429, - 0x1b58: 0x1a31, 0x1b59: 0xbb11, 0x1b5a: 0xbb29, 0x1b5b: 0xbb41, 0x1b5c: 0xbb59, 0x1b5d: 0xbb71, - 0x1b5e: 0xbb89, 0x1b5f: 0x2109, 0x1b60: 0x1111, 0x1b61: 0x1429, 0x1b62: 0xbba1, 0x1b63: 0xbbb9, - 0x1b64: 0xbbd1, 0x1b65: 0x10e1, 0x1b66: 0x10f9, 0x1b67: 0xbbe9, 0x1b68: 0x2079, 0x1b69: 
0xbc01, - 0x1b6a: 0xbab1, 0x1b6b: 0x1099, 0x1b6c: 0x10b1, 0x1b6d: 0x10c9, 0x1b6e: 0xbac9, 0x1b6f: 0xbae1, - 0x1b70: 0xbaf9, 0x1b71: 0x1429, 0x1b72: 0x1a31, 0x1b73: 0xbb11, 0x1b74: 0xbb29, 0x1b75: 0xbb41, - 0x1b76: 0xbb59, 0x1b77: 0xbb71, 0x1b78: 0xbb89, 0x1b79: 0x2109, 0x1b7a: 0x1111, 0x1b7b: 0xbba1, - 0x1b7c: 0xbba1, 0x1b7d: 0xbbb9, 0x1b7e: 0xbbd1, 0x1b7f: 0x10e1, + 0x1b40: 0xbac9, 0x1b41: 0xbae1, 0x1b42: 0xbaf9, 0x1b43: 0x1429, 0x1b44: 0x1a31, 0x1b45: 0xbb11, + 0x1b46: 0xbb29, 0x1b47: 0xbb41, 0x1b48: 0xbb59, 0x1b49: 0xbb71, 0x1b4a: 0xbb89, 0x1b4b: 0x2109, + 0x1b4c: 0x1111, 0x1b4d: 0xbba1, 0x1b4e: 0xbba1, 0x1b4f: 0xbbb9, 0x1b50: 0xbbd1, 0x1b51: 0x10e1, + 0x1b52: 0x10f9, 0x1b53: 0xbbe9, 0x1b54: 0x2079, 0x1b55: 0xbc21, 0x1b56: 0xbac9, 0x1b57: 0x1429, + 0x1b58: 0xbb11, 0x1b59: 0x10e1, 0x1b5a: 0x1111, 0x1b5b: 0x2109, 0x1b5c: 0xbab1, 0x1b5d: 0x1099, + 0x1b5e: 0x10b1, 0x1b5f: 0x10c9, 0x1b60: 0xbac9, 0x1b61: 0xbae1, 0x1b62: 0xbaf9, 0x1b63: 0x1429, + 0x1b64: 0x1a31, 0x1b65: 0xbb11, 0x1b66: 0xbb29, 0x1b67: 0xbb41, 0x1b68: 0xbb59, 0x1b69: 0xbb71, + 0x1b6a: 0xbb89, 0x1b6b: 0x2109, 0x1b6c: 0x1111, 0x1b6d: 0x1429, 0x1b6e: 0xbba1, 0x1b6f: 0xbbb9, + 0x1b70: 0xbbd1, 0x1b71: 0x10e1, 0x1b72: 0x10f9, 0x1b73: 0xbbe9, 0x1b74: 0x2079, 0x1b75: 0xbc01, + 0x1b76: 0xbab1, 0x1b77: 0x1099, 0x1b78: 0x10b1, 0x1b79: 0x10c9, 0x1b7a: 0xbac9, 0x1b7b: 0xbae1, + 0x1b7c: 0xbaf9, 0x1b7d: 0x1429, 0x1b7e: 0x1a31, 0x1b7f: 0xbb11, // Block 0x6e, offset 0x1b80 - 0x1b80: 0x10f9, 0x1b81: 0xbbe9, 0x1b82: 0x2079, 0x1b83: 0xbc21, 0x1b84: 0xbac9, 0x1b85: 0x1429, - 0x1b86: 0xbb11, 0x1b87: 0x10e1, 0x1b88: 0x1111, 0x1b89: 0x2109, 0x1b8a: 0xbc41, 0x1b8b: 0xbc41, - 0x1b8c: 0x0040, 0x1b8d: 0x0040, 0x1b8e: 0x1f41, 0x1b8f: 0x00c9, 0x1b90: 0x0069, 0x1b91: 0x0079, - 0x1b92: 0x1f51, 0x1b93: 0x1f61, 0x1b94: 0x1f71, 0x1b95: 0x1f81, 0x1b96: 0x1f91, 0x1b97: 0x1fa1, - 0x1b98: 0x1f41, 0x1b99: 0x00c9, 0x1b9a: 0x0069, 0x1b9b: 0x0079, 0x1b9c: 0x1f51, 0x1b9d: 0x1f61, - 0x1b9e: 0x1f71, 0x1b9f: 0x1f81, 0x1ba0: 0x1f91, 0x1ba1: 0x1fa1, 0x1ba2: 0x1f41, 0x1ba3: 0x00c9, - 0x1ba4: 0x0069, 0x1ba5: 0x0079, 0x1ba6: 0x1f51, 0x1ba7: 0x1f61, 0x1ba8: 0x1f71, 0x1ba9: 0x1f81, - 0x1baa: 0x1f91, 0x1bab: 0x1fa1, 0x1bac: 0x1f41, 0x1bad: 0x00c9, 0x1bae: 0x0069, 0x1baf: 0x0079, - 0x1bb0: 0x1f51, 0x1bb1: 0x1f61, 0x1bb2: 0x1f71, 0x1bb3: 0x1f81, 0x1bb4: 0x1f91, 0x1bb5: 0x1fa1, - 0x1bb6: 0x1f41, 0x1bb7: 0x00c9, 0x1bb8: 0x0069, 0x1bb9: 0x0079, 0x1bba: 0x1f51, 0x1bbb: 0x1f61, - 0x1bbc: 0x1f71, 0x1bbd: 0x1f81, 0x1bbe: 0x1f91, 0x1bbf: 0x1fa1, + 0x1b80: 0xbb29, 0x1b81: 0xbb41, 0x1b82: 0xbb59, 0x1b83: 0xbb71, 0x1b84: 0xbb89, 0x1b85: 0x2109, + 0x1b86: 0x1111, 0x1b87: 0xbba1, 0x1b88: 0xbba1, 0x1b89: 0xbbb9, 0x1b8a: 0xbbd1, 0x1b8b: 0x10e1, + 0x1b8c: 0x10f9, 0x1b8d: 0xbbe9, 0x1b8e: 0x2079, 0x1b8f: 0xbc21, 0x1b90: 0xbac9, 0x1b91: 0x1429, + 0x1b92: 0xbb11, 0x1b93: 0x10e1, 0x1b94: 0x1111, 0x1b95: 0x2109, 0x1b96: 0xbab1, 0x1b97: 0x1099, + 0x1b98: 0x10b1, 0x1b99: 0x10c9, 0x1b9a: 0xbac9, 0x1b9b: 0xbae1, 0x1b9c: 0xbaf9, 0x1b9d: 0x1429, + 0x1b9e: 0x1a31, 0x1b9f: 0xbb11, 0x1ba0: 0xbb29, 0x1ba1: 0xbb41, 0x1ba2: 0xbb59, 0x1ba3: 0xbb71, + 0x1ba4: 0xbb89, 0x1ba5: 0x2109, 0x1ba6: 0x1111, 0x1ba7: 0x1429, 0x1ba8: 0xbba1, 0x1ba9: 0xbbb9, + 0x1baa: 0xbbd1, 0x1bab: 0x10e1, 0x1bac: 0x10f9, 0x1bad: 0xbbe9, 0x1bae: 0x2079, 0x1baf: 0xbc01, + 0x1bb0: 0xbab1, 0x1bb1: 0x1099, 0x1bb2: 0x10b1, 0x1bb3: 0x10c9, 0x1bb4: 0xbac9, 0x1bb5: 0xbae1, + 0x1bb6: 0xbaf9, 0x1bb7: 0x1429, 0x1bb8: 0x1a31, 0x1bb9: 0xbb11, 0x1bba: 0xbb29, 0x1bbb: 0xbb41, + 0x1bbc: 0xbb59, 0x1bbd: 0xbb71, 0x1bbe: 0xbb89, 0x1bbf: 0x2109, // Block 0x6f, 
offset 0x1bc0 - 0x1bc0: 0xe115, 0x1bc1: 0xe115, 0x1bc2: 0xe135, 0x1bc3: 0xe135, 0x1bc4: 0xe115, 0x1bc5: 0xe115, - 0x1bc6: 0xe175, 0x1bc7: 0xe175, 0x1bc8: 0xe115, 0x1bc9: 0xe115, 0x1bca: 0xe135, 0x1bcb: 0xe135, - 0x1bcc: 0xe115, 0x1bcd: 0xe115, 0x1bce: 0xe1f5, 0x1bcf: 0xe1f5, 0x1bd0: 0xe115, 0x1bd1: 0xe115, - 0x1bd2: 0xe135, 0x1bd3: 0xe135, 0x1bd4: 0xe115, 0x1bd5: 0xe115, 0x1bd6: 0xe175, 0x1bd7: 0xe175, - 0x1bd8: 0xe115, 0x1bd9: 0xe115, 0x1bda: 0xe135, 0x1bdb: 0xe135, 0x1bdc: 0xe115, 0x1bdd: 0xe115, - 0x1bde: 0x8b05, 0x1bdf: 0x8b05, 0x1be0: 0x04b5, 0x1be1: 0x04b5, 0x1be2: 0x0208, 0x1be3: 0x0208, - 0x1be4: 0x0208, 0x1be5: 0x0208, 0x1be6: 0x0208, 0x1be7: 0x0208, 0x1be8: 0x0208, 0x1be9: 0x0208, - 0x1bea: 0x0208, 0x1beb: 0x0208, 0x1bec: 0x0208, 0x1bed: 0x0208, 0x1bee: 0x0208, 0x1bef: 0x0208, - 0x1bf0: 0x0208, 0x1bf1: 0x0208, 0x1bf2: 0x0208, 0x1bf3: 0x0208, 0x1bf4: 0x0208, 0x1bf5: 0x0208, - 0x1bf6: 0x0208, 0x1bf7: 0x0208, 0x1bf8: 0x0208, 0x1bf9: 0x0208, 0x1bfa: 0x0208, 0x1bfb: 0x0208, - 0x1bfc: 0x0208, 0x1bfd: 0x0208, 0x1bfe: 0x0208, 0x1bff: 0x0208, + 0x1bc0: 0x1111, 0x1bc1: 0xbba1, 0x1bc2: 0xbba1, 0x1bc3: 0xbbb9, 0x1bc4: 0xbbd1, 0x1bc5: 0x10e1, + 0x1bc6: 0x10f9, 0x1bc7: 0xbbe9, 0x1bc8: 0x2079, 0x1bc9: 0xbc21, 0x1bca: 0xbac9, 0x1bcb: 0x1429, + 0x1bcc: 0xbb11, 0x1bcd: 0x10e1, 0x1bce: 0x1111, 0x1bcf: 0x2109, 0x1bd0: 0xbab1, 0x1bd1: 0x1099, + 0x1bd2: 0x10b1, 0x1bd3: 0x10c9, 0x1bd4: 0xbac9, 0x1bd5: 0xbae1, 0x1bd6: 0xbaf9, 0x1bd7: 0x1429, + 0x1bd8: 0x1a31, 0x1bd9: 0xbb11, 0x1bda: 0xbb29, 0x1bdb: 0xbb41, 0x1bdc: 0xbb59, 0x1bdd: 0xbb71, + 0x1bde: 0xbb89, 0x1bdf: 0x2109, 0x1be0: 0x1111, 0x1be1: 0x1429, 0x1be2: 0xbba1, 0x1be3: 0xbbb9, + 0x1be4: 0xbbd1, 0x1be5: 0x10e1, 0x1be6: 0x10f9, 0x1be7: 0xbbe9, 0x1be8: 0x2079, 0x1be9: 0xbc01, + 0x1bea: 0xbab1, 0x1beb: 0x1099, 0x1bec: 0x10b1, 0x1bed: 0x10c9, 0x1bee: 0xbac9, 0x1bef: 0xbae1, + 0x1bf0: 0xbaf9, 0x1bf1: 0x1429, 0x1bf2: 0x1a31, 0x1bf3: 0xbb11, 0x1bf4: 0xbb29, 0x1bf5: 0xbb41, + 0x1bf6: 0xbb59, 0x1bf7: 0xbb71, 0x1bf8: 0xbb89, 0x1bf9: 0x2109, 0x1bfa: 0x1111, 0x1bfb: 0xbba1, + 0x1bfc: 0xbba1, 0x1bfd: 0xbbb9, 0x1bfe: 0xbbd1, 0x1bff: 0x10e1, // Block 0x70, offset 0x1c00 - 0x1c00: 0xb189, 0x1c01: 0xb1a1, 0x1c02: 0xb201, 0x1c03: 0xb249, 0x1c04: 0x0040, 0x1c05: 0xb411, - 0x1c06: 0xb291, 0x1c07: 0xb219, 0x1c08: 0xb309, 0x1c09: 0xb429, 0x1c0a: 0xb399, 0x1c0b: 0xb3b1, - 0x1c0c: 0xb3c9, 0x1c0d: 0xb3e1, 0x1c0e: 0xb2a9, 0x1c0f: 0xb339, 0x1c10: 0xb369, 0x1c11: 0xb2d9, - 0x1c12: 0xb381, 0x1c13: 0xb279, 0x1c14: 0xb2c1, 0x1c15: 0xb1d1, 0x1c16: 0xb1e9, 0x1c17: 0xb231, - 0x1c18: 0xb261, 0x1c19: 0xb2f1, 0x1c1a: 0xb321, 0x1c1b: 0xb351, 0x1c1c: 0xbc59, 0x1c1d: 0x7949, - 0x1c1e: 0xbc71, 0x1c1f: 0xbc89, 0x1c20: 0x0040, 0x1c21: 0xb1a1, 0x1c22: 0xb201, 0x1c23: 0x0040, - 0x1c24: 0xb3f9, 0x1c25: 0x0040, 0x1c26: 0x0040, 0x1c27: 0xb219, 0x1c28: 0x0040, 0x1c29: 0xb429, - 0x1c2a: 0xb399, 0x1c2b: 0xb3b1, 0x1c2c: 0xb3c9, 0x1c2d: 0xb3e1, 0x1c2e: 0xb2a9, 0x1c2f: 0xb339, - 0x1c30: 0xb369, 0x1c31: 0xb2d9, 0x1c32: 0xb381, 0x1c33: 0x0040, 0x1c34: 0xb2c1, 0x1c35: 0xb1d1, - 0x1c36: 0xb1e9, 0x1c37: 0xb231, 0x1c38: 0x0040, 0x1c39: 0xb2f1, 0x1c3a: 0x0040, 0x1c3b: 0xb351, - 0x1c3c: 0x0040, 0x1c3d: 0x0040, 0x1c3e: 0x0040, 0x1c3f: 0x0040, + 0x1c00: 0x10f9, 0x1c01: 0xbbe9, 0x1c02: 0x2079, 0x1c03: 0xbc21, 0x1c04: 0xbac9, 0x1c05: 0x1429, + 0x1c06: 0xbb11, 0x1c07: 0x10e1, 0x1c08: 0x1111, 0x1c09: 0x2109, 0x1c0a: 0xbc41, 0x1c0b: 0xbc41, + 0x1c0c: 0x0040, 0x1c0d: 0x0040, 0x1c0e: 0x1f41, 0x1c0f: 0x00c9, 0x1c10: 0x0069, 0x1c11: 0x0079, + 0x1c12: 0x1f51, 0x1c13: 0x1f61, 0x1c14: 0x1f71, 0x1c15: 0x1f81, 0x1c16: 
0x1f91, 0x1c17: 0x1fa1, + 0x1c18: 0x1f41, 0x1c19: 0x00c9, 0x1c1a: 0x0069, 0x1c1b: 0x0079, 0x1c1c: 0x1f51, 0x1c1d: 0x1f61, + 0x1c1e: 0x1f71, 0x1c1f: 0x1f81, 0x1c20: 0x1f91, 0x1c21: 0x1fa1, 0x1c22: 0x1f41, 0x1c23: 0x00c9, + 0x1c24: 0x0069, 0x1c25: 0x0079, 0x1c26: 0x1f51, 0x1c27: 0x1f61, 0x1c28: 0x1f71, 0x1c29: 0x1f81, + 0x1c2a: 0x1f91, 0x1c2b: 0x1fa1, 0x1c2c: 0x1f41, 0x1c2d: 0x00c9, 0x1c2e: 0x0069, 0x1c2f: 0x0079, + 0x1c30: 0x1f51, 0x1c31: 0x1f61, 0x1c32: 0x1f71, 0x1c33: 0x1f81, 0x1c34: 0x1f91, 0x1c35: 0x1fa1, + 0x1c36: 0x1f41, 0x1c37: 0x00c9, 0x1c38: 0x0069, 0x1c39: 0x0079, 0x1c3a: 0x1f51, 0x1c3b: 0x1f61, + 0x1c3c: 0x1f71, 0x1c3d: 0x1f81, 0x1c3e: 0x1f91, 0x1c3f: 0x1fa1, // Block 0x71, offset 0x1c40 - 0x1c40: 0x0040, 0x1c41: 0x0040, 0x1c42: 0xb201, 0x1c43: 0x0040, 0x1c44: 0x0040, 0x1c45: 0x0040, - 0x1c46: 0x0040, 0x1c47: 0xb219, 0x1c48: 0x0040, 0x1c49: 0xb429, 0x1c4a: 0x0040, 0x1c4b: 0xb3b1, - 0x1c4c: 0x0040, 0x1c4d: 0xb3e1, 0x1c4e: 0xb2a9, 0x1c4f: 0xb339, 0x1c50: 0x0040, 0x1c51: 0xb2d9, - 0x1c52: 0xb381, 0x1c53: 0x0040, 0x1c54: 0xb2c1, 0x1c55: 0x0040, 0x1c56: 0x0040, 0x1c57: 0xb231, - 0x1c58: 0x0040, 0x1c59: 0xb2f1, 0x1c5a: 0x0040, 0x1c5b: 0xb351, 0x1c5c: 0x0040, 0x1c5d: 0x7949, - 0x1c5e: 0x0040, 0x1c5f: 0xbc89, 0x1c60: 0x0040, 0x1c61: 0xb1a1, 0x1c62: 0xb201, 0x1c63: 0x0040, - 0x1c64: 0xb3f9, 0x1c65: 0x0040, 0x1c66: 0x0040, 0x1c67: 0xb219, 0x1c68: 0xb309, 0x1c69: 0xb429, - 0x1c6a: 0xb399, 0x1c6b: 0x0040, 0x1c6c: 0xb3c9, 0x1c6d: 0xb3e1, 0x1c6e: 0xb2a9, 0x1c6f: 0xb339, - 0x1c70: 0xb369, 0x1c71: 0xb2d9, 0x1c72: 0xb381, 0x1c73: 0x0040, 0x1c74: 0xb2c1, 0x1c75: 0xb1d1, - 0x1c76: 0xb1e9, 0x1c77: 0xb231, 0x1c78: 0x0040, 0x1c79: 0xb2f1, 0x1c7a: 0xb321, 0x1c7b: 0xb351, - 0x1c7c: 0xbc59, 0x1c7d: 0x0040, 0x1c7e: 0xbc71, 0x1c7f: 0x0040, + 0x1c40: 0xe115, 0x1c41: 0xe115, 0x1c42: 0xe135, 0x1c43: 0xe135, 0x1c44: 0xe115, 0x1c45: 0xe115, + 0x1c46: 0xe175, 0x1c47: 0xe175, 0x1c48: 0xe115, 0x1c49: 0xe115, 0x1c4a: 0xe135, 0x1c4b: 0xe135, + 0x1c4c: 0xe115, 0x1c4d: 0xe115, 0x1c4e: 0xe1f5, 0x1c4f: 0xe1f5, 0x1c50: 0xe115, 0x1c51: 0xe115, + 0x1c52: 0xe135, 0x1c53: 0xe135, 0x1c54: 0xe115, 0x1c55: 0xe115, 0x1c56: 0xe175, 0x1c57: 0xe175, + 0x1c58: 0xe115, 0x1c59: 0xe115, 0x1c5a: 0xe135, 0x1c5b: 0xe135, 0x1c5c: 0xe115, 0x1c5d: 0xe115, + 0x1c5e: 0x8b05, 0x1c5f: 0x8b05, 0x1c60: 0x04b5, 0x1c61: 0x04b5, 0x1c62: 0x0a08, 0x1c63: 0x0a08, + 0x1c64: 0x0a08, 0x1c65: 0x0a08, 0x1c66: 0x0a08, 0x1c67: 0x0a08, 0x1c68: 0x0a08, 0x1c69: 0x0a08, + 0x1c6a: 0x0a08, 0x1c6b: 0x0a08, 0x1c6c: 0x0a08, 0x1c6d: 0x0a08, 0x1c6e: 0x0a08, 0x1c6f: 0x0a08, + 0x1c70: 0x0a08, 0x1c71: 0x0a08, 0x1c72: 0x0a08, 0x1c73: 0x0a08, 0x1c74: 0x0a08, 0x1c75: 0x0a08, + 0x1c76: 0x0a08, 0x1c77: 0x0a08, 0x1c78: 0x0a08, 0x1c79: 0x0a08, 0x1c7a: 0x0a08, 0x1c7b: 0x0a08, + 0x1c7c: 0x0a08, 0x1c7d: 0x0a08, 0x1c7e: 0x0a08, 0x1c7f: 0x0a08, // Block 0x72, offset 0x1c80 - 0x1c80: 0xb189, 0x1c81: 0xb1a1, 0x1c82: 0xb201, 0x1c83: 0xb249, 0x1c84: 0xb3f9, 0x1c85: 0xb411, - 0x1c86: 0xb291, 0x1c87: 0xb219, 0x1c88: 0xb309, 0x1c89: 0xb429, 0x1c8a: 0x0040, 0x1c8b: 0xb3b1, + 0x1c80: 0xb189, 0x1c81: 0xb1a1, 0x1c82: 0xb201, 0x1c83: 0xb249, 0x1c84: 0x0040, 0x1c85: 0xb411, + 0x1c86: 0xb291, 0x1c87: 0xb219, 0x1c88: 0xb309, 0x1c89: 0xb429, 0x1c8a: 0xb399, 0x1c8b: 0xb3b1, 0x1c8c: 0xb3c9, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0xb369, 0x1c91: 0xb2d9, 0x1c92: 0xb381, 0x1c93: 0xb279, 0x1c94: 0xb2c1, 0x1c95: 0xb1d1, 0x1c96: 0xb1e9, 0x1c97: 0xb231, - 0x1c98: 0xb261, 0x1c99: 0xb2f1, 0x1c9a: 0xb321, 0x1c9b: 0xb351, 0x1c9c: 0x0040, 0x1c9d: 0x0040, - 0x1c9e: 0x0040, 0x1c9f: 0x0040, 0x1ca0: 
0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0xb249, - 0x1ca4: 0x0040, 0x1ca5: 0xb411, 0x1ca6: 0xb291, 0x1ca7: 0xb219, 0x1ca8: 0xb309, 0x1ca9: 0xb429, - 0x1caa: 0x0040, 0x1cab: 0xb3b1, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339, - 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0xb279, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1, - 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0xb261, 0x1cb9: 0xb2f1, 0x1cba: 0xb321, 0x1cbb: 0xb351, + 0x1c98: 0xb261, 0x1c99: 0xb2f1, 0x1c9a: 0xb321, 0x1c9b: 0xb351, 0x1c9c: 0xbc59, 0x1c9d: 0x7949, + 0x1c9e: 0xbc71, 0x1c9f: 0xbc89, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0x0040, + 0x1ca4: 0xb3f9, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0xb219, 0x1ca8: 0x0040, 0x1ca9: 0xb429, + 0x1caa: 0xb399, 0x1cab: 0xb3b1, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339, + 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0x0040, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1, + 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0x0040, 0x1cb9: 0xb2f1, 0x1cba: 0x0040, 0x1cbb: 0xb351, 0x1cbc: 0x0040, 0x1cbd: 0x0040, 0x1cbe: 0x0040, 0x1cbf: 0x0040, // Block 0x73, offset 0x1cc0 - 0x1cc0: 0x0040, 0x1cc1: 0xbca2, 0x1cc2: 0xbcba, 0x1cc3: 0xbcd2, 0x1cc4: 0xbcea, 0x1cc5: 0xbd02, - 0x1cc6: 0xbd1a, 0x1cc7: 0xbd32, 0x1cc8: 0xbd4a, 0x1cc9: 0xbd62, 0x1cca: 0xbd7a, 0x1ccb: 0x0018, - 0x1ccc: 0x0018, 0x1ccd: 0x0040, 0x1cce: 0x0040, 0x1ccf: 0x0040, 0x1cd0: 0xbd92, 0x1cd1: 0xbdb2, - 0x1cd2: 0xbdd2, 0x1cd3: 0xbdf2, 0x1cd4: 0xbe12, 0x1cd5: 0xbe32, 0x1cd6: 0xbe52, 0x1cd7: 0xbe72, - 0x1cd8: 0xbe92, 0x1cd9: 0xbeb2, 0x1cda: 0xbed2, 0x1cdb: 0xbef2, 0x1cdc: 0xbf12, 0x1cdd: 0xbf32, - 0x1cde: 0xbf52, 0x1cdf: 0xbf72, 0x1ce0: 0xbf92, 0x1ce1: 0xbfb2, 0x1ce2: 0xbfd2, 0x1ce3: 0xbff2, - 0x1ce4: 0xc012, 0x1ce5: 0xc032, 0x1ce6: 0xc052, 0x1ce7: 0xc072, 0x1ce8: 0xc092, 0x1ce9: 0xc0b2, - 0x1cea: 0xc0d1, 0x1ceb: 0x1159, 0x1cec: 0x0269, 0x1ced: 0x6671, 0x1cee: 0xc111, 0x1cef: 0x0040, - 0x1cf0: 0x0039, 0x1cf1: 0x0ee9, 0x1cf2: 0x1159, 0x1cf3: 0x0ef9, 0x1cf4: 0x0f09, 0x1cf5: 0x1199, - 0x1cf6: 0x0f31, 0x1cf7: 0x0249, 0x1cf8: 0x0f41, 0x1cf9: 0x0259, 0x1cfa: 0x0f51, 0x1cfb: 0x0359, - 0x1cfc: 0x0f61, 0x1cfd: 0x0f71, 0x1cfe: 0x00d9, 0x1cff: 0x0f99, + 0x1cc0: 0x0040, 0x1cc1: 0x0040, 0x1cc2: 0xb201, 0x1cc3: 0x0040, 0x1cc4: 0x0040, 0x1cc5: 0x0040, + 0x1cc6: 0x0040, 0x1cc7: 0xb219, 0x1cc8: 0x0040, 0x1cc9: 0xb429, 0x1cca: 0x0040, 0x1ccb: 0xb3b1, + 0x1ccc: 0x0040, 0x1ccd: 0xb3e1, 0x1cce: 0xb2a9, 0x1ccf: 0xb339, 0x1cd0: 0x0040, 0x1cd1: 0xb2d9, + 0x1cd2: 0xb381, 0x1cd3: 0x0040, 0x1cd4: 0xb2c1, 0x1cd5: 0x0040, 0x1cd6: 0x0040, 0x1cd7: 0xb231, + 0x1cd8: 0x0040, 0x1cd9: 0xb2f1, 0x1cda: 0x0040, 0x1cdb: 0xb351, 0x1cdc: 0x0040, 0x1cdd: 0x7949, + 0x1cde: 0x0040, 0x1cdf: 0xbc89, 0x1ce0: 0x0040, 0x1ce1: 0xb1a1, 0x1ce2: 0xb201, 0x1ce3: 0x0040, + 0x1ce4: 0xb3f9, 0x1ce5: 0x0040, 0x1ce6: 0x0040, 0x1ce7: 0xb219, 0x1ce8: 0xb309, 0x1ce9: 0xb429, + 0x1cea: 0xb399, 0x1ceb: 0x0040, 0x1cec: 0xb3c9, 0x1ced: 0xb3e1, 0x1cee: 0xb2a9, 0x1cef: 0xb339, + 0x1cf0: 0xb369, 0x1cf1: 0xb2d9, 0x1cf2: 0xb381, 0x1cf3: 0x0040, 0x1cf4: 0xb2c1, 0x1cf5: 0xb1d1, + 0x1cf6: 0xb1e9, 0x1cf7: 0xb231, 0x1cf8: 0x0040, 0x1cf9: 0xb2f1, 0x1cfa: 0xb321, 0x1cfb: 0xb351, + 0x1cfc: 0xbc59, 0x1cfd: 0x0040, 0x1cfe: 0xbc71, 0x1cff: 0x0040, // Block 0x74, offset 0x1d00 - 0x1d00: 0x2039, 0x1d01: 0x0269, 0x1d02: 0x01d9, 0x1d03: 0x0fa9, 0x1d04: 0x0fb9, 0x1d05: 0x1089, - 0x1d06: 0x0279, 0x1d07: 0x0369, 0x1d08: 0x0289, 0x1d09: 0x13d1, 0x1d0a: 0xc129, 0x1d0b: 0x65b1, - 0x1d0c: 0xc141, 0x1d0d: 0x1441, 0x1d0e: 0xc159, 0x1d0f: 0xc179, 0x1d10: 0x0018, 0x1d11: 0x0018, - 0x1d12: 
0x0018, 0x1d13: 0x0018, 0x1d14: 0x0018, 0x1d15: 0x0018, 0x1d16: 0x0018, 0x1d17: 0x0018, - 0x1d18: 0x0018, 0x1d19: 0x0018, 0x1d1a: 0x0018, 0x1d1b: 0x0018, 0x1d1c: 0x0018, 0x1d1d: 0x0018, - 0x1d1e: 0x0018, 0x1d1f: 0x0018, 0x1d20: 0x0018, 0x1d21: 0x0018, 0x1d22: 0x0018, 0x1d23: 0x0018, - 0x1d24: 0x0018, 0x1d25: 0x0018, 0x1d26: 0x0018, 0x1d27: 0x0018, 0x1d28: 0x0018, 0x1d29: 0x0018, - 0x1d2a: 0xc191, 0x1d2b: 0xc1a9, 0x1d2c: 0x0040, 0x1d2d: 0x0040, 0x1d2e: 0x0040, 0x1d2f: 0x0040, - 0x1d30: 0x0018, 0x1d31: 0x0018, 0x1d32: 0x0018, 0x1d33: 0x0018, 0x1d34: 0x0018, 0x1d35: 0x0018, - 0x1d36: 0x0018, 0x1d37: 0x0018, 0x1d38: 0x0018, 0x1d39: 0x0018, 0x1d3a: 0x0018, 0x1d3b: 0x0018, - 0x1d3c: 0x0018, 0x1d3d: 0x0018, 0x1d3e: 0x0018, 0x1d3f: 0x0018, + 0x1d00: 0xb189, 0x1d01: 0xb1a1, 0x1d02: 0xb201, 0x1d03: 0xb249, 0x1d04: 0xb3f9, 0x1d05: 0xb411, + 0x1d06: 0xb291, 0x1d07: 0xb219, 0x1d08: 0xb309, 0x1d09: 0xb429, 0x1d0a: 0x0040, 0x1d0b: 0xb3b1, + 0x1d0c: 0xb3c9, 0x1d0d: 0xb3e1, 0x1d0e: 0xb2a9, 0x1d0f: 0xb339, 0x1d10: 0xb369, 0x1d11: 0xb2d9, + 0x1d12: 0xb381, 0x1d13: 0xb279, 0x1d14: 0xb2c1, 0x1d15: 0xb1d1, 0x1d16: 0xb1e9, 0x1d17: 0xb231, + 0x1d18: 0xb261, 0x1d19: 0xb2f1, 0x1d1a: 0xb321, 0x1d1b: 0xb351, 0x1d1c: 0x0040, 0x1d1d: 0x0040, + 0x1d1e: 0x0040, 0x1d1f: 0x0040, 0x1d20: 0x0040, 0x1d21: 0xb1a1, 0x1d22: 0xb201, 0x1d23: 0xb249, + 0x1d24: 0x0040, 0x1d25: 0xb411, 0x1d26: 0xb291, 0x1d27: 0xb219, 0x1d28: 0xb309, 0x1d29: 0xb429, + 0x1d2a: 0x0040, 0x1d2b: 0xb3b1, 0x1d2c: 0xb3c9, 0x1d2d: 0xb3e1, 0x1d2e: 0xb2a9, 0x1d2f: 0xb339, + 0x1d30: 0xb369, 0x1d31: 0xb2d9, 0x1d32: 0xb381, 0x1d33: 0xb279, 0x1d34: 0xb2c1, 0x1d35: 0xb1d1, + 0x1d36: 0xb1e9, 0x1d37: 0xb231, 0x1d38: 0xb261, 0x1d39: 0xb2f1, 0x1d3a: 0xb321, 0x1d3b: 0xb351, + 0x1d3c: 0x0040, 0x1d3d: 0x0040, 0x1d3e: 0x0040, 0x1d3f: 0x0040, // Block 0x75, offset 0x1d40 - 0x1d40: 0xc1d9, 0x1d41: 0xc211, 0x1d42: 0xc249, 0x1d43: 0x0040, 0x1d44: 0x0040, 0x1d45: 0x0040, - 0x1d46: 0x0040, 0x1d47: 0x0040, 0x1d48: 0x0040, 0x1d49: 0x0040, 0x1d4a: 0x0040, 0x1d4b: 0x0040, - 0x1d4c: 0x0040, 0x1d4d: 0x0040, 0x1d4e: 0x0040, 0x1d4f: 0x0040, 0x1d50: 0xc269, 0x1d51: 0xc289, - 0x1d52: 0xc2a9, 0x1d53: 0xc2c9, 0x1d54: 0xc2e9, 0x1d55: 0xc309, 0x1d56: 0xc329, 0x1d57: 0xc349, - 0x1d58: 0xc369, 0x1d59: 0xc389, 0x1d5a: 0xc3a9, 0x1d5b: 0xc3c9, 0x1d5c: 0xc3e9, 0x1d5d: 0xc409, - 0x1d5e: 0xc429, 0x1d5f: 0xc449, 0x1d60: 0xc469, 0x1d61: 0xc489, 0x1d62: 0xc4a9, 0x1d63: 0xc4c9, - 0x1d64: 0xc4e9, 0x1d65: 0xc509, 0x1d66: 0xc529, 0x1d67: 0xc549, 0x1d68: 0xc569, 0x1d69: 0xc589, - 0x1d6a: 0xc5a9, 0x1d6b: 0xc5c9, 0x1d6c: 0xc5e9, 0x1d6d: 0xc609, 0x1d6e: 0xc629, 0x1d6f: 0xc649, - 0x1d70: 0xc669, 0x1d71: 0xc689, 0x1d72: 0xc6a9, 0x1d73: 0xc6c9, 0x1d74: 0xc6e9, 0x1d75: 0xc709, - 0x1d76: 0xc729, 0x1d77: 0xc749, 0x1d78: 0xc769, 0x1d79: 0xc789, 0x1d7a: 0xc7a9, 0x1d7b: 0xc7c9, - 0x1d7c: 0x0040, 0x1d7d: 0x0040, 0x1d7e: 0x0040, 0x1d7f: 0x0040, + 0x1d40: 0x0040, 0x1d41: 0xbca2, 0x1d42: 0xbcba, 0x1d43: 0xbcd2, 0x1d44: 0xbcea, 0x1d45: 0xbd02, + 0x1d46: 0xbd1a, 0x1d47: 0xbd32, 0x1d48: 0xbd4a, 0x1d49: 0xbd62, 0x1d4a: 0xbd7a, 0x1d4b: 0x0018, + 0x1d4c: 0x0018, 0x1d4d: 0x0040, 0x1d4e: 0x0040, 0x1d4f: 0x0040, 0x1d50: 0xbd92, 0x1d51: 0xbdb2, + 0x1d52: 0xbdd2, 0x1d53: 0xbdf2, 0x1d54: 0xbe12, 0x1d55: 0xbe32, 0x1d56: 0xbe52, 0x1d57: 0xbe72, + 0x1d58: 0xbe92, 0x1d59: 0xbeb2, 0x1d5a: 0xbed2, 0x1d5b: 0xbef2, 0x1d5c: 0xbf12, 0x1d5d: 0xbf32, + 0x1d5e: 0xbf52, 0x1d5f: 0xbf72, 0x1d60: 0xbf92, 0x1d61: 0xbfb2, 0x1d62: 0xbfd2, 0x1d63: 0xbff2, + 0x1d64: 0xc012, 0x1d65: 0xc032, 0x1d66: 0xc052, 0x1d67: 0xc072, 0x1d68: 0xc092, 0x1d69: 0xc0b2, + 
0x1d6a: 0xc0d1, 0x1d6b: 0x1159, 0x1d6c: 0x0269, 0x1d6d: 0x6671, 0x1d6e: 0xc111, 0x1d6f: 0x0040, + 0x1d70: 0x0039, 0x1d71: 0x0ee9, 0x1d72: 0x1159, 0x1d73: 0x0ef9, 0x1d74: 0x0f09, 0x1d75: 0x1199, + 0x1d76: 0x0f31, 0x1d77: 0x0249, 0x1d78: 0x0f41, 0x1d79: 0x0259, 0x1d7a: 0x0f51, 0x1d7b: 0x0359, + 0x1d7c: 0x0f61, 0x1d7d: 0x0f71, 0x1d7e: 0x00d9, 0x1d7f: 0x0f99, // Block 0x76, offset 0x1d80 - 0x1d80: 0xcaf9, 0x1d81: 0xcb19, 0x1d82: 0xcb39, 0x1d83: 0x8b1d, 0x1d84: 0xcb59, 0x1d85: 0xcb79, - 0x1d86: 0xcb99, 0x1d87: 0xcbb9, 0x1d88: 0xcbd9, 0x1d89: 0xcbf9, 0x1d8a: 0xcc19, 0x1d8b: 0xcc39, - 0x1d8c: 0xcc59, 0x1d8d: 0x8b3d, 0x1d8e: 0xcc79, 0x1d8f: 0xcc99, 0x1d90: 0xccb9, 0x1d91: 0xccd9, - 0x1d92: 0x8b5d, 0x1d93: 0xccf9, 0x1d94: 0xcd19, 0x1d95: 0xc429, 0x1d96: 0x8b7d, 0x1d97: 0xcd39, - 0x1d98: 0xcd59, 0x1d99: 0xcd79, 0x1d9a: 0xcd99, 0x1d9b: 0xcdb9, 0x1d9c: 0x8b9d, 0x1d9d: 0xcdd9, - 0x1d9e: 0xcdf9, 0x1d9f: 0xce19, 0x1da0: 0xce39, 0x1da1: 0xce59, 0x1da2: 0xc789, 0x1da3: 0xce79, - 0x1da4: 0xce99, 0x1da5: 0xceb9, 0x1da6: 0xced9, 0x1da7: 0xcef9, 0x1da8: 0xcf19, 0x1da9: 0xcf39, - 0x1daa: 0xcf59, 0x1dab: 0xcf79, 0x1dac: 0xcf99, 0x1dad: 0xcfb9, 0x1dae: 0xcfd9, 0x1daf: 0xcff9, - 0x1db0: 0xd019, 0x1db1: 0xd039, 0x1db2: 0xd039, 0x1db3: 0xd039, 0x1db4: 0x8bbd, 0x1db5: 0xd059, - 0x1db6: 0xd079, 0x1db7: 0xd099, 0x1db8: 0x8bdd, 0x1db9: 0xd0b9, 0x1dba: 0xd0d9, 0x1dbb: 0xd0f9, - 0x1dbc: 0xd119, 0x1dbd: 0xd139, 0x1dbe: 0xd159, 0x1dbf: 0xd179, + 0x1d80: 0x2039, 0x1d81: 0x0269, 0x1d82: 0x01d9, 0x1d83: 0x0fa9, 0x1d84: 0x0fb9, 0x1d85: 0x1089, + 0x1d86: 0x0279, 0x1d87: 0x0369, 0x1d88: 0x0289, 0x1d89: 0x13d1, 0x1d8a: 0xc129, 0x1d8b: 0x65b1, + 0x1d8c: 0xc141, 0x1d8d: 0x1441, 0x1d8e: 0xc159, 0x1d8f: 0xc179, 0x1d90: 0x0018, 0x1d91: 0x0018, + 0x1d92: 0x0018, 0x1d93: 0x0018, 0x1d94: 0x0018, 0x1d95: 0x0018, 0x1d96: 0x0018, 0x1d97: 0x0018, + 0x1d98: 0x0018, 0x1d99: 0x0018, 0x1d9a: 0x0018, 0x1d9b: 0x0018, 0x1d9c: 0x0018, 0x1d9d: 0x0018, + 0x1d9e: 0x0018, 0x1d9f: 0x0018, 0x1da0: 0x0018, 0x1da1: 0x0018, 0x1da2: 0x0018, 0x1da3: 0x0018, + 0x1da4: 0x0018, 0x1da5: 0x0018, 0x1da6: 0x0018, 0x1da7: 0x0018, 0x1da8: 0x0018, 0x1da9: 0x0018, + 0x1daa: 0xc191, 0x1dab: 0xc1a9, 0x1dac: 0x0040, 0x1dad: 0x0040, 0x1dae: 0x0040, 0x1daf: 0x0040, + 0x1db0: 0x0018, 0x1db1: 0x0018, 0x1db2: 0x0018, 0x1db3: 0x0018, 0x1db4: 0x0018, 0x1db5: 0x0018, + 0x1db6: 0x0018, 0x1db7: 0x0018, 0x1db8: 0x0018, 0x1db9: 0x0018, 0x1dba: 0x0018, 0x1dbb: 0x0018, + 0x1dbc: 0x0018, 0x1dbd: 0x0018, 0x1dbe: 0x0018, 0x1dbf: 0x0018, // Block 0x77, offset 0x1dc0 - 0x1dc0: 0xd199, 0x1dc1: 0xd1b9, 0x1dc2: 0xd1d9, 0x1dc3: 0xd1f9, 0x1dc4: 0xd219, 0x1dc5: 0xd239, - 0x1dc6: 0xd239, 0x1dc7: 0xd259, 0x1dc8: 0xd279, 0x1dc9: 0xd299, 0x1dca: 0xd2b9, 0x1dcb: 0xd2d9, - 0x1dcc: 0xd2f9, 0x1dcd: 0xd319, 0x1dce: 0xd339, 0x1dcf: 0xd359, 0x1dd0: 0xd379, 0x1dd1: 0xd399, - 0x1dd2: 0xd3b9, 0x1dd3: 0xd3d9, 0x1dd4: 0xd3f9, 0x1dd5: 0xd419, 0x1dd6: 0xd439, 0x1dd7: 0xd459, - 0x1dd8: 0xd479, 0x1dd9: 0x8bfd, 0x1dda: 0xd499, 0x1ddb: 0xd4b9, 0x1ddc: 0xd4d9, 0x1ddd: 0xc309, - 0x1dde: 0xd4f9, 0x1ddf: 0xd519, 0x1de0: 0x8c1d, 0x1de1: 0x8c3d, 0x1de2: 0xd539, 0x1de3: 0xd559, - 0x1de4: 0xd579, 0x1de5: 0xd599, 0x1de6: 0xd5b9, 0x1de7: 0xd5d9, 0x1de8: 0x0040, 0x1de9: 0xd5f9, - 0x1dea: 0xd619, 0x1deb: 0xd619, 0x1dec: 0x8c5d, 0x1ded: 0xd639, 0x1dee: 0xd659, 0x1def: 0xd679, - 0x1df0: 0xd699, 0x1df1: 0x8c7d, 0x1df2: 0xd6b9, 0x1df3: 0xd6d9, 0x1df4: 0x0040, 0x1df5: 0xd6f9, - 0x1df6: 0xd719, 0x1df7: 0xd739, 0x1df8: 0xd759, 0x1df9: 0xd779, 0x1dfa: 0xd799, 0x1dfb: 0x8c9d, - 0x1dfc: 0xd7b9, 0x1dfd: 0x8cbd, 0x1dfe: 0xd7d9, 0x1dff: 0xd7f9, 
+ 0x1dc0: 0xc1d9, 0x1dc1: 0xc211, 0x1dc2: 0xc249, 0x1dc3: 0x0040, 0x1dc4: 0x0040, 0x1dc5: 0x0040, + 0x1dc6: 0x0040, 0x1dc7: 0x0040, 0x1dc8: 0x0040, 0x1dc9: 0x0040, 0x1dca: 0x0040, 0x1dcb: 0x0040, + 0x1dcc: 0x0040, 0x1dcd: 0x0040, 0x1dce: 0x0040, 0x1dcf: 0x0040, 0x1dd0: 0xc269, 0x1dd1: 0xc289, + 0x1dd2: 0xc2a9, 0x1dd3: 0xc2c9, 0x1dd4: 0xc2e9, 0x1dd5: 0xc309, 0x1dd6: 0xc329, 0x1dd7: 0xc349, + 0x1dd8: 0xc369, 0x1dd9: 0xc389, 0x1dda: 0xc3a9, 0x1ddb: 0xc3c9, 0x1ddc: 0xc3e9, 0x1ddd: 0xc409, + 0x1dde: 0xc429, 0x1ddf: 0xc449, 0x1de0: 0xc469, 0x1de1: 0xc489, 0x1de2: 0xc4a9, 0x1de3: 0xc4c9, + 0x1de4: 0xc4e9, 0x1de5: 0xc509, 0x1de6: 0xc529, 0x1de7: 0xc549, 0x1de8: 0xc569, 0x1de9: 0xc589, + 0x1dea: 0xc5a9, 0x1deb: 0xc5c9, 0x1dec: 0xc5e9, 0x1ded: 0xc609, 0x1dee: 0xc629, 0x1def: 0xc649, + 0x1df0: 0xc669, 0x1df1: 0xc689, 0x1df2: 0xc6a9, 0x1df3: 0xc6c9, 0x1df4: 0xc6e9, 0x1df5: 0xc709, + 0x1df6: 0xc729, 0x1df7: 0xc749, 0x1df8: 0xc769, 0x1df9: 0xc789, 0x1dfa: 0xc7a9, 0x1dfb: 0xc7c9, + 0x1dfc: 0x0040, 0x1dfd: 0x0040, 0x1dfe: 0x0040, 0x1dff: 0x0040, // Block 0x78, offset 0x1e00 - 0x1e00: 0xd819, 0x1e01: 0xd839, 0x1e02: 0xd859, 0x1e03: 0xd879, 0x1e04: 0xd899, 0x1e05: 0xd8b9, - 0x1e06: 0xd8d9, 0x1e07: 0xd8f9, 0x1e08: 0xd919, 0x1e09: 0x8cdd, 0x1e0a: 0xd939, 0x1e0b: 0xd959, - 0x1e0c: 0xd979, 0x1e0d: 0xd999, 0x1e0e: 0xd9b9, 0x1e0f: 0x8cfd, 0x1e10: 0xd9d9, 0x1e11: 0x8d1d, - 0x1e12: 0x8d3d, 0x1e13: 0xd9f9, 0x1e14: 0xda19, 0x1e15: 0xda19, 0x1e16: 0xda39, 0x1e17: 0x8d5d, - 0x1e18: 0x8d7d, 0x1e19: 0xda59, 0x1e1a: 0xda79, 0x1e1b: 0xda99, 0x1e1c: 0xdab9, 0x1e1d: 0xdad9, - 0x1e1e: 0xdaf9, 0x1e1f: 0xdb19, 0x1e20: 0xdb39, 0x1e21: 0xdb59, 0x1e22: 0xdb79, 0x1e23: 0xdb99, - 0x1e24: 0x8d9d, 0x1e25: 0xdbb9, 0x1e26: 0xdbd9, 0x1e27: 0xdbf9, 0x1e28: 0xdc19, 0x1e29: 0xdbf9, - 0x1e2a: 0xdc39, 0x1e2b: 0xdc59, 0x1e2c: 0xdc79, 0x1e2d: 0xdc99, 0x1e2e: 0xdcb9, 0x1e2f: 0xdcd9, - 0x1e30: 0xdcf9, 0x1e31: 0xdd19, 0x1e32: 0xdd39, 0x1e33: 0xdd59, 0x1e34: 0xdd79, 0x1e35: 0xdd99, - 0x1e36: 0xddb9, 0x1e37: 0xddd9, 0x1e38: 0x8dbd, 0x1e39: 0xddf9, 0x1e3a: 0xde19, 0x1e3b: 0xde39, - 0x1e3c: 0xde59, 0x1e3d: 0xde79, 0x1e3e: 0x8ddd, 0x1e3f: 0xde99, + 0x1e00: 0xcaf9, 0x1e01: 0xcb19, 0x1e02: 0xcb39, 0x1e03: 0x8b1d, 0x1e04: 0xcb59, 0x1e05: 0xcb79, + 0x1e06: 0xcb99, 0x1e07: 0xcbb9, 0x1e08: 0xcbd9, 0x1e09: 0xcbf9, 0x1e0a: 0xcc19, 0x1e0b: 0xcc39, + 0x1e0c: 0xcc59, 0x1e0d: 0x8b3d, 0x1e0e: 0xcc79, 0x1e0f: 0xcc99, 0x1e10: 0xccb9, 0x1e11: 0xccd9, + 0x1e12: 0x8b5d, 0x1e13: 0xccf9, 0x1e14: 0xcd19, 0x1e15: 0xc429, 0x1e16: 0x8b7d, 0x1e17: 0xcd39, + 0x1e18: 0xcd59, 0x1e19: 0xcd79, 0x1e1a: 0xcd99, 0x1e1b: 0xcdb9, 0x1e1c: 0x8b9d, 0x1e1d: 0xcdd9, + 0x1e1e: 0xcdf9, 0x1e1f: 0xce19, 0x1e20: 0xce39, 0x1e21: 0xce59, 0x1e22: 0xc789, 0x1e23: 0xce79, + 0x1e24: 0xce99, 0x1e25: 0xceb9, 0x1e26: 0xced9, 0x1e27: 0xcef9, 0x1e28: 0xcf19, 0x1e29: 0xcf39, + 0x1e2a: 0xcf59, 0x1e2b: 0xcf79, 0x1e2c: 0xcf99, 0x1e2d: 0xcfb9, 0x1e2e: 0xcfd9, 0x1e2f: 0xcff9, + 0x1e30: 0xd019, 0x1e31: 0xd039, 0x1e32: 0xd039, 0x1e33: 0xd039, 0x1e34: 0x8bbd, 0x1e35: 0xd059, + 0x1e36: 0xd079, 0x1e37: 0xd099, 0x1e38: 0x8bdd, 0x1e39: 0xd0b9, 0x1e3a: 0xd0d9, 0x1e3b: 0xd0f9, + 0x1e3c: 0xd119, 0x1e3d: 0xd139, 0x1e3e: 0xd159, 0x1e3f: 0xd179, // Block 0x79, offset 0x1e40 - 0x1e40: 0xe599, 0x1e41: 0xe5b9, 0x1e42: 0xe5d9, 0x1e43: 0xe5f9, 0x1e44: 0xe619, 0x1e45: 0xe639, - 0x1e46: 0x8efd, 0x1e47: 0xe659, 0x1e48: 0xe679, 0x1e49: 0xe699, 0x1e4a: 0xe6b9, 0x1e4b: 0xe6d9, - 0x1e4c: 0xe6f9, 0x1e4d: 0x8f1d, 0x1e4e: 0xe719, 0x1e4f: 0xe739, 0x1e50: 0x8f3d, 0x1e51: 0x8f5d, - 0x1e52: 0xe759, 0x1e53: 0xe779, 0x1e54: 0xe799, 0x1e55: 
0xe7b9, 0x1e56: 0xe7d9, 0x1e57: 0xe7f9, - 0x1e58: 0xe819, 0x1e59: 0xe839, 0x1e5a: 0xe859, 0x1e5b: 0x8f7d, 0x1e5c: 0xe879, 0x1e5d: 0x8f9d, - 0x1e5e: 0xe899, 0x1e5f: 0x0040, 0x1e60: 0xe8b9, 0x1e61: 0xe8d9, 0x1e62: 0xe8f9, 0x1e63: 0x8fbd, - 0x1e64: 0xe919, 0x1e65: 0xe939, 0x1e66: 0x8fdd, 0x1e67: 0x8ffd, 0x1e68: 0xe959, 0x1e69: 0xe979, - 0x1e6a: 0xe999, 0x1e6b: 0xe9b9, 0x1e6c: 0xe9d9, 0x1e6d: 0xe9d9, 0x1e6e: 0xe9f9, 0x1e6f: 0xea19, - 0x1e70: 0xea39, 0x1e71: 0xea59, 0x1e72: 0xea79, 0x1e73: 0xea99, 0x1e74: 0xeab9, 0x1e75: 0x901d, - 0x1e76: 0xead9, 0x1e77: 0x903d, 0x1e78: 0xeaf9, 0x1e79: 0x905d, 0x1e7a: 0xeb19, 0x1e7b: 0x907d, - 0x1e7c: 0x909d, 0x1e7d: 0x90bd, 0x1e7e: 0xeb39, 0x1e7f: 0xeb59, + 0x1e40: 0xd199, 0x1e41: 0xd1b9, 0x1e42: 0xd1d9, 0x1e43: 0xd1f9, 0x1e44: 0xd219, 0x1e45: 0xd239, + 0x1e46: 0xd239, 0x1e47: 0xd259, 0x1e48: 0xd279, 0x1e49: 0xd299, 0x1e4a: 0xd2b9, 0x1e4b: 0xd2d9, + 0x1e4c: 0xd2f9, 0x1e4d: 0xd319, 0x1e4e: 0xd339, 0x1e4f: 0xd359, 0x1e50: 0xd379, 0x1e51: 0xd399, + 0x1e52: 0xd3b9, 0x1e53: 0xd3d9, 0x1e54: 0xd3f9, 0x1e55: 0xd419, 0x1e56: 0xd439, 0x1e57: 0xd459, + 0x1e58: 0xd479, 0x1e59: 0x8bfd, 0x1e5a: 0xd499, 0x1e5b: 0xd4b9, 0x1e5c: 0xd4d9, 0x1e5d: 0xc309, + 0x1e5e: 0xd4f9, 0x1e5f: 0xd519, 0x1e60: 0x8c1d, 0x1e61: 0x8c3d, 0x1e62: 0xd539, 0x1e63: 0xd559, + 0x1e64: 0xd579, 0x1e65: 0xd599, 0x1e66: 0xd5b9, 0x1e67: 0xd5d9, 0x1e68: 0x2040, 0x1e69: 0xd5f9, + 0x1e6a: 0xd619, 0x1e6b: 0xd619, 0x1e6c: 0x8c5d, 0x1e6d: 0xd639, 0x1e6e: 0xd659, 0x1e6f: 0xd679, + 0x1e70: 0xd699, 0x1e71: 0x8c7d, 0x1e72: 0xd6b9, 0x1e73: 0xd6d9, 0x1e74: 0x2040, 0x1e75: 0xd6f9, + 0x1e76: 0xd719, 0x1e77: 0xd739, 0x1e78: 0xd759, 0x1e79: 0xd779, 0x1e7a: 0xd799, 0x1e7b: 0x8c9d, + 0x1e7c: 0xd7b9, 0x1e7d: 0x8cbd, 0x1e7e: 0xd7d9, 0x1e7f: 0xd7f9, // Block 0x7a, offset 0x1e80 - 0x1e80: 0xeb79, 0x1e81: 0x90dd, 0x1e82: 0x90fd, 0x1e83: 0x911d, 0x1e84: 0x913d, 0x1e85: 0xeb99, - 0x1e86: 0xebb9, 0x1e87: 0xebb9, 0x1e88: 0xebd9, 0x1e89: 0xebf9, 0x1e8a: 0xec19, 0x1e8b: 0xec39, - 0x1e8c: 0xec59, 0x1e8d: 0x915d, 0x1e8e: 0xec79, 0x1e8f: 0xec99, 0x1e90: 0xecb9, 0x1e91: 0xecd9, - 0x1e92: 0x917d, 0x1e93: 0xecf9, 0x1e94: 0x919d, 0x1e95: 0x91bd, 0x1e96: 0xed19, 0x1e97: 0xed39, - 0x1e98: 0xed59, 0x1e99: 0xed79, 0x1e9a: 0xed99, 0x1e9b: 0xedb9, 0x1e9c: 0x91dd, 0x1e9d: 0x91fd, - 0x1e9e: 0x921d, 0x1e9f: 0x0040, 0x1ea0: 0xedd9, 0x1ea1: 0x923d, 0x1ea2: 0xedf9, 0x1ea3: 0xee19, - 0x1ea4: 0xee39, 0x1ea5: 0x925d, 0x1ea6: 0xee59, 0x1ea7: 0xee79, 0x1ea8: 0xee99, 0x1ea9: 0xeeb9, - 0x1eaa: 0xeed9, 0x1eab: 0x927d, 0x1eac: 0xeef9, 0x1ead: 0xef19, 0x1eae: 0xef39, 0x1eaf: 0xef59, - 0x1eb0: 0xef79, 0x1eb1: 0xef99, 0x1eb2: 0x929d, 0x1eb3: 0x92bd, 0x1eb4: 0xefb9, 0x1eb5: 0x92dd, - 0x1eb6: 0xefd9, 0x1eb7: 0x92fd, 0x1eb8: 0xeff9, 0x1eb9: 0xf019, 0x1eba: 0xf039, 0x1ebb: 0x931d, - 0x1ebc: 0x933d, 0x1ebd: 0xf059, 0x1ebe: 0x935d, 0x1ebf: 0xf079, + 0x1e80: 0xd819, 0x1e81: 0xd839, 0x1e82: 0xd859, 0x1e83: 0xd879, 0x1e84: 0xd899, 0x1e85: 0xd8b9, + 0x1e86: 0xd8d9, 0x1e87: 0xd8f9, 0x1e88: 0xd919, 0x1e89: 0x8cdd, 0x1e8a: 0xd939, 0x1e8b: 0xd959, + 0x1e8c: 0xd979, 0x1e8d: 0xd999, 0x1e8e: 0xd9b9, 0x1e8f: 0x8cfd, 0x1e90: 0xd9d9, 0x1e91: 0x8d1d, + 0x1e92: 0x8d3d, 0x1e93: 0xd9f9, 0x1e94: 0xda19, 0x1e95: 0xda19, 0x1e96: 0xda39, 0x1e97: 0x8d5d, + 0x1e98: 0x8d7d, 0x1e99: 0xda59, 0x1e9a: 0xda79, 0x1e9b: 0xda99, 0x1e9c: 0xdab9, 0x1e9d: 0xdad9, + 0x1e9e: 0xdaf9, 0x1e9f: 0xdb19, 0x1ea0: 0xdb39, 0x1ea1: 0xdb59, 0x1ea2: 0xdb79, 0x1ea3: 0xdb99, + 0x1ea4: 0x8d9d, 0x1ea5: 0xdbb9, 0x1ea6: 0xdbd9, 0x1ea7: 0xdbf9, 0x1ea8: 0xdc19, 0x1ea9: 0xdbf9, + 0x1eaa: 0xdc39, 0x1eab: 0xdc59, 0x1eac: 0xdc79, 
0x1ead: 0xdc99, 0x1eae: 0xdcb9, 0x1eaf: 0xdcd9, + 0x1eb0: 0xdcf9, 0x1eb1: 0xdd19, 0x1eb2: 0xdd39, 0x1eb3: 0xdd59, 0x1eb4: 0xdd79, 0x1eb5: 0xdd99, + 0x1eb6: 0xddb9, 0x1eb7: 0xddd9, 0x1eb8: 0x8dbd, 0x1eb9: 0xddf9, 0x1eba: 0xde19, 0x1ebb: 0xde39, + 0x1ebc: 0xde59, 0x1ebd: 0xde79, 0x1ebe: 0x8ddd, 0x1ebf: 0xde99, // Block 0x7b, offset 0x1ec0 - 0x1ec0: 0xf6b9, 0x1ec1: 0xf6d9, 0x1ec2: 0xf6f9, 0x1ec3: 0xf719, 0x1ec4: 0xf739, 0x1ec5: 0x951d, - 0x1ec6: 0xf759, 0x1ec7: 0xf779, 0x1ec8: 0xf799, 0x1ec9: 0xf7b9, 0x1eca: 0xf7d9, 0x1ecb: 0x953d, - 0x1ecc: 0x955d, 0x1ecd: 0xf7f9, 0x1ece: 0xf819, 0x1ecf: 0xf839, 0x1ed0: 0xf859, 0x1ed1: 0xf879, - 0x1ed2: 0xf899, 0x1ed3: 0x957d, 0x1ed4: 0xf8b9, 0x1ed5: 0xf8d9, 0x1ed6: 0xf8f9, 0x1ed7: 0xf919, - 0x1ed8: 0x959d, 0x1ed9: 0x95bd, 0x1eda: 0xf939, 0x1edb: 0xf959, 0x1edc: 0xf979, 0x1edd: 0x95dd, - 0x1ede: 0xf999, 0x1edf: 0xf9b9, 0x1ee0: 0x6815, 0x1ee1: 0x95fd, 0x1ee2: 0xf9d9, 0x1ee3: 0xf9f9, - 0x1ee4: 0xfa19, 0x1ee5: 0x961d, 0x1ee6: 0xfa39, 0x1ee7: 0xfa59, 0x1ee8: 0xfa79, 0x1ee9: 0xfa99, - 0x1eea: 0xfab9, 0x1eeb: 0xfad9, 0x1eec: 0xfaf9, 0x1eed: 0x963d, 0x1eee: 0xfb19, 0x1eef: 0xfb39, - 0x1ef0: 0xfb59, 0x1ef1: 0x965d, 0x1ef2: 0xfb79, 0x1ef3: 0xfb99, 0x1ef4: 0xfbb9, 0x1ef5: 0xfbd9, - 0x1ef6: 0x7b35, 0x1ef7: 0x967d, 0x1ef8: 0xfbf9, 0x1ef9: 0xfc19, 0x1efa: 0xfc39, 0x1efb: 0x969d, - 0x1efc: 0xfc59, 0x1efd: 0x96bd, 0x1efe: 0xfc79, 0x1eff: 0xfc79, + 0x1ec0: 0xe599, 0x1ec1: 0xe5b9, 0x1ec2: 0xe5d9, 0x1ec3: 0xe5f9, 0x1ec4: 0xe619, 0x1ec5: 0xe639, + 0x1ec6: 0x8efd, 0x1ec7: 0xe659, 0x1ec8: 0xe679, 0x1ec9: 0xe699, 0x1eca: 0xe6b9, 0x1ecb: 0xe6d9, + 0x1ecc: 0xe6f9, 0x1ecd: 0x8f1d, 0x1ece: 0xe719, 0x1ecf: 0xe739, 0x1ed0: 0x8f3d, 0x1ed1: 0x8f5d, + 0x1ed2: 0xe759, 0x1ed3: 0xe779, 0x1ed4: 0xe799, 0x1ed5: 0xe7b9, 0x1ed6: 0xe7d9, 0x1ed7: 0xe7f9, + 0x1ed8: 0xe819, 0x1ed9: 0xe839, 0x1eda: 0xe859, 0x1edb: 0x8f7d, 0x1edc: 0xe879, 0x1edd: 0x8f9d, + 0x1ede: 0xe899, 0x1edf: 0x2040, 0x1ee0: 0xe8b9, 0x1ee1: 0xe8d9, 0x1ee2: 0xe8f9, 0x1ee3: 0x8fbd, + 0x1ee4: 0xe919, 0x1ee5: 0xe939, 0x1ee6: 0x8fdd, 0x1ee7: 0x8ffd, 0x1ee8: 0xe959, 0x1ee9: 0xe979, + 0x1eea: 0xe999, 0x1eeb: 0xe9b9, 0x1eec: 0xe9d9, 0x1eed: 0xe9d9, 0x1eee: 0xe9f9, 0x1eef: 0xea19, + 0x1ef0: 0xea39, 0x1ef1: 0xea59, 0x1ef2: 0xea79, 0x1ef3: 0xea99, 0x1ef4: 0xeab9, 0x1ef5: 0x901d, + 0x1ef6: 0xead9, 0x1ef7: 0x903d, 0x1ef8: 0xeaf9, 0x1ef9: 0x905d, 0x1efa: 0xeb19, 0x1efb: 0x907d, + 0x1efc: 0x909d, 0x1efd: 0x90bd, 0x1efe: 0xeb39, 0x1eff: 0xeb59, // Block 0x7c, offset 0x1f00 - 0x1f00: 0xfc99, 0x1f01: 0x96dd, 0x1f02: 0xfcb9, 0x1f03: 0xfcd9, 0x1f04: 0xfcf9, 0x1f05: 0xfd19, - 0x1f06: 0xfd39, 0x1f07: 0xfd59, 0x1f08: 0xfd79, 0x1f09: 0x96fd, 0x1f0a: 0xfd99, 0x1f0b: 0xfdb9, - 0x1f0c: 0xfdd9, 0x1f0d: 0xfdf9, 0x1f0e: 0xfe19, 0x1f0f: 0xfe39, 0x1f10: 0x971d, 0x1f11: 0xfe59, - 0x1f12: 0x973d, 0x1f13: 0x975d, 0x1f14: 0x977d, 0x1f15: 0xfe79, 0x1f16: 0xfe99, 0x1f17: 0xfeb9, - 0x1f18: 0xfed9, 0x1f19: 0xfef9, 0x1f1a: 0xff19, 0x1f1b: 0xff39, 0x1f1c: 0xff59, 0x1f1d: 0x979d, - 0x1f1e: 0x0040, 0x1f1f: 0x0040, 0x1f20: 0x0040, 0x1f21: 0x0040, 0x1f22: 0x0040, 0x1f23: 0x0040, - 0x1f24: 0x0040, 0x1f25: 0x0040, 0x1f26: 0x0040, 0x1f27: 0x0040, 0x1f28: 0x0040, 0x1f29: 0x0040, - 0x1f2a: 0x0040, 0x1f2b: 0x0040, 0x1f2c: 0x0040, 0x1f2d: 0x0040, 0x1f2e: 0x0040, 0x1f2f: 0x0040, - 0x1f30: 0x0040, 0x1f31: 0x0040, 0x1f32: 0x0040, 0x1f33: 0x0040, 0x1f34: 0x0040, 0x1f35: 0x0040, - 0x1f36: 0x0040, 0x1f37: 0x0040, 0x1f38: 0x0040, 0x1f39: 0x0040, 0x1f3a: 0x0040, 0x1f3b: 0x0040, - 0x1f3c: 0x0040, 0x1f3d: 0x0040, 0x1f3e: 0x0040, 0x1f3f: 0x0040, + 0x1f00: 0xeb79, 0x1f01: 0x90dd, 0x1f02: 
0x90fd, 0x1f03: 0x911d, 0x1f04: 0x913d, 0x1f05: 0xeb99, + 0x1f06: 0xebb9, 0x1f07: 0xebb9, 0x1f08: 0xebd9, 0x1f09: 0xebf9, 0x1f0a: 0xec19, 0x1f0b: 0xec39, + 0x1f0c: 0xec59, 0x1f0d: 0x915d, 0x1f0e: 0xec79, 0x1f0f: 0xec99, 0x1f10: 0xecb9, 0x1f11: 0xecd9, + 0x1f12: 0x917d, 0x1f13: 0xecf9, 0x1f14: 0x919d, 0x1f15: 0x91bd, 0x1f16: 0xed19, 0x1f17: 0xed39, + 0x1f18: 0xed59, 0x1f19: 0xed79, 0x1f1a: 0xed99, 0x1f1b: 0xedb9, 0x1f1c: 0x91dd, 0x1f1d: 0x91fd, + 0x1f1e: 0x921d, 0x1f1f: 0x2040, 0x1f20: 0xedd9, 0x1f21: 0x923d, 0x1f22: 0xedf9, 0x1f23: 0xee19, + 0x1f24: 0xee39, 0x1f25: 0x925d, 0x1f26: 0xee59, 0x1f27: 0xee79, 0x1f28: 0xee99, 0x1f29: 0xeeb9, + 0x1f2a: 0xeed9, 0x1f2b: 0x927d, 0x1f2c: 0xeef9, 0x1f2d: 0xef19, 0x1f2e: 0xef39, 0x1f2f: 0xef59, + 0x1f30: 0xef79, 0x1f31: 0xef99, 0x1f32: 0x929d, 0x1f33: 0x92bd, 0x1f34: 0xefb9, 0x1f35: 0x92dd, + 0x1f36: 0xefd9, 0x1f37: 0x92fd, 0x1f38: 0xeff9, 0x1f39: 0xf019, 0x1f3a: 0xf039, 0x1f3b: 0x931d, + 0x1f3c: 0x933d, 0x1f3d: 0xf059, 0x1f3e: 0x935d, 0x1f3f: 0xf079, + // Block 0x7d, offset 0x1f40 + 0x1f40: 0xf6b9, 0x1f41: 0xf6d9, 0x1f42: 0xf6f9, 0x1f43: 0xf719, 0x1f44: 0xf739, 0x1f45: 0x951d, + 0x1f46: 0xf759, 0x1f47: 0xf779, 0x1f48: 0xf799, 0x1f49: 0xf7b9, 0x1f4a: 0xf7d9, 0x1f4b: 0x953d, + 0x1f4c: 0x955d, 0x1f4d: 0xf7f9, 0x1f4e: 0xf819, 0x1f4f: 0xf839, 0x1f50: 0xf859, 0x1f51: 0xf879, + 0x1f52: 0xf899, 0x1f53: 0x957d, 0x1f54: 0xf8b9, 0x1f55: 0xf8d9, 0x1f56: 0xf8f9, 0x1f57: 0xf919, + 0x1f58: 0x959d, 0x1f59: 0x95bd, 0x1f5a: 0xf939, 0x1f5b: 0xf959, 0x1f5c: 0xf979, 0x1f5d: 0x95dd, + 0x1f5e: 0xf999, 0x1f5f: 0xf9b9, 0x1f60: 0x6815, 0x1f61: 0x95fd, 0x1f62: 0xf9d9, 0x1f63: 0xf9f9, + 0x1f64: 0xfa19, 0x1f65: 0x961d, 0x1f66: 0xfa39, 0x1f67: 0xfa59, 0x1f68: 0xfa79, 0x1f69: 0xfa99, + 0x1f6a: 0xfab9, 0x1f6b: 0xfad9, 0x1f6c: 0xfaf9, 0x1f6d: 0x963d, 0x1f6e: 0xfb19, 0x1f6f: 0xfb39, + 0x1f70: 0xfb59, 0x1f71: 0x965d, 0x1f72: 0xfb79, 0x1f73: 0xfb99, 0x1f74: 0xfbb9, 0x1f75: 0xfbd9, + 0x1f76: 0x7b35, 0x1f77: 0x967d, 0x1f78: 0xfbf9, 0x1f79: 0xfc19, 0x1f7a: 0xfc39, 0x1f7b: 0x969d, + 0x1f7c: 0xfc59, 0x1f7d: 0x96bd, 0x1f7e: 0xfc79, 0x1f7f: 0xfc79, + // Block 0x7e, offset 0x1f80 + 0x1f80: 0xfc99, 0x1f81: 0x96dd, 0x1f82: 0xfcb9, 0x1f83: 0xfcd9, 0x1f84: 0xfcf9, 0x1f85: 0xfd19, + 0x1f86: 0xfd39, 0x1f87: 0xfd59, 0x1f88: 0xfd79, 0x1f89: 0x96fd, 0x1f8a: 0xfd99, 0x1f8b: 0xfdb9, + 0x1f8c: 0xfdd9, 0x1f8d: 0xfdf9, 0x1f8e: 0xfe19, 0x1f8f: 0xfe39, 0x1f90: 0x971d, 0x1f91: 0xfe59, + 0x1f92: 0x973d, 0x1f93: 0x975d, 0x1f94: 0x977d, 0x1f95: 0xfe79, 0x1f96: 0xfe99, 0x1f97: 0xfeb9, + 0x1f98: 0xfed9, 0x1f99: 0xfef9, 0x1f9a: 0xff19, 0x1f9b: 0xff39, 0x1f9c: 0xff59, 0x1f9d: 0x979d, + 0x1f9e: 0x0040, 0x1f9f: 0x0040, 0x1fa0: 0x0040, 0x1fa1: 0x0040, 0x1fa2: 0x0040, 0x1fa3: 0x0040, + 0x1fa4: 0x0040, 0x1fa5: 0x0040, 0x1fa6: 0x0040, 0x1fa7: 0x0040, 0x1fa8: 0x0040, 0x1fa9: 0x0040, + 0x1faa: 0x0040, 0x1fab: 0x0040, 0x1fac: 0x0040, 0x1fad: 0x0040, 0x1fae: 0x0040, 0x1faf: 0x0040, + 0x1fb0: 0x0040, 0x1fb1: 0x0040, 0x1fb2: 0x0040, 0x1fb3: 0x0040, 0x1fb4: 0x0040, 0x1fb5: 0x0040, + 0x1fb6: 0x0040, 0x1fb7: 0x0040, 0x1fb8: 0x0040, 0x1fb9: 0x0040, 0x1fba: 0x0040, 0x1fbb: 0x0040, + 0x1fbc: 0x0040, 0x1fbd: 0x0040, 0x1fbe: 0x0040, 0x1fbf: 0x0040, } -// idnaIndex: 35 blocks, 2240 entries, 4480 bytes +// idnaIndex: 36 blocks, 2304 entries, 4608 bytes // Block 0 is the zero block. 
-var idnaIndex = [2240]uint16{ +var idnaIndex = [2304]uint16{ // Block 0x0, offset 0x0 // Block 0x1, offset 0x40 // Block 0x2, offset 0x80 // Block 0x3, offset 0xc0 - 0xc2: 0x01, 0xc3: 0x7b, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05, - 0xc8: 0x06, 0xc9: 0x7c, 0xca: 0x7d, 0xcb: 0x07, 0xcc: 0x7e, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a, - 0xd0: 0x7f, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x80, 0xd6: 0x81, 0xd7: 0x82, - 0xd8: 0x0f, 0xd9: 0x83, 0xda: 0x84, 0xdb: 0x10, 0xdc: 0x11, 0xdd: 0x85, 0xde: 0x86, 0xdf: 0x87, + 0xc2: 0x01, 0xc3: 0x7d, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05, + 0xc8: 0x06, 0xc9: 0x7e, 0xca: 0x7f, 0xcb: 0x07, 0xcc: 0x80, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a, + 0xd0: 0x81, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x82, 0xd6: 0x83, 0xd7: 0x84, + 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x85, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x86, 0xde: 0x87, 0xdf: 0x88, 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07, 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c, - 0xf0: 0x1c, 0xf1: 0x1d, 0xf2: 0x1d, 0xf3: 0x1f, 0xf4: 0x20, + 0xf0: 0x1d, 0xf1: 0x1e, 0xf2: 0x1e, 0xf3: 0x20, 0xf4: 0x21, // Block 0x4, offset 0x100 - 0x120: 0x88, 0x121: 0x89, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x12, 0x126: 0x13, 0x127: 0x14, - 0x128: 0x15, 0x129: 0x16, 0x12a: 0x17, 0x12b: 0x18, 0x12c: 0x19, 0x12d: 0x1a, 0x12e: 0x1b, 0x12f: 0x8d, - 0x130: 0x8e, 0x131: 0x1c, 0x132: 0x1d, 0x133: 0x1e, 0x134: 0x8f, 0x135: 0x1f, 0x136: 0x90, 0x137: 0x91, - 0x138: 0x92, 0x139: 0x93, 0x13a: 0x20, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x21, 0x13e: 0x22, 0x13f: 0x96, + 0x120: 0x89, 0x121: 0x13, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x14, 0x126: 0x15, 0x127: 0x16, + 0x128: 0x17, 0x129: 0x18, 0x12a: 0x19, 0x12b: 0x1a, 0x12c: 0x1b, 0x12d: 0x1c, 0x12e: 0x1d, 0x12f: 0x8d, + 0x130: 0x8e, 0x131: 0x1e, 0x132: 0x1f, 0x133: 0x20, 0x134: 0x8f, 0x135: 0x21, 0x136: 0x90, 0x137: 0x91, + 0x138: 0x92, 0x139: 0x93, 0x13a: 0x22, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x23, 0x13e: 0x24, 0x13f: 0x96, // Block 0x5, offset 0x140 - 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9b, 0x147: 0x9b, - 0x148: 0x9d, 0x149: 0x9e, 0x14a: 0x9f, 0x14b: 0xa0, 0x14c: 0xa1, 0x14d: 0xa2, 0x14e: 0xa3, 0x14f: 0xa4, - 0x150: 0xa5, 0x151: 0x9d, 0x152: 0x9d, 0x153: 0x9d, 0x154: 0x9d, 0x155: 0x9d, 0x156: 0x9d, 0x157: 0x9d, - 0x158: 0x9d, 0x159: 0xa6, 0x15a: 0xa7, 0x15b: 0xa8, 0x15c: 0xa9, 0x15d: 0xaa, 0x15e: 0xab, 0x15f: 0xac, - 0x160: 0xad, 0x161: 0xae, 0x162: 0xaf, 0x163: 0xb0, 0x164: 0xb1, 0x165: 0xb2, 0x166: 0xb3, 0x167: 0xb4, - 0x168: 0xb5, 0x169: 0xb6, 0x16a: 0xb7, 0x16b: 0xb8, 0x16c: 0xb9, 0x16d: 0xba, 0x16e: 0xbb, 0x16f: 0xbc, - 0x170: 0xbd, 0x171: 0xbe, 0x172: 0xbf, 0x173: 0xc0, 0x174: 0x23, 0x175: 0x24, 0x176: 0x25, 0x177: 0xc1, - 0x178: 0x26, 0x179: 0x26, 0x17a: 0x27, 0x17b: 0x26, 0x17c: 0xc2, 0x17d: 0x28, 0x17e: 0x29, 0x17f: 0x2a, + 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9d, 0x147: 0x9e, + 0x148: 0x9f, 0x149: 0xa0, 0x14a: 0xa1, 0x14b: 0xa2, 0x14c: 0xa3, 0x14d: 0xa4, 0x14e: 0xa5, 0x14f: 0xa6, + 0x150: 0xa7, 0x151: 0x9f, 0x152: 0x9f, 0x153: 0x9f, 0x154: 0x9f, 0x155: 0x9f, 0x156: 0x9f, 0x157: 0x9f, + 0x158: 0x9f, 0x159: 0xa8, 0x15a: 0xa9, 0x15b: 0xaa, 0x15c: 0xab, 0x15d: 0xac, 0x15e: 0xad, 0x15f: 0xae, + 0x160: 0xaf, 0x161: 0xb0, 0x162: 0xb1, 0x163: 0xb2, 0x164: 0xb3, 0x165: 0xb4, 0x166: 0xb5, 0x167: 0xb6, + 0x168: 0xb7, 0x169: 0xb8, 0x16a: 0xb9, 0x16b: 0xba, 
0x16c: 0xbb, 0x16d: 0xbc, 0x16e: 0xbd, 0x16f: 0xbe, + 0x170: 0xbf, 0x171: 0xc0, 0x172: 0xc1, 0x173: 0xc2, 0x174: 0x25, 0x175: 0x26, 0x176: 0x27, 0x177: 0xc3, + 0x178: 0x28, 0x179: 0x28, 0x17a: 0x29, 0x17b: 0x28, 0x17c: 0xc4, 0x17d: 0x2a, 0x17e: 0x2b, 0x17f: 0x2c, // Block 0x6, offset 0x180 - 0x180: 0x2b, 0x181: 0x2c, 0x182: 0x2d, 0x183: 0xc3, 0x184: 0x2e, 0x185: 0x2f, 0x186: 0xc4, 0x187: 0x9b, - 0x188: 0xc5, 0x189: 0xc6, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc7, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0xc8, - 0x190: 0xc9, 0x191: 0x30, 0x192: 0x31, 0x193: 0x32, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b, + 0x180: 0x2d, 0x181: 0x2e, 0x182: 0x2f, 0x183: 0xc5, 0x184: 0x30, 0x185: 0x31, 0x186: 0xc6, 0x187: 0x9b, + 0x188: 0xc7, 0x189: 0xc8, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc9, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0x9b, + 0x190: 0xca, 0x191: 0x32, 0x192: 0x33, 0x193: 0x34, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b, 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b, 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b, - 0x1a8: 0xca, 0x1a9: 0xcb, 0x1aa: 0x9b, 0x1ab: 0xcc, 0x1ac: 0x9b, 0x1ad: 0xcd, 0x1ae: 0xce, 0x1af: 0xcf, - 0x1b0: 0xd0, 0x1b1: 0x33, 0x1b2: 0x26, 0x1b3: 0x34, 0x1b4: 0xd1, 0x1b5: 0xd2, 0x1b6: 0xd3, 0x1b7: 0xd4, - 0x1b8: 0xd5, 0x1b9: 0xd6, 0x1ba: 0xd7, 0x1bb: 0xd8, 0x1bc: 0xd9, 0x1bd: 0xda, 0x1be: 0xdb, 0x1bf: 0x35, + 0x1a8: 0xcb, 0x1a9: 0xcc, 0x1aa: 0x9b, 0x1ab: 0xcd, 0x1ac: 0x9b, 0x1ad: 0xce, 0x1ae: 0xcf, 0x1af: 0xd0, + 0x1b0: 0xd1, 0x1b1: 0x35, 0x1b2: 0x28, 0x1b3: 0x36, 0x1b4: 0xd2, 0x1b5: 0xd3, 0x1b6: 0xd4, 0x1b7: 0xd5, + 0x1b8: 0xd6, 0x1b9: 0xd7, 0x1ba: 0xd8, 0x1bb: 0xd9, 0x1bc: 0xda, 0x1bd: 0xdb, 0x1be: 0xdc, 0x1bf: 0x37, // Block 0x7, offset 0x1c0 - 0x1c0: 0x36, 0x1c1: 0xdc, 0x1c2: 0xdd, 0x1c3: 0xde, 0x1c4: 0xdf, 0x1c5: 0x37, 0x1c6: 0x38, 0x1c7: 0xe0, - 0x1c8: 0xe1, 0x1c9: 0x39, 0x1ca: 0x3a, 0x1cb: 0x3b, 0x1cc: 0x3c, 0x1cd: 0x3d, 0x1ce: 0x3e, 0x1cf: 0x3f, - 0x1d0: 0x9d, 0x1d1: 0x9d, 0x1d2: 0x9d, 0x1d3: 0x9d, 0x1d4: 0x9d, 0x1d5: 0x9d, 0x1d6: 0x9d, 0x1d7: 0x9d, - 0x1d8: 0x9d, 0x1d9: 0x9d, 0x1da: 0x9d, 0x1db: 0x9d, 0x1dc: 0x9d, 0x1dd: 0x9d, 0x1de: 0x9d, 0x1df: 0x9d, - 0x1e0: 0x9d, 0x1e1: 0x9d, 0x1e2: 0x9d, 0x1e3: 0x9d, 0x1e4: 0x9d, 0x1e5: 0x9d, 0x1e6: 0x9d, 0x1e7: 0x9d, - 0x1e8: 0x9d, 0x1e9: 0x9d, 0x1ea: 0x9d, 0x1eb: 0x9d, 0x1ec: 0x9d, 0x1ed: 0x9d, 0x1ee: 0x9d, 0x1ef: 0x9d, - 0x1f0: 0x9d, 0x1f1: 0x9d, 0x1f2: 0x9d, 0x1f3: 0x9d, 0x1f4: 0x9d, 0x1f5: 0x9d, 0x1f6: 0x9d, 0x1f7: 0x9d, - 0x1f8: 0x9d, 0x1f9: 0x9d, 0x1fa: 0x9d, 0x1fb: 0x9d, 0x1fc: 0x9d, 0x1fd: 0x9d, 0x1fe: 0x9d, 0x1ff: 0x9d, + 0x1c0: 0x38, 0x1c1: 0xdd, 0x1c2: 0xde, 0x1c3: 0xdf, 0x1c4: 0xe0, 0x1c5: 0x39, 0x1c6: 0x3a, 0x1c7: 0xe1, + 0x1c8: 0xe2, 0x1c9: 0x3b, 0x1ca: 0x3c, 0x1cb: 0x3d, 0x1cc: 0x3e, 0x1cd: 0x3f, 0x1ce: 0x40, 0x1cf: 0x41, + 0x1d0: 0x9f, 0x1d1: 0x9f, 0x1d2: 0x9f, 0x1d3: 0x9f, 0x1d4: 0x9f, 0x1d5: 0x9f, 0x1d6: 0x9f, 0x1d7: 0x9f, + 0x1d8: 0x9f, 0x1d9: 0x9f, 0x1da: 0x9f, 0x1db: 0x9f, 0x1dc: 0x9f, 0x1dd: 0x9f, 0x1de: 0x9f, 0x1df: 0x9f, + 0x1e0: 0x9f, 0x1e1: 0x9f, 0x1e2: 0x9f, 0x1e3: 0x9f, 0x1e4: 0x9f, 0x1e5: 0x9f, 0x1e6: 0x9f, 0x1e7: 0x9f, + 0x1e8: 0x9f, 0x1e9: 0x9f, 0x1ea: 0x9f, 0x1eb: 0x9f, 0x1ec: 0x9f, 0x1ed: 0x9f, 0x1ee: 0x9f, 0x1ef: 0x9f, + 0x1f0: 0x9f, 0x1f1: 0x9f, 0x1f2: 0x9f, 0x1f3: 0x9f, 0x1f4: 0x9f, 0x1f5: 0x9f, 0x1f6: 0x9f, 0x1f7: 0x9f, + 0x1f8: 0x9f, 0x1f9: 0x9f, 0x1fa: 0x9f, 0x1fb: 0x9f, 0x1fc: 0x9f, 0x1fd: 0x9f, 0x1fe: 0x9f, 0x1ff: 0x9f, // Block 0x8, offset 0x200 - 0x200: 0x9d, 0x201: 0x9d, 
0x202: 0x9d, 0x203: 0x9d, 0x204: 0x9d, 0x205: 0x9d, 0x206: 0x9d, 0x207: 0x9d, - 0x208: 0x9d, 0x209: 0x9d, 0x20a: 0x9d, 0x20b: 0x9d, 0x20c: 0x9d, 0x20d: 0x9d, 0x20e: 0x9d, 0x20f: 0x9d, - 0x210: 0x9d, 0x211: 0x9d, 0x212: 0x9d, 0x213: 0x9d, 0x214: 0x9d, 0x215: 0x9d, 0x216: 0x9d, 0x217: 0x9d, - 0x218: 0x9d, 0x219: 0x9d, 0x21a: 0x9d, 0x21b: 0x9d, 0x21c: 0x9d, 0x21d: 0x9d, 0x21e: 0x9d, 0x21f: 0x9d, - 0x220: 0x9d, 0x221: 0x9d, 0x222: 0x9d, 0x223: 0x9d, 0x224: 0x9d, 0x225: 0x9d, 0x226: 0x9d, 0x227: 0x9d, - 0x228: 0x9d, 0x229: 0x9d, 0x22a: 0x9d, 0x22b: 0x9d, 0x22c: 0x9d, 0x22d: 0x9d, 0x22e: 0x9d, 0x22f: 0x9d, - 0x230: 0x9d, 0x231: 0x9d, 0x232: 0x9d, 0x233: 0x9d, 0x234: 0x9d, 0x235: 0x9d, 0x236: 0xb0, 0x237: 0x9b, - 0x238: 0x9d, 0x239: 0x9d, 0x23a: 0x9d, 0x23b: 0x9d, 0x23c: 0x9d, 0x23d: 0x9d, 0x23e: 0x9d, 0x23f: 0x9d, + 0x200: 0x9f, 0x201: 0x9f, 0x202: 0x9f, 0x203: 0x9f, 0x204: 0x9f, 0x205: 0x9f, 0x206: 0x9f, 0x207: 0x9f, + 0x208: 0x9f, 0x209: 0x9f, 0x20a: 0x9f, 0x20b: 0x9f, 0x20c: 0x9f, 0x20d: 0x9f, 0x20e: 0x9f, 0x20f: 0x9f, + 0x210: 0x9f, 0x211: 0x9f, 0x212: 0x9f, 0x213: 0x9f, 0x214: 0x9f, 0x215: 0x9f, 0x216: 0x9f, 0x217: 0x9f, + 0x218: 0x9f, 0x219: 0x9f, 0x21a: 0x9f, 0x21b: 0x9f, 0x21c: 0x9f, 0x21d: 0x9f, 0x21e: 0x9f, 0x21f: 0x9f, + 0x220: 0x9f, 0x221: 0x9f, 0x222: 0x9f, 0x223: 0x9f, 0x224: 0x9f, 0x225: 0x9f, 0x226: 0x9f, 0x227: 0x9f, + 0x228: 0x9f, 0x229: 0x9f, 0x22a: 0x9f, 0x22b: 0x9f, 0x22c: 0x9f, 0x22d: 0x9f, 0x22e: 0x9f, 0x22f: 0x9f, + 0x230: 0x9f, 0x231: 0x9f, 0x232: 0x9f, 0x233: 0x9f, 0x234: 0x9f, 0x235: 0x9f, 0x236: 0xb2, 0x237: 0x9b, + 0x238: 0x9f, 0x239: 0x9f, 0x23a: 0x9f, 0x23b: 0x9f, 0x23c: 0x9f, 0x23d: 0x9f, 0x23e: 0x9f, 0x23f: 0x9f, // Block 0x9, offset 0x240 - 0x240: 0x9d, 0x241: 0x9d, 0x242: 0x9d, 0x243: 0x9d, 0x244: 0x9d, 0x245: 0x9d, 0x246: 0x9d, 0x247: 0x9d, - 0x248: 0x9d, 0x249: 0x9d, 0x24a: 0x9d, 0x24b: 0x9d, 0x24c: 0x9d, 0x24d: 0x9d, 0x24e: 0x9d, 0x24f: 0x9d, - 0x250: 0x9d, 0x251: 0x9d, 0x252: 0x9d, 0x253: 0x9d, 0x254: 0x9d, 0x255: 0x9d, 0x256: 0x9d, 0x257: 0x9d, - 0x258: 0x9d, 0x259: 0x9d, 0x25a: 0x9d, 0x25b: 0x9d, 0x25c: 0x9d, 0x25d: 0x9d, 0x25e: 0x9d, 0x25f: 0x9d, - 0x260: 0x9d, 0x261: 0x9d, 0x262: 0x9d, 0x263: 0x9d, 0x264: 0x9d, 0x265: 0x9d, 0x266: 0x9d, 0x267: 0x9d, - 0x268: 0x9d, 0x269: 0x9d, 0x26a: 0x9d, 0x26b: 0x9d, 0x26c: 0x9d, 0x26d: 0x9d, 0x26e: 0x9d, 0x26f: 0x9d, - 0x270: 0x9d, 0x271: 0x9d, 0x272: 0x9d, 0x273: 0x9d, 0x274: 0x9d, 0x275: 0x9d, 0x276: 0x9d, 0x277: 0x9d, - 0x278: 0x9d, 0x279: 0x9d, 0x27a: 0x9d, 0x27b: 0x9d, 0x27c: 0x9d, 0x27d: 0x9d, 0x27e: 0x9d, 0x27f: 0x9d, + 0x240: 0x9f, 0x241: 0x9f, 0x242: 0x9f, 0x243: 0x9f, 0x244: 0x9f, 0x245: 0x9f, 0x246: 0x9f, 0x247: 0x9f, + 0x248: 0x9f, 0x249: 0x9f, 0x24a: 0x9f, 0x24b: 0x9f, 0x24c: 0x9f, 0x24d: 0x9f, 0x24e: 0x9f, 0x24f: 0x9f, + 0x250: 0x9f, 0x251: 0x9f, 0x252: 0x9f, 0x253: 0x9f, 0x254: 0x9f, 0x255: 0x9f, 0x256: 0x9f, 0x257: 0x9f, + 0x258: 0x9f, 0x259: 0x9f, 0x25a: 0x9f, 0x25b: 0x9f, 0x25c: 0x9f, 0x25d: 0x9f, 0x25e: 0x9f, 0x25f: 0x9f, + 0x260: 0x9f, 0x261: 0x9f, 0x262: 0x9f, 0x263: 0x9f, 0x264: 0x9f, 0x265: 0x9f, 0x266: 0x9f, 0x267: 0x9f, + 0x268: 0x9f, 0x269: 0x9f, 0x26a: 0x9f, 0x26b: 0x9f, 0x26c: 0x9f, 0x26d: 0x9f, 0x26e: 0x9f, 0x26f: 0x9f, + 0x270: 0x9f, 0x271: 0x9f, 0x272: 0x9f, 0x273: 0x9f, 0x274: 0x9f, 0x275: 0x9f, 0x276: 0x9f, 0x277: 0x9f, + 0x278: 0x9f, 0x279: 0x9f, 0x27a: 0x9f, 0x27b: 0x9f, 0x27c: 0x9f, 0x27d: 0x9f, 0x27e: 0x9f, 0x27f: 0x9f, // Block 0xa, offset 0x280 - 0x280: 0x9d, 0x281: 0x9d, 0x282: 0x9d, 0x283: 0x9d, 0x284: 0x9d, 0x285: 0x9d, 0x286: 0x9d, 0x287: 0x9d, - 0x288: 0x9d, 0x289: 0x9d, 
0x28a: 0x9d, 0x28b: 0x9d, 0x28c: 0x9d, 0x28d: 0x9d, 0x28e: 0x9d, 0x28f: 0x9d, - 0x290: 0x9d, 0x291: 0x9d, 0x292: 0x9d, 0x293: 0x9d, 0x294: 0x9d, 0x295: 0x9d, 0x296: 0x9d, 0x297: 0x9d, - 0x298: 0x9d, 0x299: 0x9d, 0x29a: 0x9d, 0x29b: 0x9d, 0x29c: 0x9d, 0x29d: 0x9d, 0x29e: 0x9d, 0x29f: 0x9d, - 0x2a0: 0x9d, 0x2a1: 0x9d, 0x2a2: 0x9d, 0x2a3: 0x9d, 0x2a4: 0x9d, 0x2a5: 0x9d, 0x2a6: 0x9d, 0x2a7: 0x9d, - 0x2a8: 0x9d, 0x2a9: 0x9d, 0x2aa: 0x9d, 0x2ab: 0x9d, 0x2ac: 0x9d, 0x2ad: 0x9d, 0x2ae: 0x9d, 0x2af: 0x9d, - 0x2b0: 0x9d, 0x2b1: 0x9d, 0x2b2: 0x9d, 0x2b3: 0x9d, 0x2b4: 0x9d, 0x2b5: 0x9d, 0x2b6: 0x9d, 0x2b7: 0x9d, - 0x2b8: 0x9d, 0x2b9: 0x9d, 0x2ba: 0x9d, 0x2bb: 0x9d, 0x2bc: 0x9d, 0x2bd: 0x9d, 0x2be: 0x9d, 0x2bf: 0xe2, + 0x280: 0x9f, 0x281: 0x9f, 0x282: 0x9f, 0x283: 0x9f, 0x284: 0x9f, 0x285: 0x9f, 0x286: 0x9f, 0x287: 0x9f, + 0x288: 0x9f, 0x289: 0x9f, 0x28a: 0x9f, 0x28b: 0x9f, 0x28c: 0x9f, 0x28d: 0x9f, 0x28e: 0x9f, 0x28f: 0x9f, + 0x290: 0x9f, 0x291: 0x9f, 0x292: 0x9f, 0x293: 0x9f, 0x294: 0x9f, 0x295: 0x9f, 0x296: 0x9f, 0x297: 0x9f, + 0x298: 0x9f, 0x299: 0x9f, 0x29a: 0x9f, 0x29b: 0x9f, 0x29c: 0x9f, 0x29d: 0x9f, 0x29e: 0x9f, 0x29f: 0x9f, + 0x2a0: 0x9f, 0x2a1: 0x9f, 0x2a2: 0x9f, 0x2a3: 0x9f, 0x2a4: 0x9f, 0x2a5: 0x9f, 0x2a6: 0x9f, 0x2a7: 0x9f, + 0x2a8: 0x9f, 0x2a9: 0x9f, 0x2aa: 0x9f, 0x2ab: 0x9f, 0x2ac: 0x9f, 0x2ad: 0x9f, 0x2ae: 0x9f, 0x2af: 0x9f, + 0x2b0: 0x9f, 0x2b1: 0x9f, 0x2b2: 0x9f, 0x2b3: 0x9f, 0x2b4: 0x9f, 0x2b5: 0x9f, 0x2b6: 0x9f, 0x2b7: 0x9f, + 0x2b8: 0x9f, 0x2b9: 0x9f, 0x2ba: 0x9f, 0x2bb: 0x9f, 0x2bc: 0x9f, 0x2bd: 0x9f, 0x2be: 0x9f, 0x2bf: 0xe3, // Block 0xb, offset 0x2c0 - 0x2c0: 0x9d, 0x2c1: 0x9d, 0x2c2: 0x9d, 0x2c3: 0x9d, 0x2c4: 0x9d, 0x2c5: 0x9d, 0x2c6: 0x9d, 0x2c7: 0x9d, - 0x2c8: 0x9d, 0x2c9: 0x9d, 0x2ca: 0x9d, 0x2cb: 0x9d, 0x2cc: 0x9d, 0x2cd: 0x9d, 0x2ce: 0x9d, 0x2cf: 0x9d, - 0x2d0: 0x9d, 0x2d1: 0x9d, 0x2d2: 0xe3, 0x2d3: 0xe4, 0x2d4: 0x9d, 0x2d5: 0x9d, 0x2d6: 0x9d, 0x2d7: 0x9d, - 0x2d8: 0xe5, 0x2d9: 0x40, 0x2da: 0x41, 0x2db: 0xe6, 0x2dc: 0x42, 0x2dd: 0x43, 0x2de: 0x44, 0x2df: 0xe7, - 0x2e0: 0xe8, 0x2e1: 0xe9, 0x2e2: 0xea, 0x2e3: 0xeb, 0x2e4: 0xec, 0x2e5: 0xed, 0x2e6: 0xee, 0x2e7: 0xef, - 0x2e8: 0xf0, 0x2e9: 0xf1, 0x2ea: 0xf2, 0x2eb: 0xf3, 0x2ec: 0xf4, 0x2ed: 0xf5, 0x2ee: 0xf6, 0x2ef: 0xf7, - 0x2f0: 0x9d, 0x2f1: 0x9d, 0x2f2: 0x9d, 0x2f3: 0x9d, 0x2f4: 0x9d, 0x2f5: 0x9d, 0x2f6: 0x9d, 0x2f7: 0x9d, - 0x2f8: 0x9d, 0x2f9: 0x9d, 0x2fa: 0x9d, 0x2fb: 0x9d, 0x2fc: 0x9d, 0x2fd: 0x9d, 0x2fe: 0x9d, 0x2ff: 0x9d, + 0x2c0: 0x9f, 0x2c1: 0x9f, 0x2c2: 0x9f, 0x2c3: 0x9f, 0x2c4: 0x9f, 0x2c5: 0x9f, 0x2c6: 0x9f, 0x2c7: 0x9f, + 0x2c8: 0x9f, 0x2c9: 0x9f, 0x2ca: 0x9f, 0x2cb: 0x9f, 0x2cc: 0x9f, 0x2cd: 0x9f, 0x2ce: 0x9f, 0x2cf: 0x9f, + 0x2d0: 0x9f, 0x2d1: 0x9f, 0x2d2: 0xe4, 0x2d3: 0xe5, 0x2d4: 0x9f, 0x2d5: 0x9f, 0x2d6: 0x9f, 0x2d7: 0x9f, + 0x2d8: 0xe6, 0x2d9: 0x42, 0x2da: 0x43, 0x2db: 0xe7, 0x2dc: 0x44, 0x2dd: 0x45, 0x2de: 0x46, 0x2df: 0xe8, + 0x2e0: 0xe9, 0x2e1: 0xea, 0x2e2: 0xeb, 0x2e3: 0xec, 0x2e4: 0xed, 0x2e5: 0xee, 0x2e6: 0xef, 0x2e7: 0xf0, + 0x2e8: 0xf1, 0x2e9: 0xf2, 0x2ea: 0xf3, 0x2eb: 0xf4, 0x2ec: 0xf5, 0x2ed: 0xf6, 0x2ee: 0xf7, 0x2ef: 0xf8, + 0x2f0: 0x9f, 0x2f1: 0x9f, 0x2f2: 0x9f, 0x2f3: 0x9f, 0x2f4: 0x9f, 0x2f5: 0x9f, 0x2f6: 0x9f, 0x2f7: 0x9f, + 0x2f8: 0x9f, 0x2f9: 0x9f, 0x2fa: 0x9f, 0x2fb: 0x9f, 0x2fc: 0x9f, 0x2fd: 0x9f, 0x2fe: 0x9f, 0x2ff: 0x9f, // Block 0xc, offset 0x300 - 0x300: 0x9d, 0x301: 0x9d, 0x302: 0x9d, 0x303: 0x9d, 0x304: 0x9d, 0x305: 0x9d, 0x306: 0x9d, 0x307: 0x9d, - 0x308: 0x9d, 0x309: 0x9d, 0x30a: 0x9d, 0x30b: 0x9d, 0x30c: 0x9d, 0x30d: 0x9d, 0x30e: 0x9d, 0x30f: 0x9d, - 0x310: 0x9d, 0x311: 0x9d, 
0x312: 0x9d, 0x313: 0x9d, 0x314: 0x9d, 0x315: 0x9d, 0x316: 0x9d, 0x317: 0x9d, - 0x318: 0x9d, 0x319: 0x9d, 0x31a: 0x9d, 0x31b: 0x9d, 0x31c: 0x9d, 0x31d: 0x9d, 0x31e: 0xf8, 0x31f: 0xf9, + 0x300: 0x9f, 0x301: 0x9f, 0x302: 0x9f, 0x303: 0x9f, 0x304: 0x9f, 0x305: 0x9f, 0x306: 0x9f, 0x307: 0x9f, + 0x308: 0x9f, 0x309: 0x9f, 0x30a: 0x9f, 0x30b: 0x9f, 0x30c: 0x9f, 0x30d: 0x9f, 0x30e: 0x9f, 0x30f: 0x9f, + 0x310: 0x9f, 0x311: 0x9f, 0x312: 0x9f, 0x313: 0x9f, 0x314: 0x9f, 0x315: 0x9f, 0x316: 0x9f, 0x317: 0x9f, + 0x318: 0x9f, 0x319: 0x9f, 0x31a: 0x9f, 0x31b: 0x9f, 0x31c: 0x9f, 0x31d: 0x9f, 0x31e: 0xf9, 0x31f: 0xfa, // Block 0xd, offset 0x340 - 0x340: 0xb8, 0x341: 0xb8, 0x342: 0xb8, 0x343: 0xb8, 0x344: 0xb8, 0x345: 0xb8, 0x346: 0xb8, 0x347: 0xb8, - 0x348: 0xb8, 0x349: 0xb8, 0x34a: 0xb8, 0x34b: 0xb8, 0x34c: 0xb8, 0x34d: 0xb8, 0x34e: 0xb8, 0x34f: 0xb8, - 0x350: 0xb8, 0x351: 0xb8, 0x352: 0xb8, 0x353: 0xb8, 0x354: 0xb8, 0x355: 0xb8, 0x356: 0xb8, 0x357: 0xb8, - 0x358: 0xb8, 0x359: 0xb8, 0x35a: 0xb8, 0x35b: 0xb8, 0x35c: 0xb8, 0x35d: 0xb8, 0x35e: 0xb8, 0x35f: 0xb8, - 0x360: 0xb8, 0x361: 0xb8, 0x362: 0xb8, 0x363: 0xb8, 0x364: 0xb8, 0x365: 0xb8, 0x366: 0xb8, 0x367: 0xb8, - 0x368: 0xb8, 0x369: 0xb8, 0x36a: 0xb8, 0x36b: 0xb8, 0x36c: 0xb8, 0x36d: 0xb8, 0x36e: 0xb8, 0x36f: 0xb8, - 0x370: 0xb8, 0x371: 0xb8, 0x372: 0xb8, 0x373: 0xb8, 0x374: 0xb8, 0x375: 0xb8, 0x376: 0xb8, 0x377: 0xb8, - 0x378: 0xb8, 0x379: 0xb8, 0x37a: 0xb8, 0x37b: 0xb8, 0x37c: 0xb8, 0x37d: 0xb8, 0x37e: 0xb8, 0x37f: 0xb8, + 0x340: 0xba, 0x341: 0xba, 0x342: 0xba, 0x343: 0xba, 0x344: 0xba, 0x345: 0xba, 0x346: 0xba, 0x347: 0xba, + 0x348: 0xba, 0x349: 0xba, 0x34a: 0xba, 0x34b: 0xba, 0x34c: 0xba, 0x34d: 0xba, 0x34e: 0xba, 0x34f: 0xba, + 0x350: 0xba, 0x351: 0xba, 0x352: 0xba, 0x353: 0xba, 0x354: 0xba, 0x355: 0xba, 0x356: 0xba, 0x357: 0xba, + 0x358: 0xba, 0x359: 0xba, 0x35a: 0xba, 0x35b: 0xba, 0x35c: 0xba, 0x35d: 0xba, 0x35e: 0xba, 0x35f: 0xba, + 0x360: 0xba, 0x361: 0xba, 0x362: 0xba, 0x363: 0xba, 0x364: 0xba, 0x365: 0xba, 0x366: 0xba, 0x367: 0xba, + 0x368: 0xba, 0x369: 0xba, 0x36a: 0xba, 0x36b: 0xba, 0x36c: 0xba, 0x36d: 0xba, 0x36e: 0xba, 0x36f: 0xba, + 0x370: 0xba, 0x371: 0xba, 0x372: 0xba, 0x373: 0xba, 0x374: 0xba, 0x375: 0xba, 0x376: 0xba, 0x377: 0xba, + 0x378: 0xba, 0x379: 0xba, 0x37a: 0xba, 0x37b: 0xba, 0x37c: 0xba, 0x37d: 0xba, 0x37e: 0xba, 0x37f: 0xba, // Block 0xe, offset 0x380 - 0x380: 0xb8, 0x381: 0xb8, 0x382: 0xb8, 0x383: 0xb8, 0x384: 0xb8, 0x385: 0xb8, 0x386: 0xb8, 0x387: 0xb8, - 0x388: 0xb8, 0x389: 0xb8, 0x38a: 0xb8, 0x38b: 0xb8, 0x38c: 0xb8, 0x38d: 0xb8, 0x38e: 0xb8, 0x38f: 0xb8, - 0x390: 0xb8, 0x391: 0xb8, 0x392: 0xb8, 0x393: 0xb8, 0x394: 0xb8, 0x395: 0xb8, 0x396: 0xb8, 0x397: 0xb8, - 0x398: 0xb8, 0x399: 0xb8, 0x39a: 0xb8, 0x39b: 0xb8, 0x39c: 0xb8, 0x39d: 0xb8, 0x39e: 0xb8, 0x39f: 0xb8, - 0x3a0: 0xb8, 0x3a1: 0xb8, 0x3a2: 0xb8, 0x3a3: 0xb8, 0x3a4: 0xfa, 0x3a5: 0xfb, 0x3a6: 0xfc, 0x3a7: 0xfd, - 0x3a8: 0x45, 0x3a9: 0xfe, 0x3aa: 0xff, 0x3ab: 0x46, 0x3ac: 0x47, 0x3ad: 0x48, 0x3ae: 0x49, 0x3af: 0x4a, - 0x3b0: 0x100, 0x3b1: 0x4b, 0x3b2: 0x4c, 0x3b3: 0x4d, 0x3b4: 0x4e, 0x3b5: 0x4f, 0x3b6: 0x101, 0x3b7: 0x50, - 0x3b8: 0x51, 0x3b9: 0x52, 0x3ba: 0x53, 0x3bb: 0x54, 0x3bc: 0x55, 0x3bd: 0x56, 0x3be: 0x57, 0x3bf: 0x58, + 0x380: 0xba, 0x381: 0xba, 0x382: 0xba, 0x383: 0xba, 0x384: 0xba, 0x385: 0xba, 0x386: 0xba, 0x387: 0xba, + 0x388: 0xba, 0x389: 0xba, 0x38a: 0xba, 0x38b: 0xba, 0x38c: 0xba, 0x38d: 0xba, 0x38e: 0xba, 0x38f: 0xba, + 0x390: 0xba, 0x391: 0xba, 0x392: 0xba, 0x393: 0xba, 0x394: 0xba, 0x395: 0xba, 0x396: 0xba, 0x397: 0xba, + 0x398: 0xba, 0x399: 0xba, 
0x39a: 0xba, 0x39b: 0xba, 0x39c: 0xba, 0x39d: 0xba, 0x39e: 0xba, 0x39f: 0xba, + 0x3a0: 0xba, 0x3a1: 0xba, 0x3a2: 0xba, 0x3a3: 0xba, 0x3a4: 0xfb, 0x3a5: 0xfc, 0x3a6: 0xfd, 0x3a7: 0xfe, + 0x3a8: 0x47, 0x3a9: 0xff, 0x3aa: 0x100, 0x3ab: 0x48, 0x3ac: 0x49, 0x3ad: 0x4a, 0x3ae: 0x4b, 0x3af: 0x4c, + 0x3b0: 0x101, 0x3b1: 0x4d, 0x3b2: 0x4e, 0x3b3: 0x4f, 0x3b4: 0x50, 0x3b5: 0x51, 0x3b6: 0x102, 0x3b7: 0x52, + 0x3b8: 0x53, 0x3b9: 0x54, 0x3ba: 0x55, 0x3bb: 0x56, 0x3bc: 0x57, 0x3bd: 0x58, 0x3be: 0x59, 0x3bf: 0x5a, // Block 0xf, offset 0x3c0 - 0x3c0: 0x102, 0x3c1: 0x103, 0x3c2: 0x9d, 0x3c3: 0x104, 0x3c4: 0x105, 0x3c5: 0x9b, 0x3c6: 0x106, 0x3c7: 0x107, - 0x3c8: 0xb8, 0x3c9: 0xb8, 0x3ca: 0x108, 0x3cb: 0x109, 0x3cc: 0x10a, 0x3cd: 0x10b, 0x3ce: 0x10c, 0x3cf: 0x10d, - 0x3d0: 0x10e, 0x3d1: 0x9d, 0x3d2: 0x10f, 0x3d3: 0x110, 0x3d4: 0x111, 0x3d5: 0x112, 0x3d6: 0xb8, 0x3d7: 0xb8, - 0x3d8: 0x9d, 0x3d9: 0x9d, 0x3da: 0x9d, 0x3db: 0x9d, 0x3dc: 0x113, 0x3dd: 0x114, 0x3de: 0xb8, 0x3df: 0xb8, - 0x3e0: 0x115, 0x3e1: 0x116, 0x3e2: 0x117, 0x3e3: 0x118, 0x3e4: 0x119, 0x3e5: 0xb8, 0x3e6: 0x11a, 0x3e7: 0x11b, - 0x3e8: 0x11c, 0x3e9: 0x11d, 0x3ea: 0x11e, 0x3eb: 0x59, 0x3ec: 0x11f, 0x3ed: 0x120, 0x3ee: 0x5a, 0x3ef: 0xb8, - 0x3f0: 0x9d, 0x3f1: 0x121, 0x3f2: 0x122, 0x3f3: 0x123, 0x3f4: 0xb8, 0x3f5: 0xb8, 0x3f6: 0xb8, 0x3f7: 0xb8, - 0x3f8: 0xb8, 0x3f9: 0x124, 0x3fa: 0xb8, 0x3fb: 0xb8, 0x3fc: 0xb8, 0x3fd: 0xb8, 0x3fe: 0xb8, 0x3ff: 0xb8, + 0x3c0: 0x103, 0x3c1: 0x104, 0x3c2: 0x9f, 0x3c3: 0x105, 0x3c4: 0x106, 0x3c5: 0x9b, 0x3c6: 0x107, 0x3c7: 0x108, + 0x3c8: 0xba, 0x3c9: 0xba, 0x3ca: 0x109, 0x3cb: 0x10a, 0x3cc: 0x10b, 0x3cd: 0x10c, 0x3ce: 0x10d, 0x3cf: 0x10e, + 0x3d0: 0x10f, 0x3d1: 0x9f, 0x3d2: 0x110, 0x3d3: 0x111, 0x3d4: 0x112, 0x3d5: 0x113, 0x3d6: 0xba, 0x3d7: 0xba, + 0x3d8: 0x9f, 0x3d9: 0x9f, 0x3da: 0x9f, 0x3db: 0x9f, 0x3dc: 0x114, 0x3dd: 0x115, 0x3de: 0xba, 0x3df: 0xba, + 0x3e0: 0x116, 0x3e1: 0x117, 0x3e2: 0x118, 0x3e3: 0x119, 0x3e4: 0x11a, 0x3e5: 0xba, 0x3e6: 0x11b, 0x3e7: 0x11c, + 0x3e8: 0x11d, 0x3e9: 0x11e, 0x3ea: 0x11f, 0x3eb: 0x5b, 0x3ec: 0x120, 0x3ed: 0x121, 0x3ee: 0x5c, 0x3ef: 0xba, + 0x3f0: 0x122, 0x3f1: 0x123, 0x3f2: 0x124, 0x3f3: 0x125, 0x3f4: 0xba, 0x3f5: 0xba, 0x3f6: 0xba, 0x3f7: 0xba, + 0x3f8: 0xba, 0x3f9: 0x126, 0x3fa: 0xba, 0x3fb: 0xba, 0x3fc: 0xba, 0x3fd: 0xba, 0x3fe: 0xba, 0x3ff: 0xba, // Block 0x10, offset 0x400 - 0x400: 0x125, 0x401: 0x126, 0x402: 0x127, 0x403: 0x128, 0x404: 0x129, 0x405: 0x12a, 0x406: 0x12b, 0x407: 0x12c, - 0x408: 0x12d, 0x409: 0xb8, 0x40a: 0x12e, 0x40b: 0x12f, 0x40c: 0x5b, 0x40d: 0x5c, 0x40e: 0xb8, 0x40f: 0xb8, - 0x410: 0x130, 0x411: 0x131, 0x412: 0x132, 0x413: 0x133, 0x414: 0xb8, 0x415: 0xb8, 0x416: 0x134, 0x417: 0x135, - 0x418: 0x136, 0x419: 0x137, 0x41a: 0x138, 0x41b: 0x139, 0x41c: 0x13a, 0x41d: 0xb8, 0x41e: 0xb8, 0x41f: 0xb8, - 0x420: 0xb8, 0x421: 0xb8, 0x422: 0x13b, 0x423: 0x13c, 0x424: 0xb8, 0x425: 0xb8, 0x426: 0xb8, 0x427: 0xb8, - 0x428: 0xb8, 0x429: 0xb8, 0x42a: 0xb8, 0x42b: 0x13d, 0x42c: 0xb8, 0x42d: 0xb8, 0x42e: 0xb8, 0x42f: 0xb8, - 0x430: 0x13e, 0x431: 0x13f, 0x432: 0x140, 0x433: 0xb8, 0x434: 0xb8, 0x435: 0xb8, 0x436: 0xb8, 0x437: 0xb8, - 0x438: 0xb8, 0x439: 0xb8, 0x43a: 0xb8, 0x43b: 0xb8, 0x43c: 0xb8, 0x43d: 0xb8, 0x43e: 0xb8, 0x43f: 0xb8, + 0x400: 0x127, 0x401: 0x128, 0x402: 0x129, 0x403: 0x12a, 0x404: 0x12b, 0x405: 0x12c, 0x406: 0x12d, 0x407: 0x12e, + 0x408: 0x12f, 0x409: 0xba, 0x40a: 0x130, 0x40b: 0x131, 0x40c: 0x5d, 0x40d: 0x5e, 0x40e: 0xba, 0x40f: 0xba, + 0x410: 0x132, 0x411: 0x133, 0x412: 0x134, 0x413: 0x135, 0x414: 0xba, 0x415: 0xba, 0x416: 0x136, 0x417: 0x137, + 0x418: 0x138, 
0x419: 0x139, 0x41a: 0x13a, 0x41b: 0x13b, 0x41c: 0x13c, 0x41d: 0xba, 0x41e: 0xba, 0x41f: 0xba, + 0x420: 0xba, 0x421: 0xba, 0x422: 0x13d, 0x423: 0x13e, 0x424: 0xba, 0x425: 0xba, 0x426: 0xba, 0x427: 0xba, + 0x428: 0x13f, 0x429: 0x140, 0x42a: 0x141, 0x42b: 0x142, 0x42c: 0xba, 0x42d: 0xba, 0x42e: 0xba, 0x42f: 0xba, + 0x430: 0x143, 0x431: 0x144, 0x432: 0x145, 0x433: 0xba, 0x434: 0x146, 0x435: 0x147, 0x436: 0xba, 0x437: 0xba, + 0x438: 0xba, 0x439: 0xba, 0x43a: 0xba, 0x43b: 0xba, 0x43c: 0xba, 0x43d: 0xba, 0x43e: 0xba, 0x43f: 0xba, // Block 0x11, offset 0x440 - 0x440: 0x9d, 0x441: 0x9d, 0x442: 0x9d, 0x443: 0x9d, 0x444: 0x9d, 0x445: 0x9d, 0x446: 0x9d, 0x447: 0x9d, - 0x448: 0x9d, 0x449: 0x9d, 0x44a: 0x9d, 0x44b: 0x9d, 0x44c: 0x9d, 0x44d: 0x9d, 0x44e: 0x141, 0x44f: 0xb8, - 0x450: 0x9b, 0x451: 0x142, 0x452: 0x9d, 0x453: 0x9d, 0x454: 0x9d, 0x455: 0x143, 0x456: 0xb8, 0x457: 0xb8, - 0x458: 0xb8, 0x459: 0xb8, 0x45a: 0xb8, 0x45b: 0xb8, 0x45c: 0xb8, 0x45d: 0xb8, 0x45e: 0xb8, 0x45f: 0xb8, - 0x460: 0xb8, 0x461: 0xb8, 0x462: 0xb8, 0x463: 0xb8, 0x464: 0xb8, 0x465: 0xb8, 0x466: 0xb8, 0x467: 0xb8, - 0x468: 0xb8, 0x469: 0xb8, 0x46a: 0xb8, 0x46b: 0xb8, 0x46c: 0xb8, 0x46d: 0xb8, 0x46e: 0xb8, 0x46f: 0xb8, - 0x470: 0xb8, 0x471: 0xb8, 0x472: 0xb8, 0x473: 0xb8, 0x474: 0xb8, 0x475: 0xb8, 0x476: 0xb8, 0x477: 0xb8, - 0x478: 0xb8, 0x479: 0xb8, 0x47a: 0xb8, 0x47b: 0xb8, 0x47c: 0xb8, 0x47d: 0xb8, 0x47e: 0xb8, 0x47f: 0xb8, + 0x440: 0x9f, 0x441: 0x9f, 0x442: 0x9f, 0x443: 0x9f, 0x444: 0x9f, 0x445: 0x9f, 0x446: 0x9f, 0x447: 0x9f, + 0x448: 0x9f, 0x449: 0x9f, 0x44a: 0x9f, 0x44b: 0x9f, 0x44c: 0x9f, 0x44d: 0x9f, 0x44e: 0x148, 0x44f: 0xba, + 0x450: 0x9b, 0x451: 0x149, 0x452: 0x9f, 0x453: 0x9f, 0x454: 0x9f, 0x455: 0x14a, 0x456: 0xba, 0x457: 0xba, + 0x458: 0xba, 0x459: 0xba, 0x45a: 0xba, 0x45b: 0xba, 0x45c: 0xba, 0x45d: 0xba, 0x45e: 0xba, 0x45f: 0xba, + 0x460: 0xba, 0x461: 0xba, 0x462: 0xba, 0x463: 0xba, 0x464: 0xba, 0x465: 0xba, 0x466: 0xba, 0x467: 0xba, + 0x468: 0xba, 0x469: 0xba, 0x46a: 0xba, 0x46b: 0xba, 0x46c: 0xba, 0x46d: 0xba, 0x46e: 0xba, 0x46f: 0xba, + 0x470: 0xba, 0x471: 0xba, 0x472: 0xba, 0x473: 0xba, 0x474: 0xba, 0x475: 0xba, 0x476: 0xba, 0x477: 0xba, + 0x478: 0xba, 0x479: 0xba, 0x47a: 0xba, 0x47b: 0xba, 0x47c: 0xba, 0x47d: 0xba, 0x47e: 0xba, 0x47f: 0xba, // Block 0x12, offset 0x480 - 0x480: 0x9d, 0x481: 0x9d, 0x482: 0x9d, 0x483: 0x9d, 0x484: 0x9d, 0x485: 0x9d, 0x486: 0x9d, 0x487: 0x9d, - 0x488: 0x9d, 0x489: 0x9d, 0x48a: 0x9d, 0x48b: 0x9d, 0x48c: 0x9d, 0x48d: 0x9d, 0x48e: 0x9d, 0x48f: 0x9d, - 0x490: 0x144, 0x491: 0xb8, 0x492: 0xb8, 0x493: 0xb8, 0x494: 0xb8, 0x495: 0xb8, 0x496: 0xb8, 0x497: 0xb8, - 0x498: 0xb8, 0x499: 0xb8, 0x49a: 0xb8, 0x49b: 0xb8, 0x49c: 0xb8, 0x49d: 0xb8, 0x49e: 0xb8, 0x49f: 0xb8, - 0x4a0: 0xb8, 0x4a1: 0xb8, 0x4a2: 0xb8, 0x4a3: 0xb8, 0x4a4: 0xb8, 0x4a5: 0xb8, 0x4a6: 0xb8, 0x4a7: 0xb8, - 0x4a8: 0xb8, 0x4a9: 0xb8, 0x4aa: 0xb8, 0x4ab: 0xb8, 0x4ac: 0xb8, 0x4ad: 0xb8, 0x4ae: 0xb8, 0x4af: 0xb8, - 0x4b0: 0xb8, 0x4b1: 0xb8, 0x4b2: 0xb8, 0x4b3: 0xb8, 0x4b4: 0xb8, 0x4b5: 0xb8, 0x4b6: 0xb8, 0x4b7: 0xb8, - 0x4b8: 0xb8, 0x4b9: 0xb8, 0x4ba: 0xb8, 0x4bb: 0xb8, 0x4bc: 0xb8, 0x4bd: 0xb8, 0x4be: 0xb8, 0x4bf: 0xb8, + 0x480: 0x9f, 0x481: 0x9f, 0x482: 0x9f, 0x483: 0x9f, 0x484: 0x9f, 0x485: 0x9f, 0x486: 0x9f, 0x487: 0x9f, + 0x488: 0x9f, 0x489: 0x9f, 0x48a: 0x9f, 0x48b: 0x9f, 0x48c: 0x9f, 0x48d: 0x9f, 0x48e: 0x9f, 0x48f: 0x9f, + 0x490: 0x14b, 0x491: 0xba, 0x492: 0xba, 0x493: 0xba, 0x494: 0xba, 0x495: 0xba, 0x496: 0xba, 0x497: 0xba, + 0x498: 0xba, 0x499: 0xba, 0x49a: 0xba, 0x49b: 0xba, 0x49c: 0xba, 0x49d: 0xba, 0x49e: 0xba, 
0x49f: 0xba, + 0x4a0: 0xba, 0x4a1: 0xba, 0x4a2: 0xba, 0x4a3: 0xba, 0x4a4: 0xba, 0x4a5: 0xba, 0x4a6: 0xba, 0x4a7: 0xba, + 0x4a8: 0xba, 0x4a9: 0xba, 0x4aa: 0xba, 0x4ab: 0xba, 0x4ac: 0xba, 0x4ad: 0xba, 0x4ae: 0xba, 0x4af: 0xba, + 0x4b0: 0xba, 0x4b1: 0xba, 0x4b2: 0xba, 0x4b3: 0xba, 0x4b4: 0xba, 0x4b5: 0xba, 0x4b6: 0xba, 0x4b7: 0xba, + 0x4b8: 0xba, 0x4b9: 0xba, 0x4ba: 0xba, 0x4bb: 0xba, 0x4bc: 0xba, 0x4bd: 0xba, 0x4be: 0xba, 0x4bf: 0xba, // Block 0x13, offset 0x4c0 - 0x4c0: 0xb8, 0x4c1: 0xb8, 0x4c2: 0xb8, 0x4c3: 0xb8, 0x4c4: 0xb8, 0x4c5: 0xb8, 0x4c6: 0xb8, 0x4c7: 0xb8, - 0x4c8: 0xb8, 0x4c9: 0xb8, 0x4ca: 0xb8, 0x4cb: 0xb8, 0x4cc: 0xb8, 0x4cd: 0xb8, 0x4ce: 0xb8, 0x4cf: 0xb8, - 0x4d0: 0x9d, 0x4d1: 0x9d, 0x4d2: 0x9d, 0x4d3: 0x9d, 0x4d4: 0x9d, 0x4d5: 0x9d, 0x4d6: 0x9d, 0x4d7: 0x9d, - 0x4d8: 0x9d, 0x4d9: 0x145, 0x4da: 0xb8, 0x4db: 0xb8, 0x4dc: 0xb8, 0x4dd: 0xb8, 0x4de: 0xb8, 0x4df: 0xb8, - 0x4e0: 0xb8, 0x4e1: 0xb8, 0x4e2: 0xb8, 0x4e3: 0xb8, 0x4e4: 0xb8, 0x4e5: 0xb8, 0x4e6: 0xb8, 0x4e7: 0xb8, - 0x4e8: 0xb8, 0x4e9: 0xb8, 0x4ea: 0xb8, 0x4eb: 0xb8, 0x4ec: 0xb8, 0x4ed: 0xb8, 0x4ee: 0xb8, 0x4ef: 0xb8, - 0x4f0: 0xb8, 0x4f1: 0xb8, 0x4f2: 0xb8, 0x4f3: 0xb8, 0x4f4: 0xb8, 0x4f5: 0xb8, 0x4f6: 0xb8, 0x4f7: 0xb8, - 0x4f8: 0xb8, 0x4f9: 0xb8, 0x4fa: 0xb8, 0x4fb: 0xb8, 0x4fc: 0xb8, 0x4fd: 0xb8, 0x4fe: 0xb8, 0x4ff: 0xb8, + 0x4c0: 0xba, 0x4c1: 0xba, 0x4c2: 0xba, 0x4c3: 0xba, 0x4c4: 0xba, 0x4c5: 0xba, 0x4c6: 0xba, 0x4c7: 0xba, + 0x4c8: 0xba, 0x4c9: 0xba, 0x4ca: 0xba, 0x4cb: 0xba, 0x4cc: 0xba, 0x4cd: 0xba, 0x4ce: 0xba, 0x4cf: 0xba, + 0x4d0: 0x9f, 0x4d1: 0x9f, 0x4d2: 0x9f, 0x4d3: 0x9f, 0x4d4: 0x9f, 0x4d5: 0x9f, 0x4d6: 0x9f, 0x4d7: 0x9f, + 0x4d8: 0x9f, 0x4d9: 0x14c, 0x4da: 0xba, 0x4db: 0xba, 0x4dc: 0xba, 0x4dd: 0xba, 0x4de: 0xba, 0x4df: 0xba, + 0x4e0: 0xba, 0x4e1: 0xba, 0x4e2: 0xba, 0x4e3: 0xba, 0x4e4: 0xba, 0x4e5: 0xba, 0x4e6: 0xba, 0x4e7: 0xba, + 0x4e8: 0xba, 0x4e9: 0xba, 0x4ea: 0xba, 0x4eb: 0xba, 0x4ec: 0xba, 0x4ed: 0xba, 0x4ee: 0xba, 0x4ef: 0xba, + 0x4f0: 0xba, 0x4f1: 0xba, 0x4f2: 0xba, 0x4f3: 0xba, 0x4f4: 0xba, 0x4f5: 0xba, 0x4f6: 0xba, 0x4f7: 0xba, + 0x4f8: 0xba, 0x4f9: 0xba, 0x4fa: 0xba, 0x4fb: 0xba, 0x4fc: 0xba, 0x4fd: 0xba, 0x4fe: 0xba, 0x4ff: 0xba, // Block 0x14, offset 0x500 - 0x500: 0xb8, 0x501: 0xb8, 0x502: 0xb8, 0x503: 0xb8, 0x504: 0xb8, 0x505: 0xb8, 0x506: 0xb8, 0x507: 0xb8, - 0x508: 0xb8, 0x509: 0xb8, 0x50a: 0xb8, 0x50b: 0xb8, 0x50c: 0xb8, 0x50d: 0xb8, 0x50e: 0xb8, 0x50f: 0xb8, - 0x510: 0xb8, 0x511: 0xb8, 0x512: 0xb8, 0x513: 0xb8, 0x514: 0xb8, 0x515: 0xb8, 0x516: 0xb8, 0x517: 0xb8, - 0x518: 0xb8, 0x519: 0xb8, 0x51a: 0xb8, 0x51b: 0xb8, 0x51c: 0xb8, 0x51d: 0xb8, 0x51e: 0xb8, 0x51f: 0xb8, - 0x520: 0x9d, 0x521: 0x9d, 0x522: 0x9d, 0x523: 0x9d, 0x524: 0x9d, 0x525: 0x9d, 0x526: 0x9d, 0x527: 0x9d, - 0x528: 0x13d, 0x529: 0x146, 0x52a: 0xb8, 0x52b: 0x147, 0x52c: 0x148, 0x52d: 0x149, 0x52e: 0x14a, 0x52f: 0xb8, - 0x530: 0xb8, 0x531: 0xb8, 0x532: 0xb8, 0x533: 0xb8, 0x534: 0xb8, 0x535: 0xb8, 0x536: 0xb8, 0x537: 0xb8, - 0x538: 0xb8, 0x539: 0xb8, 0x53a: 0xb8, 0x53b: 0xb8, 0x53c: 0x9d, 0x53d: 0x14b, 0x53e: 0x14c, 0x53f: 0x14d, + 0x500: 0xba, 0x501: 0xba, 0x502: 0xba, 0x503: 0xba, 0x504: 0xba, 0x505: 0xba, 0x506: 0xba, 0x507: 0xba, + 0x508: 0xba, 0x509: 0xba, 0x50a: 0xba, 0x50b: 0xba, 0x50c: 0xba, 0x50d: 0xba, 0x50e: 0xba, 0x50f: 0xba, + 0x510: 0xba, 0x511: 0xba, 0x512: 0xba, 0x513: 0xba, 0x514: 0xba, 0x515: 0xba, 0x516: 0xba, 0x517: 0xba, + 0x518: 0xba, 0x519: 0xba, 0x51a: 0xba, 0x51b: 0xba, 0x51c: 0xba, 0x51d: 0xba, 0x51e: 0xba, 0x51f: 0xba, + 0x520: 0x9f, 0x521: 0x9f, 0x522: 0x9f, 0x523: 0x9f, 0x524: 0x9f, 0x525: 0x9f, 
0x526: 0x9f, 0x527: 0x9f, + 0x528: 0x142, 0x529: 0x14d, 0x52a: 0xba, 0x52b: 0x14e, 0x52c: 0x14f, 0x52d: 0x150, 0x52e: 0x151, 0x52f: 0xba, + 0x530: 0xba, 0x531: 0xba, 0x532: 0xba, 0x533: 0xba, 0x534: 0xba, 0x535: 0xba, 0x536: 0xba, 0x537: 0xba, + 0x538: 0xba, 0x539: 0xba, 0x53a: 0xba, 0x53b: 0xba, 0x53c: 0x9f, 0x53d: 0x152, 0x53e: 0x153, 0x53f: 0x154, // Block 0x15, offset 0x540 - 0x540: 0x9d, 0x541: 0x9d, 0x542: 0x9d, 0x543: 0x9d, 0x544: 0x9d, 0x545: 0x9d, 0x546: 0x9d, 0x547: 0x9d, - 0x548: 0x9d, 0x549: 0x9d, 0x54a: 0x9d, 0x54b: 0x9d, 0x54c: 0x9d, 0x54d: 0x9d, 0x54e: 0x9d, 0x54f: 0x9d, - 0x550: 0x9d, 0x551: 0x9d, 0x552: 0x9d, 0x553: 0x9d, 0x554: 0x9d, 0x555: 0x9d, 0x556: 0x9d, 0x557: 0x9d, - 0x558: 0x9d, 0x559: 0x9d, 0x55a: 0x9d, 0x55b: 0x9d, 0x55c: 0x9d, 0x55d: 0x9d, 0x55e: 0x9d, 0x55f: 0x14e, - 0x560: 0x9d, 0x561: 0x9d, 0x562: 0x9d, 0x563: 0x9d, 0x564: 0x9d, 0x565: 0x9d, 0x566: 0x9d, 0x567: 0x9d, - 0x568: 0x9d, 0x569: 0x9d, 0x56a: 0x9d, 0x56b: 0x14f, 0x56c: 0xb8, 0x56d: 0xb8, 0x56e: 0xb8, 0x56f: 0xb8, - 0x570: 0xb8, 0x571: 0xb8, 0x572: 0xb8, 0x573: 0xb8, 0x574: 0xb8, 0x575: 0xb8, 0x576: 0xb8, 0x577: 0xb8, - 0x578: 0xb8, 0x579: 0xb8, 0x57a: 0xb8, 0x57b: 0xb8, 0x57c: 0xb8, 0x57d: 0xb8, 0x57e: 0xb8, 0x57f: 0xb8, + 0x540: 0x9f, 0x541: 0x9f, 0x542: 0x9f, 0x543: 0x9f, 0x544: 0x9f, 0x545: 0x9f, 0x546: 0x9f, 0x547: 0x9f, + 0x548: 0x9f, 0x549: 0x9f, 0x54a: 0x9f, 0x54b: 0x9f, 0x54c: 0x9f, 0x54d: 0x9f, 0x54e: 0x9f, 0x54f: 0x9f, + 0x550: 0x9f, 0x551: 0x9f, 0x552: 0x9f, 0x553: 0x9f, 0x554: 0x9f, 0x555: 0x9f, 0x556: 0x9f, 0x557: 0x9f, + 0x558: 0x9f, 0x559: 0x9f, 0x55a: 0x9f, 0x55b: 0x9f, 0x55c: 0x9f, 0x55d: 0x9f, 0x55e: 0x9f, 0x55f: 0x155, + 0x560: 0x9f, 0x561: 0x9f, 0x562: 0x9f, 0x563: 0x9f, 0x564: 0x9f, 0x565: 0x9f, 0x566: 0x9f, 0x567: 0x9f, + 0x568: 0x9f, 0x569: 0x9f, 0x56a: 0x9f, 0x56b: 0x156, 0x56c: 0xba, 0x56d: 0xba, 0x56e: 0xba, 0x56f: 0xba, + 0x570: 0xba, 0x571: 0xba, 0x572: 0xba, 0x573: 0xba, 0x574: 0xba, 0x575: 0xba, 0x576: 0xba, 0x577: 0xba, + 0x578: 0xba, 0x579: 0xba, 0x57a: 0xba, 0x57b: 0xba, 0x57c: 0xba, 0x57d: 0xba, 0x57e: 0xba, 0x57f: 0xba, // Block 0x16, offset 0x580 - 0x580: 0x150, 0x581: 0xb8, 0x582: 0xb8, 0x583: 0xb8, 0x584: 0xb8, 0x585: 0xb8, 0x586: 0xb8, 0x587: 0xb8, - 0x588: 0xb8, 0x589: 0xb8, 0x58a: 0xb8, 0x58b: 0xb8, 0x58c: 0xb8, 0x58d: 0xb8, 0x58e: 0xb8, 0x58f: 0xb8, - 0x590: 0xb8, 0x591: 0xb8, 0x592: 0xb8, 0x593: 0xb8, 0x594: 0xb8, 0x595: 0xb8, 0x596: 0xb8, 0x597: 0xb8, - 0x598: 0xb8, 0x599: 0xb8, 0x59a: 0xb8, 0x59b: 0xb8, 0x59c: 0xb8, 0x59d: 0xb8, 0x59e: 0xb8, 0x59f: 0xb8, - 0x5a0: 0xb8, 0x5a1: 0xb8, 0x5a2: 0xb8, 0x5a3: 0xb8, 0x5a4: 0xb8, 0x5a5: 0xb8, 0x5a6: 0xb8, 0x5a7: 0xb8, - 0x5a8: 0xb8, 0x5a9: 0xb8, 0x5aa: 0xb8, 0x5ab: 0xb8, 0x5ac: 0xb8, 0x5ad: 0xb8, 0x5ae: 0xb8, 0x5af: 0xb8, - 0x5b0: 0x9d, 0x5b1: 0x151, 0x5b2: 0x152, 0x5b3: 0xb8, 0x5b4: 0xb8, 0x5b5: 0xb8, 0x5b6: 0xb8, 0x5b7: 0xb8, - 0x5b8: 0xb8, 0x5b9: 0xb8, 0x5ba: 0xb8, 0x5bb: 0xb8, 0x5bc: 0xb8, 0x5bd: 0xb8, 0x5be: 0xb8, 0x5bf: 0xb8, + 0x580: 0x9f, 0x581: 0x9f, 0x582: 0x9f, 0x583: 0x9f, 0x584: 0x157, 0x585: 0x158, 0x586: 0x9f, 0x587: 0x9f, + 0x588: 0x9f, 0x589: 0x9f, 0x58a: 0x9f, 0x58b: 0x159, 0x58c: 0xba, 0x58d: 0xba, 0x58e: 0xba, 0x58f: 0xba, + 0x590: 0xba, 0x591: 0xba, 0x592: 0xba, 0x593: 0xba, 0x594: 0xba, 0x595: 0xba, 0x596: 0xba, 0x597: 0xba, + 0x598: 0xba, 0x599: 0xba, 0x59a: 0xba, 0x59b: 0xba, 0x59c: 0xba, 0x59d: 0xba, 0x59e: 0xba, 0x59f: 0xba, + 0x5a0: 0xba, 0x5a1: 0xba, 0x5a2: 0xba, 0x5a3: 0xba, 0x5a4: 0xba, 0x5a5: 0xba, 0x5a6: 0xba, 0x5a7: 0xba, + 0x5a8: 0xba, 0x5a9: 0xba, 0x5aa: 0xba, 0x5ab: 0xba, 0x5ac: 
0xba, 0x5ad: 0xba, 0x5ae: 0xba, 0x5af: 0xba, + 0x5b0: 0x9f, 0x5b1: 0x15a, 0x5b2: 0x15b, 0x5b3: 0xba, 0x5b4: 0xba, 0x5b5: 0xba, 0x5b6: 0xba, 0x5b7: 0xba, + 0x5b8: 0xba, 0x5b9: 0xba, 0x5ba: 0xba, 0x5bb: 0xba, 0x5bc: 0xba, 0x5bd: 0xba, 0x5be: 0xba, 0x5bf: 0xba, // Block 0x17, offset 0x5c0 - 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x153, 0x5c4: 0x154, 0x5c5: 0x155, 0x5c6: 0x156, 0x5c7: 0x157, - 0x5c8: 0x9b, 0x5c9: 0x158, 0x5ca: 0xb8, 0x5cb: 0xb8, 0x5cc: 0x9b, 0x5cd: 0x159, 0x5ce: 0xb8, 0x5cf: 0xb8, - 0x5d0: 0x5d, 0x5d1: 0x5e, 0x5d2: 0x5f, 0x5d3: 0x60, 0x5d4: 0x61, 0x5d5: 0x62, 0x5d6: 0x63, 0x5d7: 0x64, - 0x5d8: 0x65, 0x5d9: 0x66, 0x5da: 0x67, 0x5db: 0x68, 0x5dc: 0x69, 0x5dd: 0x6a, 0x5de: 0x6b, 0x5df: 0x6c, + 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x15c, 0x5c4: 0x15d, 0x5c5: 0x15e, 0x5c6: 0x15f, 0x5c7: 0x160, + 0x5c8: 0x9b, 0x5c9: 0x161, 0x5ca: 0xba, 0x5cb: 0xba, 0x5cc: 0x9b, 0x5cd: 0x162, 0x5ce: 0xba, 0x5cf: 0xba, + 0x5d0: 0x5f, 0x5d1: 0x60, 0x5d2: 0x61, 0x5d3: 0x62, 0x5d4: 0x63, 0x5d5: 0x64, 0x5d6: 0x65, 0x5d7: 0x66, + 0x5d8: 0x67, 0x5d9: 0x68, 0x5da: 0x69, 0x5db: 0x6a, 0x5dc: 0x6b, 0x5dd: 0x6c, 0x5de: 0x6d, 0x5df: 0x6e, 0x5e0: 0x9b, 0x5e1: 0x9b, 0x5e2: 0x9b, 0x5e3: 0x9b, 0x5e4: 0x9b, 0x5e5: 0x9b, 0x5e6: 0x9b, 0x5e7: 0x9b, - 0x5e8: 0x15a, 0x5e9: 0x15b, 0x5ea: 0x15c, 0x5eb: 0xb8, 0x5ec: 0xb8, 0x5ed: 0xb8, 0x5ee: 0xb8, 0x5ef: 0xb8, - 0x5f0: 0xb8, 0x5f1: 0xb8, 0x5f2: 0xb8, 0x5f3: 0xb8, 0x5f4: 0xb8, 0x5f5: 0xb8, 0x5f6: 0xb8, 0x5f7: 0xb8, - 0x5f8: 0xb8, 0x5f9: 0xb8, 0x5fa: 0xb8, 0x5fb: 0xb8, 0x5fc: 0xb8, 0x5fd: 0xb8, 0x5fe: 0xb8, 0x5ff: 0xb8, + 0x5e8: 0x163, 0x5e9: 0x164, 0x5ea: 0x165, 0x5eb: 0xba, 0x5ec: 0xba, 0x5ed: 0xba, 0x5ee: 0xba, 0x5ef: 0xba, + 0x5f0: 0xba, 0x5f1: 0xba, 0x5f2: 0xba, 0x5f3: 0xba, 0x5f4: 0xba, 0x5f5: 0xba, 0x5f6: 0xba, 0x5f7: 0xba, + 0x5f8: 0xba, 0x5f9: 0xba, 0x5fa: 0xba, 0x5fb: 0xba, 0x5fc: 0xba, 0x5fd: 0xba, 0x5fe: 0xba, 0x5ff: 0xba, // Block 0x18, offset 0x600 - 0x600: 0x15d, 0x601: 0xb8, 0x602: 0xb8, 0x603: 0xb8, 0x604: 0xb8, 0x605: 0xb8, 0x606: 0xb8, 0x607: 0xb8, - 0x608: 0xb8, 0x609: 0xb8, 0x60a: 0xb8, 0x60b: 0xb8, 0x60c: 0xb8, 0x60d: 0xb8, 0x60e: 0xb8, 0x60f: 0xb8, - 0x610: 0xb8, 0x611: 0xb8, 0x612: 0xb8, 0x613: 0xb8, 0x614: 0xb8, 0x615: 0xb8, 0x616: 0xb8, 0x617: 0xb8, - 0x618: 0xb8, 0x619: 0xb8, 0x61a: 0xb8, 0x61b: 0xb8, 0x61c: 0xb8, 0x61d: 0xb8, 0x61e: 0xb8, 0x61f: 0xb8, - 0x620: 0x9d, 0x621: 0x9d, 0x622: 0x9d, 0x623: 0x15e, 0x624: 0x6d, 0x625: 0x15f, 0x626: 0xb8, 0x627: 0xb8, - 0x628: 0xb8, 0x629: 0xb8, 0x62a: 0xb8, 0x62b: 0xb8, 0x62c: 0xb8, 0x62d: 0xb8, 0x62e: 0xb8, 0x62f: 0xb8, - 0x630: 0xb8, 0x631: 0xb8, 0x632: 0xb8, 0x633: 0xb8, 0x634: 0xb8, 0x635: 0xb8, 0x636: 0xb8, 0x637: 0xb8, - 0x638: 0x6e, 0x639: 0x6f, 0x63a: 0x70, 0x63b: 0x160, 0x63c: 0xb8, 0x63d: 0xb8, 0x63e: 0xb8, 0x63f: 0xb8, + 0x600: 0x166, 0x601: 0xba, 0x602: 0xba, 0x603: 0xba, 0x604: 0xba, 0x605: 0xba, 0x606: 0xba, 0x607: 0xba, + 0x608: 0xba, 0x609: 0xba, 0x60a: 0xba, 0x60b: 0xba, 0x60c: 0xba, 0x60d: 0xba, 0x60e: 0xba, 0x60f: 0xba, + 0x610: 0xba, 0x611: 0xba, 0x612: 0xba, 0x613: 0xba, 0x614: 0xba, 0x615: 0xba, 0x616: 0xba, 0x617: 0xba, + 0x618: 0xba, 0x619: 0xba, 0x61a: 0xba, 0x61b: 0xba, 0x61c: 0xba, 0x61d: 0xba, 0x61e: 0xba, 0x61f: 0xba, + 0x620: 0x122, 0x621: 0x122, 0x622: 0x122, 0x623: 0x167, 0x624: 0x6f, 0x625: 0x168, 0x626: 0xba, 0x627: 0xba, + 0x628: 0xba, 0x629: 0xba, 0x62a: 0xba, 0x62b: 0xba, 0x62c: 0xba, 0x62d: 0xba, 0x62e: 0xba, 0x62f: 0xba, + 0x630: 0xba, 0x631: 0xba, 0x632: 0xba, 0x633: 0xba, 0x634: 0xba, 0x635: 0xba, 0x636: 0xba, 0x637: 0xba, + 0x638: 0x70, 0x639: 0x71, 
0x63a: 0x72, 0x63b: 0x169, 0x63c: 0xba, 0x63d: 0xba, 0x63e: 0xba, 0x63f: 0xba, // Block 0x19, offset 0x640 - 0x640: 0x161, 0x641: 0x9b, 0x642: 0x162, 0x643: 0x163, 0x644: 0x71, 0x645: 0x72, 0x646: 0x164, 0x647: 0x165, - 0x648: 0x73, 0x649: 0x166, 0x64a: 0xb8, 0x64b: 0xb8, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b, + 0x640: 0x16a, 0x641: 0x9b, 0x642: 0x16b, 0x643: 0x16c, 0x644: 0x73, 0x645: 0x74, 0x646: 0x16d, 0x647: 0x16e, + 0x648: 0x75, 0x649: 0x16f, 0x64a: 0xba, 0x64b: 0xba, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b, 0x650: 0x9b, 0x651: 0x9b, 0x652: 0x9b, 0x653: 0x9b, 0x654: 0x9b, 0x655: 0x9b, 0x656: 0x9b, 0x657: 0x9b, - 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x167, 0x65c: 0x9b, 0x65d: 0x168, 0x65e: 0x9b, 0x65f: 0x169, - 0x660: 0x16a, 0x661: 0x16b, 0x662: 0x16c, 0x663: 0xb8, 0x664: 0x16d, 0x665: 0x16e, 0x666: 0x16f, 0x667: 0x170, - 0x668: 0xb8, 0x669: 0xb8, 0x66a: 0xb8, 0x66b: 0xb8, 0x66c: 0xb8, 0x66d: 0xb8, 0x66e: 0xb8, 0x66f: 0xb8, - 0x670: 0xb8, 0x671: 0xb8, 0x672: 0xb8, 0x673: 0xb8, 0x674: 0xb8, 0x675: 0xb8, 0x676: 0xb8, 0x677: 0xb8, - 0x678: 0xb8, 0x679: 0xb8, 0x67a: 0xb8, 0x67b: 0xb8, 0x67c: 0xb8, 0x67d: 0xb8, 0x67e: 0xb8, 0x67f: 0xb8, + 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x170, 0x65c: 0x9b, 0x65d: 0x171, 0x65e: 0x9b, 0x65f: 0x172, + 0x660: 0x173, 0x661: 0x174, 0x662: 0x175, 0x663: 0xba, 0x664: 0x176, 0x665: 0x177, 0x666: 0x178, 0x667: 0x179, + 0x668: 0xba, 0x669: 0xba, 0x66a: 0xba, 0x66b: 0xba, 0x66c: 0xba, 0x66d: 0xba, 0x66e: 0xba, 0x66f: 0xba, + 0x670: 0xba, 0x671: 0xba, 0x672: 0xba, 0x673: 0xba, 0x674: 0xba, 0x675: 0xba, 0x676: 0xba, 0x677: 0xba, + 0x678: 0xba, 0x679: 0xba, 0x67a: 0xba, 0x67b: 0xba, 0x67c: 0xba, 0x67d: 0xba, 0x67e: 0xba, 0x67f: 0xba, // Block 0x1a, offset 0x680 - 0x680: 0x9d, 0x681: 0x9d, 0x682: 0x9d, 0x683: 0x9d, 0x684: 0x9d, 0x685: 0x9d, 0x686: 0x9d, 0x687: 0x9d, - 0x688: 0x9d, 0x689: 0x9d, 0x68a: 0x9d, 0x68b: 0x9d, 0x68c: 0x9d, 0x68d: 0x9d, 0x68e: 0x9d, 0x68f: 0x9d, - 0x690: 0x9d, 0x691: 0x9d, 0x692: 0x9d, 0x693: 0x9d, 0x694: 0x9d, 0x695: 0x9d, 0x696: 0x9d, 0x697: 0x9d, - 0x698: 0x9d, 0x699: 0x9d, 0x69a: 0x9d, 0x69b: 0x171, 0x69c: 0x9d, 0x69d: 0x9d, 0x69e: 0x9d, 0x69f: 0x9d, - 0x6a0: 0x9d, 0x6a1: 0x9d, 0x6a2: 0x9d, 0x6a3: 0x9d, 0x6a4: 0x9d, 0x6a5: 0x9d, 0x6a6: 0x9d, 0x6a7: 0x9d, - 0x6a8: 0x9d, 0x6a9: 0x9d, 0x6aa: 0x9d, 0x6ab: 0x9d, 0x6ac: 0x9d, 0x6ad: 0x9d, 0x6ae: 0x9d, 0x6af: 0x9d, - 0x6b0: 0x9d, 0x6b1: 0x9d, 0x6b2: 0x9d, 0x6b3: 0x9d, 0x6b4: 0x9d, 0x6b5: 0x9d, 0x6b6: 0x9d, 0x6b7: 0x9d, - 0x6b8: 0x9d, 0x6b9: 0x9d, 0x6ba: 0x9d, 0x6bb: 0x9d, 0x6bc: 0x9d, 0x6bd: 0x9d, 0x6be: 0x9d, 0x6bf: 0x9d, + 0x680: 0x9f, 0x681: 0x9f, 0x682: 0x9f, 0x683: 0x9f, 0x684: 0x9f, 0x685: 0x9f, 0x686: 0x9f, 0x687: 0x9f, + 0x688: 0x9f, 0x689: 0x9f, 0x68a: 0x9f, 0x68b: 0x9f, 0x68c: 0x9f, 0x68d: 0x9f, 0x68e: 0x9f, 0x68f: 0x9f, + 0x690: 0x9f, 0x691: 0x9f, 0x692: 0x9f, 0x693: 0x9f, 0x694: 0x9f, 0x695: 0x9f, 0x696: 0x9f, 0x697: 0x9f, + 0x698: 0x9f, 0x699: 0x9f, 0x69a: 0x9f, 0x69b: 0x17a, 0x69c: 0x9f, 0x69d: 0x9f, 0x69e: 0x9f, 0x69f: 0x9f, + 0x6a0: 0x9f, 0x6a1: 0x9f, 0x6a2: 0x9f, 0x6a3: 0x9f, 0x6a4: 0x9f, 0x6a5: 0x9f, 0x6a6: 0x9f, 0x6a7: 0x9f, + 0x6a8: 0x9f, 0x6a9: 0x9f, 0x6aa: 0x9f, 0x6ab: 0x9f, 0x6ac: 0x9f, 0x6ad: 0x9f, 0x6ae: 0x9f, 0x6af: 0x9f, + 0x6b0: 0x9f, 0x6b1: 0x9f, 0x6b2: 0x9f, 0x6b3: 0x9f, 0x6b4: 0x9f, 0x6b5: 0x9f, 0x6b6: 0x9f, 0x6b7: 0x9f, + 0x6b8: 0x9f, 0x6b9: 0x9f, 0x6ba: 0x9f, 0x6bb: 0x9f, 0x6bc: 0x9f, 0x6bd: 0x9f, 0x6be: 0x9f, 0x6bf: 0x9f, // Block 0x1b, offset 0x6c0 - 0x6c0: 0x9d, 0x6c1: 0x9d, 0x6c2: 0x9d, 0x6c3: 0x9d, 0x6c4: 0x9d, 0x6c5: 
0x9d, 0x6c6: 0x9d, 0x6c7: 0x9d, - 0x6c8: 0x9d, 0x6c9: 0x9d, 0x6ca: 0x9d, 0x6cb: 0x9d, 0x6cc: 0x9d, 0x6cd: 0x9d, 0x6ce: 0x9d, 0x6cf: 0x9d, - 0x6d0: 0x9d, 0x6d1: 0x9d, 0x6d2: 0x9d, 0x6d3: 0x9d, 0x6d4: 0x9d, 0x6d5: 0x9d, 0x6d6: 0x9d, 0x6d7: 0x9d, - 0x6d8: 0x9d, 0x6d9: 0x9d, 0x6da: 0x9d, 0x6db: 0x9d, 0x6dc: 0x172, 0x6dd: 0x9d, 0x6de: 0x9d, 0x6df: 0x9d, - 0x6e0: 0x173, 0x6e1: 0x9d, 0x6e2: 0x9d, 0x6e3: 0x9d, 0x6e4: 0x9d, 0x6e5: 0x9d, 0x6e6: 0x9d, 0x6e7: 0x9d, - 0x6e8: 0x9d, 0x6e9: 0x9d, 0x6ea: 0x9d, 0x6eb: 0x9d, 0x6ec: 0x9d, 0x6ed: 0x9d, 0x6ee: 0x9d, 0x6ef: 0x9d, - 0x6f0: 0x9d, 0x6f1: 0x9d, 0x6f2: 0x9d, 0x6f3: 0x9d, 0x6f4: 0x9d, 0x6f5: 0x9d, 0x6f6: 0x9d, 0x6f7: 0x9d, - 0x6f8: 0x9d, 0x6f9: 0x9d, 0x6fa: 0x9d, 0x6fb: 0x9d, 0x6fc: 0x9d, 0x6fd: 0x9d, 0x6fe: 0x9d, 0x6ff: 0x9d, + 0x6c0: 0x9f, 0x6c1: 0x9f, 0x6c2: 0x9f, 0x6c3: 0x9f, 0x6c4: 0x9f, 0x6c5: 0x9f, 0x6c6: 0x9f, 0x6c7: 0x9f, + 0x6c8: 0x9f, 0x6c9: 0x9f, 0x6ca: 0x9f, 0x6cb: 0x9f, 0x6cc: 0x9f, 0x6cd: 0x9f, 0x6ce: 0x9f, 0x6cf: 0x9f, + 0x6d0: 0x9f, 0x6d1: 0x9f, 0x6d2: 0x9f, 0x6d3: 0x9f, 0x6d4: 0x9f, 0x6d5: 0x9f, 0x6d6: 0x9f, 0x6d7: 0x9f, + 0x6d8: 0x9f, 0x6d9: 0x9f, 0x6da: 0x9f, 0x6db: 0x9f, 0x6dc: 0x17b, 0x6dd: 0x9f, 0x6de: 0x9f, 0x6df: 0x9f, + 0x6e0: 0x17c, 0x6e1: 0x9f, 0x6e2: 0x9f, 0x6e3: 0x9f, 0x6e4: 0x9f, 0x6e5: 0x9f, 0x6e6: 0x9f, 0x6e7: 0x9f, + 0x6e8: 0x9f, 0x6e9: 0x9f, 0x6ea: 0x9f, 0x6eb: 0x9f, 0x6ec: 0x9f, 0x6ed: 0x9f, 0x6ee: 0x9f, 0x6ef: 0x9f, + 0x6f0: 0x9f, 0x6f1: 0x9f, 0x6f2: 0x9f, 0x6f3: 0x9f, 0x6f4: 0x9f, 0x6f5: 0x9f, 0x6f6: 0x9f, 0x6f7: 0x9f, + 0x6f8: 0x9f, 0x6f9: 0x9f, 0x6fa: 0x9f, 0x6fb: 0x9f, 0x6fc: 0x9f, 0x6fd: 0x9f, 0x6fe: 0x9f, 0x6ff: 0x9f, // Block 0x1c, offset 0x700 - 0x700: 0x9d, 0x701: 0x9d, 0x702: 0x9d, 0x703: 0x9d, 0x704: 0x9d, 0x705: 0x9d, 0x706: 0x9d, 0x707: 0x9d, - 0x708: 0x9d, 0x709: 0x9d, 0x70a: 0x9d, 0x70b: 0x9d, 0x70c: 0x9d, 0x70d: 0x9d, 0x70e: 0x9d, 0x70f: 0x9d, - 0x710: 0x9d, 0x711: 0x9d, 0x712: 0x9d, 0x713: 0x9d, 0x714: 0x9d, 0x715: 0x9d, 0x716: 0x9d, 0x717: 0x9d, - 0x718: 0x9d, 0x719: 0x9d, 0x71a: 0x9d, 0x71b: 0x9d, 0x71c: 0x9d, 0x71d: 0x9d, 0x71e: 0x9d, 0x71f: 0x9d, - 0x720: 0x9d, 0x721: 0x9d, 0x722: 0x9d, 0x723: 0x9d, 0x724: 0x9d, 0x725: 0x9d, 0x726: 0x9d, 0x727: 0x9d, - 0x728: 0x9d, 0x729: 0x9d, 0x72a: 0x9d, 0x72b: 0x9d, 0x72c: 0x9d, 0x72d: 0x9d, 0x72e: 0x9d, 0x72f: 0x9d, - 0x730: 0x9d, 0x731: 0x9d, 0x732: 0x9d, 0x733: 0x9d, 0x734: 0x9d, 0x735: 0x9d, 0x736: 0x9d, 0x737: 0x9d, - 0x738: 0x9d, 0x739: 0x9d, 0x73a: 0x174, 0x73b: 0xb8, 0x73c: 0xb8, 0x73d: 0xb8, 0x73e: 0xb8, 0x73f: 0xb8, + 0x700: 0x9f, 0x701: 0x9f, 0x702: 0x9f, 0x703: 0x9f, 0x704: 0x9f, 0x705: 0x9f, 0x706: 0x9f, 0x707: 0x9f, + 0x708: 0x9f, 0x709: 0x9f, 0x70a: 0x9f, 0x70b: 0x9f, 0x70c: 0x9f, 0x70d: 0x9f, 0x70e: 0x9f, 0x70f: 0x9f, + 0x710: 0x9f, 0x711: 0x9f, 0x712: 0x9f, 0x713: 0x9f, 0x714: 0x9f, 0x715: 0x9f, 0x716: 0x9f, 0x717: 0x9f, + 0x718: 0x9f, 0x719: 0x9f, 0x71a: 0x9f, 0x71b: 0x9f, 0x71c: 0x9f, 0x71d: 0x9f, 0x71e: 0x9f, 0x71f: 0x9f, + 0x720: 0x9f, 0x721: 0x9f, 0x722: 0x9f, 0x723: 0x9f, 0x724: 0x9f, 0x725: 0x9f, 0x726: 0x9f, 0x727: 0x9f, + 0x728: 0x9f, 0x729: 0x9f, 0x72a: 0x9f, 0x72b: 0x9f, 0x72c: 0x9f, 0x72d: 0x9f, 0x72e: 0x9f, 0x72f: 0x9f, + 0x730: 0x9f, 0x731: 0x9f, 0x732: 0x9f, 0x733: 0x9f, 0x734: 0x9f, 0x735: 0x9f, 0x736: 0x9f, 0x737: 0x9f, + 0x738: 0x9f, 0x739: 0x9f, 0x73a: 0x17d, 0x73b: 0x9f, 0x73c: 0x9f, 0x73d: 0x9f, 0x73e: 0x9f, 0x73f: 0x9f, // Block 0x1d, offset 0x740 - 0x740: 0xb8, 0x741: 0xb8, 0x742: 0xb8, 0x743: 0xb8, 0x744: 0xb8, 0x745: 0xb8, 0x746: 0xb8, 0x747: 0xb8, - 0x748: 0xb8, 0x749: 0xb8, 0x74a: 0xb8, 0x74b: 0xb8, 0x74c: 0xb8, 
0x74d: 0xb8, 0x74e: 0xb8, 0x74f: 0xb8, - 0x750: 0xb8, 0x751: 0xb8, 0x752: 0xb8, 0x753: 0xb8, 0x754: 0xb8, 0x755: 0xb8, 0x756: 0xb8, 0x757: 0xb8, - 0x758: 0xb8, 0x759: 0xb8, 0x75a: 0xb8, 0x75b: 0xb8, 0x75c: 0xb8, 0x75d: 0xb8, 0x75e: 0xb8, 0x75f: 0xb8, - 0x760: 0x74, 0x761: 0x75, 0x762: 0x76, 0x763: 0x175, 0x764: 0x77, 0x765: 0x78, 0x766: 0x176, 0x767: 0x79, - 0x768: 0x7a, 0x769: 0xb8, 0x76a: 0xb8, 0x76b: 0xb8, 0x76c: 0xb8, 0x76d: 0xb8, 0x76e: 0xb8, 0x76f: 0xb8, - 0x770: 0xb8, 0x771: 0xb8, 0x772: 0xb8, 0x773: 0xb8, 0x774: 0xb8, 0x775: 0xb8, 0x776: 0xb8, 0x777: 0xb8, - 0x778: 0xb8, 0x779: 0xb8, 0x77a: 0xb8, 0x77b: 0xb8, 0x77c: 0xb8, 0x77d: 0xb8, 0x77e: 0xb8, 0x77f: 0xb8, + 0x740: 0x9f, 0x741: 0x9f, 0x742: 0x9f, 0x743: 0x9f, 0x744: 0x9f, 0x745: 0x9f, 0x746: 0x9f, 0x747: 0x9f, + 0x748: 0x9f, 0x749: 0x9f, 0x74a: 0x9f, 0x74b: 0x9f, 0x74c: 0x9f, 0x74d: 0x9f, 0x74e: 0x9f, 0x74f: 0x9f, + 0x750: 0x9f, 0x751: 0x9f, 0x752: 0x9f, 0x753: 0x9f, 0x754: 0x9f, 0x755: 0x9f, 0x756: 0x9f, 0x757: 0x9f, + 0x758: 0x9f, 0x759: 0x9f, 0x75a: 0x9f, 0x75b: 0x9f, 0x75c: 0x9f, 0x75d: 0x9f, 0x75e: 0x9f, 0x75f: 0x9f, + 0x760: 0x9f, 0x761: 0x9f, 0x762: 0x9f, 0x763: 0x9f, 0x764: 0x9f, 0x765: 0x9f, 0x766: 0x9f, 0x767: 0x9f, + 0x768: 0x9f, 0x769: 0x9f, 0x76a: 0x9f, 0x76b: 0x9f, 0x76c: 0x9f, 0x76d: 0x9f, 0x76e: 0x9f, 0x76f: 0x17e, + 0x770: 0xba, 0x771: 0xba, 0x772: 0xba, 0x773: 0xba, 0x774: 0xba, 0x775: 0xba, 0x776: 0xba, 0x777: 0xba, + 0x778: 0xba, 0x779: 0xba, 0x77a: 0xba, 0x77b: 0xba, 0x77c: 0xba, 0x77d: 0xba, 0x77e: 0xba, 0x77f: 0xba, // Block 0x1e, offset 0x780 - 0x790: 0x0d, 0x791: 0x0e, 0x792: 0x0f, 0x793: 0x10, 0x794: 0x11, 0x795: 0x0b, 0x796: 0x12, 0x797: 0x07, - 0x798: 0x13, 0x799: 0x0b, 0x79a: 0x0b, 0x79b: 0x14, 0x79c: 0x0b, 0x79d: 0x15, 0x79e: 0x16, 0x79f: 0x17, - 0x7a0: 0x07, 0x7a1: 0x07, 0x7a2: 0x07, 0x7a3: 0x07, 0x7a4: 0x07, 0x7a5: 0x07, 0x7a6: 0x07, 0x7a7: 0x07, - 0x7a8: 0x07, 0x7a9: 0x07, 0x7aa: 0x18, 0x7ab: 0x19, 0x7ac: 0x1a, 0x7ad: 0x0b, 0x7ae: 0x0b, 0x7af: 0x1b, - 0x7b0: 0x0b, 0x7b1: 0x0b, 0x7b2: 0x0b, 0x7b3: 0x0b, 0x7b4: 0x0b, 0x7b5: 0x0b, 0x7b6: 0x0b, 0x7b7: 0x0b, - 0x7b8: 0x0b, 0x7b9: 0x0b, 0x7ba: 0x0b, 0x7bb: 0x0b, 0x7bc: 0x0b, 0x7bd: 0x0b, 0x7be: 0x0b, 0x7bf: 0x0b, + 0x780: 0xba, 0x781: 0xba, 0x782: 0xba, 0x783: 0xba, 0x784: 0xba, 0x785: 0xba, 0x786: 0xba, 0x787: 0xba, + 0x788: 0xba, 0x789: 0xba, 0x78a: 0xba, 0x78b: 0xba, 0x78c: 0xba, 0x78d: 0xba, 0x78e: 0xba, 0x78f: 0xba, + 0x790: 0xba, 0x791: 0xba, 0x792: 0xba, 0x793: 0xba, 0x794: 0xba, 0x795: 0xba, 0x796: 0xba, 0x797: 0xba, + 0x798: 0xba, 0x799: 0xba, 0x79a: 0xba, 0x79b: 0xba, 0x79c: 0xba, 0x79d: 0xba, 0x79e: 0xba, 0x79f: 0xba, + 0x7a0: 0x76, 0x7a1: 0x77, 0x7a2: 0x78, 0x7a3: 0x17f, 0x7a4: 0x79, 0x7a5: 0x7a, 0x7a6: 0x180, 0x7a7: 0x7b, + 0x7a8: 0x7c, 0x7a9: 0xba, 0x7aa: 0xba, 0x7ab: 0xba, 0x7ac: 0xba, 0x7ad: 0xba, 0x7ae: 0xba, 0x7af: 0xba, + 0x7b0: 0xba, 0x7b1: 0xba, 0x7b2: 0xba, 0x7b3: 0xba, 0x7b4: 0xba, 0x7b5: 0xba, 0x7b6: 0xba, 0x7b7: 0xba, + 0x7b8: 0xba, 0x7b9: 0xba, 0x7ba: 0xba, 0x7bb: 0xba, 0x7bc: 0xba, 0x7bd: 0xba, 0x7be: 0xba, 0x7bf: 0xba, // Block 0x1f, offset 0x7c0 - 0x7c0: 0x0b, 0x7c1: 0x0b, 0x7c2: 0x0b, 0x7c3: 0x0b, 0x7c4: 0x0b, 0x7c5: 0x0b, 0x7c6: 0x0b, 0x7c7: 0x0b, - 0x7c8: 0x0b, 0x7c9: 0x0b, 0x7ca: 0x0b, 0x7cb: 0x0b, 0x7cc: 0x0b, 0x7cd: 0x0b, 0x7ce: 0x0b, 0x7cf: 0x0b, - 0x7d0: 0x0b, 0x7d1: 0x0b, 0x7d2: 0x0b, 0x7d3: 0x0b, 0x7d4: 0x0b, 0x7d5: 0x0b, 0x7d6: 0x0b, 0x7d7: 0x0b, - 0x7d8: 0x0b, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x0b, 0x7dc: 0x0b, 0x7dd: 0x0b, 0x7de: 0x0b, 0x7df: 0x0b, - 0x7e0: 0x0b, 0x7e1: 0x0b, 0x7e2: 0x0b, 0x7e3: 0x0b, 0x7e4: 
0x0b, 0x7e5: 0x0b, 0x7e6: 0x0b, 0x7e7: 0x0b, - 0x7e8: 0x0b, 0x7e9: 0x0b, 0x7ea: 0x0b, 0x7eb: 0x0b, 0x7ec: 0x0b, 0x7ed: 0x0b, 0x7ee: 0x0b, 0x7ef: 0x0b, + 0x7d0: 0x0d, 0x7d1: 0x0e, 0x7d2: 0x0f, 0x7d3: 0x10, 0x7d4: 0x11, 0x7d5: 0x0b, 0x7d6: 0x12, 0x7d7: 0x07, + 0x7d8: 0x13, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x14, 0x7dc: 0x0b, 0x7dd: 0x15, 0x7de: 0x16, 0x7df: 0x17, + 0x7e0: 0x07, 0x7e1: 0x07, 0x7e2: 0x07, 0x7e3: 0x07, 0x7e4: 0x07, 0x7e5: 0x07, 0x7e6: 0x07, 0x7e7: 0x07, + 0x7e8: 0x07, 0x7e9: 0x07, 0x7ea: 0x18, 0x7eb: 0x19, 0x7ec: 0x1a, 0x7ed: 0x07, 0x7ee: 0x1b, 0x7ef: 0x1c, 0x7f0: 0x0b, 0x7f1: 0x0b, 0x7f2: 0x0b, 0x7f3: 0x0b, 0x7f4: 0x0b, 0x7f5: 0x0b, 0x7f6: 0x0b, 0x7f7: 0x0b, 0x7f8: 0x0b, 0x7f9: 0x0b, 0x7fa: 0x0b, 0x7fb: 0x0b, 0x7fc: 0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b, // Block 0x20, offset 0x800 - 0x800: 0x177, 0x801: 0x178, 0x802: 0xb8, 0x803: 0xb8, 0x804: 0x179, 0x805: 0x179, 0x806: 0x179, 0x807: 0x17a, - 0x808: 0xb8, 0x809: 0xb8, 0x80a: 0xb8, 0x80b: 0xb8, 0x80c: 0xb8, 0x80d: 0xb8, 0x80e: 0xb8, 0x80f: 0xb8, - 0x810: 0xb8, 0x811: 0xb8, 0x812: 0xb8, 0x813: 0xb8, 0x814: 0xb8, 0x815: 0xb8, 0x816: 0xb8, 0x817: 0xb8, - 0x818: 0xb8, 0x819: 0xb8, 0x81a: 0xb8, 0x81b: 0xb8, 0x81c: 0xb8, 0x81d: 0xb8, 0x81e: 0xb8, 0x81f: 0xb8, - 0x820: 0xb8, 0x821: 0xb8, 0x822: 0xb8, 0x823: 0xb8, 0x824: 0xb8, 0x825: 0xb8, 0x826: 0xb8, 0x827: 0xb8, - 0x828: 0xb8, 0x829: 0xb8, 0x82a: 0xb8, 0x82b: 0xb8, 0x82c: 0xb8, 0x82d: 0xb8, 0x82e: 0xb8, 0x82f: 0xb8, - 0x830: 0xb8, 0x831: 0xb8, 0x832: 0xb8, 0x833: 0xb8, 0x834: 0xb8, 0x835: 0xb8, 0x836: 0xb8, 0x837: 0xb8, - 0x838: 0xb8, 0x839: 0xb8, 0x83a: 0xb8, 0x83b: 0xb8, 0x83c: 0xb8, 0x83d: 0xb8, 0x83e: 0xb8, 0x83f: 0xb8, + 0x800: 0x0b, 0x801: 0x0b, 0x802: 0x0b, 0x803: 0x0b, 0x804: 0x0b, 0x805: 0x0b, 0x806: 0x0b, 0x807: 0x0b, + 0x808: 0x0b, 0x809: 0x0b, 0x80a: 0x0b, 0x80b: 0x0b, 0x80c: 0x0b, 0x80d: 0x0b, 0x80e: 0x0b, 0x80f: 0x0b, + 0x810: 0x0b, 0x811: 0x0b, 0x812: 0x0b, 0x813: 0x0b, 0x814: 0x0b, 0x815: 0x0b, 0x816: 0x0b, 0x817: 0x0b, + 0x818: 0x0b, 0x819: 0x0b, 0x81a: 0x0b, 0x81b: 0x0b, 0x81c: 0x0b, 0x81d: 0x0b, 0x81e: 0x0b, 0x81f: 0x0b, + 0x820: 0x0b, 0x821: 0x0b, 0x822: 0x0b, 0x823: 0x0b, 0x824: 0x0b, 0x825: 0x0b, 0x826: 0x0b, 0x827: 0x0b, + 0x828: 0x0b, 0x829: 0x0b, 0x82a: 0x0b, 0x82b: 0x0b, 0x82c: 0x0b, 0x82d: 0x0b, 0x82e: 0x0b, 0x82f: 0x0b, + 0x830: 0x0b, 0x831: 0x0b, 0x832: 0x0b, 0x833: 0x0b, 0x834: 0x0b, 0x835: 0x0b, 0x836: 0x0b, 0x837: 0x0b, + 0x838: 0x0b, 0x839: 0x0b, 0x83a: 0x0b, 0x83b: 0x0b, 0x83c: 0x0b, 0x83d: 0x0b, 0x83e: 0x0b, 0x83f: 0x0b, // Block 0x21, offset 0x840 - 0x840: 0x0b, 0x841: 0x0b, 0x842: 0x0b, 0x843: 0x0b, 0x844: 0x0b, 0x845: 0x0b, 0x846: 0x0b, 0x847: 0x0b, - 0x848: 0x0b, 0x849: 0x0b, 0x84a: 0x0b, 0x84b: 0x0b, 0x84c: 0x0b, 0x84d: 0x0b, 0x84e: 0x0b, 0x84f: 0x0b, - 0x850: 0x0b, 0x851: 0x0b, 0x852: 0x0b, 0x853: 0x0b, 0x854: 0x0b, 0x855: 0x0b, 0x856: 0x0b, 0x857: 0x0b, - 0x858: 0x0b, 0x859: 0x0b, 0x85a: 0x0b, 0x85b: 0x0b, 0x85c: 0x0b, 0x85d: 0x0b, 0x85e: 0x0b, 0x85f: 0x0b, - 0x860: 0x1e, 0x861: 0x0b, 0x862: 0x0b, 0x863: 0x0b, 0x864: 0x0b, 0x865: 0x0b, 0x866: 0x0b, 0x867: 0x0b, - 0x868: 0x0b, 0x869: 0x0b, 0x86a: 0x0b, 0x86b: 0x0b, 0x86c: 0x0b, 0x86d: 0x0b, 0x86e: 0x0b, 0x86f: 0x0b, - 0x870: 0x0b, 0x871: 0x0b, 0x872: 0x0b, 0x873: 0x0b, 0x874: 0x0b, 0x875: 0x0b, 0x876: 0x0b, 0x877: 0x0b, - 0x878: 0x0b, 0x879: 0x0b, 0x87a: 0x0b, 0x87b: 0x0b, 0x87c: 0x0b, 0x87d: 0x0b, 0x87e: 0x0b, 0x87f: 0x0b, + 0x840: 0x181, 0x841: 0x182, 0x842: 0xba, 0x843: 0xba, 0x844: 0x183, 0x845: 0x183, 0x846: 0x183, 0x847: 0x184, + 0x848: 0xba, 0x849: 0xba, 0x84a: 0xba, 0x84b: 0xba, 
0x84c: 0xba, 0x84d: 0xba, 0x84e: 0xba, 0x84f: 0xba, + 0x850: 0xba, 0x851: 0xba, 0x852: 0xba, 0x853: 0xba, 0x854: 0xba, 0x855: 0xba, 0x856: 0xba, 0x857: 0xba, + 0x858: 0xba, 0x859: 0xba, 0x85a: 0xba, 0x85b: 0xba, 0x85c: 0xba, 0x85d: 0xba, 0x85e: 0xba, 0x85f: 0xba, + 0x860: 0xba, 0x861: 0xba, 0x862: 0xba, 0x863: 0xba, 0x864: 0xba, 0x865: 0xba, 0x866: 0xba, 0x867: 0xba, + 0x868: 0xba, 0x869: 0xba, 0x86a: 0xba, 0x86b: 0xba, 0x86c: 0xba, 0x86d: 0xba, 0x86e: 0xba, 0x86f: 0xba, + 0x870: 0xba, 0x871: 0xba, 0x872: 0xba, 0x873: 0xba, 0x874: 0xba, 0x875: 0xba, 0x876: 0xba, 0x877: 0xba, + 0x878: 0xba, 0x879: 0xba, 0x87a: 0xba, 0x87b: 0xba, 0x87c: 0xba, 0x87d: 0xba, 0x87e: 0xba, 0x87f: 0xba, // Block 0x22, offset 0x880 0x880: 0x0b, 0x881: 0x0b, 0x882: 0x0b, 0x883: 0x0b, 0x884: 0x0b, 0x885: 0x0b, 0x886: 0x0b, 0x887: 0x0b, 0x888: 0x0b, 0x889: 0x0b, 0x88a: 0x0b, 0x88b: 0x0b, 0x88c: 0x0b, 0x88d: 0x0b, 0x88e: 0x0b, 0x88f: 0x0b, + 0x890: 0x0b, 0x891: 0x0b, 0x892: 0x0b, 0x893: 0x0b, 0x894: 0x0b, 0x895: 0x0b, 0x896: 0x0b, 0x897: 0x0b, + 0x898: 0x0b, 0x899: 0x0b, 0x89a: 0x0b, 0x89b: 0x0b, 0x89c: 0x0b, 0x89d: 0x0b, 0x89e: 0x0b, 0x89f: 0x0b, + 0x8a0: 0x1f, 0x8a1: 0x0b, 0x8a2: 0x0b, 0x8a3: 0x0b, 0x8a4: 0x0b, 0x8a5: 0x0b, 0x8a6: 0x0b, 0x8a7: 0x0b, + 0x8a8: 0x0b, 0x8a9: 0x0b, 0x8aa: 0x0b, 0x8ab: 0x0b, 0x8ac: 0x0b, 0x8ad: 0x0b, 0x8ae: 0x0b, 0x8af: 0x0b, + 0x8b0: 0x0b, 0x8b1: 0x0b, 0x8b2: 0x0b, 0x8b3: 0x0b, 0x8b4: 0x0b, 0x8b5: 0x0b, 0x8b6: 0x0b, 0x8b7: 0x0b, + 0x8b8: 0x0b, 0x8b9: 0x0b, 0x8ba: 0x0b, 0x8bb: 0x0b, 0x8bc: 0x0b, 0x8bd: 0x0b, 0x8be: 0x0b, 0x8bf: 0x0b, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x0b, 0x8c1: 0x0b, 0x8c2: 0x0b, 0x8c3: 0x0b, 0x8c4: 0x0b, 0x8c5: 0x0b, 0x8c6: 0x0b, 0x8c7: 0x0b, + 0x8c8: 0x0b, 0x8c9: 0x0b, 0x8ca: 0x0b, 0x8cb: 0x0b, 0x8cc: 0x0b, 0x8cd: 0x0b, 0x8ce: 0x0b, 0x8cf: 0x0b, } -// idnaSparseOffset: 256 entries, 512 bytes -var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x34, 0x3f, 0x4b, 0x5c, 0x60, 0x6f, 0x74, 0x7b, 0x87, 0x95, 0xa3, 0xa8, 0xb1, 0xc1, 0xcf, 0xdc, 0xe8, 0xf9, 0x103, 0x10a, 0x117, 0x128, 0x12f, 0x13a, 0x149, 0x157, 0x161, 0x163, 0x167, 0x169, 0x175, 0x180, 0x188, 0x18e, 0x194, 0x199, 0x19e, 0x1a1, 0x1a5, 0x1ab, 0x1b0, 0x1bc, 0x1c6, 0x1cc, 0x1dd, 0x1e7, 0x1ea, 0x1f2, 0x1f5, 0x202, 0x20a, 0x20e, 0x215, 0x21d, 0x22d, 0x239, 0x23b, 0x245, 0x251, 0x25d, 0x269, 0x271, 0x276, 0x280, 0x291, 0x295, 0x2a0, 0x2a4, 0x2ad, 0x2b5, 0x2bb, 0x2c0, 0x2c3, 0x2c6, 0x2ca, 0x2d0, 0x2d4, 0x2d8, 0x2de, 0x2e5, 0x2eb, 0x2f3, 0x2fa, 0x305, 0x30f, 0x313, 0x316, 0x31c, 0x320, 0x322, 0x325, 0x327, 0x32a, 0x334, 0x337, 0x346, 0x34a, 0x34f, 0x352, 0x356, 0x35b, 0x360, 0x366, 0x36c, 0x37b, 0x381, 0x385, 0x394, 0x399, 0x3a1, 0x3ab, 0x3b6, 0x3be, 0x3cf, 0x3d8, 0x3e8, 0x3f5, 0x3ff, 0x404, 0x411, 0x415, 0x41a, 0x41c, 0x420, 0x422, 0x426, 0x42f, 0x435, 0x439, 0x449, 0x453, 0x458, 0x45b, 0x461, 0x468, 0x46d, 0x471, 0x477, 0x47c, 0x485, 0x48a, 0x490, 0x497, 0x49e, 0x4a5, 0x4a9, 0x4ae, 0x4b1, 0x4b6, 0x4c2, 0x4c8, 0x4cd, 0x4d4, 0x4dc, 0x4e1, 0x4e5, 0x4f5, 0x4fc, 0x500, 0x504, 0x50b, 0x50e, 0x511, 0x515, 0x519, 0x51f, 0x528, 0x534, 0x53b, 0x544, 0x54c, 0x553, 0x561, 0x56e, 0x57b, 0x584, 0x588, 0x596, 0x59e, 0x5a9, 0x5b2, 0x5b8, 0x5c0, 0x5c9, 0x5d3, 0x5d6, 0x5e2, 0x5e5, 0x5ea, 0x5ed, 0x5f7, 0x600, 0x60c, 0x60f, 0x614, 0x617, 0x61a, 0x61d, 0x624, 0x62b, 0x62f, 0x63a, 0x63d, 0x643, 0x648, 0x64c, 0x64f, 0x652, 0x655, 0x65a, 0x664, 0x667, 0x66b, 0x67a, 0x686, 0x68a, 0x68f, 0x694, 0x698, 0x69d, 0x6a6, 0x6b1, 0x6b7, 0x6bf, 0x6c3, 0x6c7, 0x6cd, 0x6d3, 0x6d8, 0x6db, 0x6e9, 0x6f0, 0x6f3, 0x6f6, 0x6fa, 0x700, 
0x705, 0x70f, 0x714, 0x717, 0x71a, 0x71d, 0x720, 0x724, 0x727, 0x737, 0x748, 0x74d, 0x74f, 0x751} +// idnaSparseOffset: 264 entries, 528 bytes +var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x34, 0x3f, 0x4b, 0x4f, 0x5e, 0x63, 0x6b, 0x77, 0x85, 0x8a, 0x93, 0xa3, 0xb1, 0xbd, 0xc9, 0xda, 0xe4, 0xeb, 0xf8, 0x109, 0x110, 0x11b, 0x12a, 0x138, 0x142, 0x144, 0x149, 0x14c, 0x14f, 0x151, 0x15d, 0x168, 0x170, 0x176, 0x17c, 0x181, 0x186, 0x189, 0x18d, 0x193, 0x198, 0x1a4, 0x1ae, 0x1b4, 0x1c5, 0x1cf, 0x1d2, 0x1da, 0x1dd, 0x1ea, 0x1f2, 0x1f6, 0x1fd, 0x205, 0x215, 0x221, 0x223, 0x22d, 0x239, 0x245, 0x251, 0x259, 0x25e, 0x268, 0x279, 0x27d, 0x288, 0x28c, 0x295, 0x29d, 0x2a3, 0x2a8, 0x2ab, 0x2af, 0x2b5, 0x2b9, 0x2bd, 0x2c3, 0x2ca, 0x2d0, 0x2d8, 0x2df, 0x2ea, 0x2f4, 0x2f8, 0x2fb, 0x301, 0x305, 0x307, 0x30a, 0x30c, 0x30f, 0x319, 0x31c, 0x32b, 0x32f, 0x334, 0x337, 0x33b, 0x340, 0x345, 0x34b, 0x351, 0x360, 0x366, 0x36a, 0x379, 0x37e, 0x386, 0x390, 0x39b, 0x3a3, 0x3b4, 0x3bd, 0x3cd, 0x3da, 0x3e4, 0x3e9, 0x3f6, 0x3fa, 0x3ff, 0x401, 0x405, 0x407, 0x40b, 0x414, 0x41a, 0x41e, 0x42e, 0x438, 0x43d, 0x440, 0x446, 0x44d, 0x452, 0x456, 0x45c, 0x461, 0x46a, 0x46f, 0x475, 0x47c, 0x483, 0x48a, 0x48e, 0x493, 0x496, 0x49b, 0x4a7, 0x4ad, 0x4b2, 0x4b9, 0x4c1, 0x4c6, 0x4ca, 0x4da, 0x4e1, 0x4e5, 0x4e9, 0x4f0, 0x4f2, 0x4f5, 0x4f8, 0x4fc, 0x500, 0x506, 0x50f, 0x51b, 0x522, 0x52b, 0x533, 0x53a, 0x548, 0x555, 0x562, 0x56b, 0x56f, 0x57d, 0x585, 0x590, 0x599, 0x59f, 0x5a7, 0x5b0, 0x5ba, 0x5bd, 0x5c9, 0x5cc, 0x5d1, 0x5de, 0x5e7, 0x5f3, 0x5f6, 0x600, 0x609, 0x615, 0x622, 0x62a, 0x62d, 0x632, 0x635, 0x638, 0x63b, 0x642, 0x649, 0x64d, 0x658, 0x65b, 0x661, 0x666, 0x66a, 0x66d, 0x670, 0x673, 0x676, 0x679, 0x67e, 0x688, 0x68b, 0x68f, 0x69e, 0x6aa, 0x6ae, 0x6b3, 0x6b8, 0x6bc, 0x6c1, 0x6ca, 0x6d5, 0x6db, 0x6e3, 0x6e7, 0x6eb, 0x6f1, 0x6f7, 0x6fc, 0x6ff, 0x70f, 0x716, 0x719, 0x71c, 0x720, 0x726, 0x72b, 0x730, 0x735, 0x738, 0x73d, 0x740, 0x743, 0x747, 0x74b, 0x74e, 0x75e, 0x76f, 0x774, 0x776, 0x778} -// idnaSparseValues: 1876 entries, 7504 bytes -var idnaSparseValues = [1876]valueRange{ +// idnaSparseValues: 1915 entries, 7660 bytes +var idnaSparseValues = [1915]valueRange{ // Block 0x0, offset 0x0 {value: 0x0000, lo: 0x07}, {value: 0xe105, lo: 0x80, hi: 0x96}, @@ -2382,7 +2415,7 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0008, lo: 0xb9, hi: 0xbf}, // Block 0x3, offset 0x25 {value: 0x0000, lo: 0x01}, - {value: 0x1308, lo: 0x80, hi: 0xbf}, + {value: 0x3308, lo: 0x80, hi: 0xbf}, // Block 0x4, offset 0x27 {value: 0x0000, lo: 0x04}, {value: 0x03f5, lo: 0x80, hi: 0x8f}, @@ -2407,155 +2440,123 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0x8b, hi: 0x8c}, {value: 0x0018, lo: 0x8d, hi: 0x8f}, {value: 0x0040, lo: 0x90, hi: 0x90}, - {value: 0x1308, lo: 0x91, hi: 0xbd}, - {value: 0x0018, lo: 0xbe, hi: 0xbe}, - {value: 0x1308, lo: 0xbf, hi: 0xbf}, + {value: 0x3308, lo: 0x91, hi: 0xbd}, + {value: 0x0818, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, // Block 0x7, offset 0x3f {value: 0x0000, lo: 0x0b}, - {value: 0x0018, lo: 0x80, hi: 0x80}, - {value: 0x1308, lo: 0x81, hi: 0x82}, - {value: 0x0018, lo: 0x83, hi: 0x83}, - {value: 0x1308, lo: 0x84, hi: 0x85}, - {value: 0x0018, lo: 0x86, hi: 0x86}, - {value: 0x1308, lo: 0x87, hi: 0x87}, + {value: 0x0818, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x82}, + {value: 0x0818, lo: 0x83, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x85}, + {value: 0x0818, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, {value: 0x0040, lo: 0x88, hi: 0x8f}, 
- {value: 0x0008, lo: 0x90, hi: 0xaa}, + {value: 0x0808, lo: 0x90, hi: 0xaa}, {value: 0x0040, lo: 0xab, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xb4}, + {value: 0x0808, lo: 0xb0, hi: 0xb4}, {value: 0x0040, lo: 0xb5, hi: 0xbf}, // Block 0x8, offset 0x4b - {value: 0x0000, lo: 0x10}, - {value: 0x0018, lo: 0x80, hi: 0x80}, - {value: 0x0208, lo: 0x81, hi: 0x87}, - {value: 0x0408, lo: 0x88, hi: 0x88}, - {value: 0x0208, lo: 0x89, hi: 0x8a}, - {value: 0x1308, lo: 0x8b, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa9}, - {value: 0x0018, lo: 0xaa, hi: 0xad}, - {value: 0x0208, lo: 0xae, hi: 0xaf}, - {value: 0x1308, lo: 0xb0, hi: 0xb0}, - {value: 0x0408, lo: 0xb1, hi: 0xb3}, - {value: 0x0008, lo: 0xb4, hi: 0xb4}, - {value: 0x0429, lo: 0xb5, hi: 0xb5}, - {value: 0x0451, lo: 0xb6, hi: 0xb6}, - {value: 0x0479, lo: 0xb7, hi: 0xb7}, - {value: 0x04a1, lo: 0xb8, hi: 0xb8}, - {value: 0x0208, lo: 0xb9, hi: 0xbf}, - // Block 0x9, offset 0x5c {value: 0x0000, lo: 0x03}, - {value: 0x0208, lo: 0x80, hi: 0x87}, - {value: 0x0408, lo: 0x88, hi: 0x99}, - {value: 0x0208, lo: 0x9a, hi: 0xbf}, - // Block 0xa, offset 0x60 + {value: 0x0a08, lo: 0x80, hi: 0x87}, + {value: 0x0c08, lo: 0x88, hi: 0x99}, + {value: 0x0a08, lo: 0x9a, hi: 0xbf}, + // Block 0x9, offset 0x4f {value: 0x0000, lo: 0x0e}, - {value: 0x1308, lo: 0x80, hi: 0x8a}, + {value: 0x3308, lo: 0x80, hi: 0x8a}, {value: 0x0040, lo: 0x8b, hi: 0x8c}, - {value: 0x0408, lo: 0x8d, hi: 0x8d}, - {value: 0x0208, lo: 0x8e, hi: 0x98}, - {value: 0x0408, lo: 0x99, hi: 0x9b}, - {value: 0x0208, lo: 0x9c, hi: 0xaa}, - {value: 0x0408, lo: 0xab, hi: 0xac}, - {value: 0x0208, lo: 0xad, hi: 0xb0}, - {value: 0x0408, lo: 0xb1, hi: 0xb1}, - {value: 0x0208, lo: 0xb2, hi: 0xb2}, - {value: 0x0408, lo: 0xb3, hi: 0xb4}, - {value: 0x0208, lo: 0xb5, hi: 0xb7}, - {value: 0x0408, lo: 0xb8, hi: 0xb9}, - {value: 0x0208, lo: 0xba, hi: 0xbf}, - // Block 0xb, offset 0x6f + {value: 0x0c08, lo: 0x8d, hi: 0x8d}, + {value: 0x0a08, lo: 0x8e, hi: 0x98}, + {value: 0x0c08, lo: 0x99, hi: 0x9b}, + {value: 0x0a08, lo: 0x9c, hi: 0xaa}, + {value: 0x0c08, lo: 0xab, hi: 0xac}, + {value: 0x0a08, lo: 0xad, hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb1}, + {value: 0x0a08, lo: 0xb2, hi: 0xb2}, + {value: 0x0c08, lo: 0xb3, hi: 0xb4}, + {value: 0x0a08, lo: 0xb5, hi: 0xb7}, + {value: 0x0c08, lo: 0xb8, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbf}, + // Block 0xa, offset 0x5e {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0xa5}, - {value: 0x1308, lo: 0xa6, hi: 0xb0}, - {value: 0x0008, lo: 0xb1, hi: 0xb1}, + {value: 0x0808, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xb0}, + {value: 0x0808, lo: 0xb1, hi: 0xb1}, {value: 0x0040, lo: 0xb2, hi: 0xbf}, - // Block 0xc, offset 0x74 - {value: 0x0000, lo: 0x06}, - {value: 0x0008, lo: 0x80, hi: 0x89}, - {value: 0x0208, lo: 0x8a, hi: 0xaa}, - {value: 0x1308, lo: 0xab, hi: 0xb3}, - {value: 0x0008, lo: 0xb4, hi: 0xb5}, - {value: 0x0018, lo: 0xb6, hi: 0xba}, + // Block 0xb, offset 0x63 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x89}, + {value: 0x0a08, lo: 0x8a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xba}, {value: 0x0040, lo: 0xbb, hi: 0xbf}, - // Block 0xd, offset 0x7b + // Block 0xc, offset 0x6b {value: 0x0000, lo: 0x0b}, - {value: 0x0008, lo: 0x80, hi: 0x95}, - {value: 0x1308, lo: 0x96, hi: 0x99}, - {value: 0x0008, lo: 0x9a, hi: 0x9a}, - {value: 0x1308, lo: 0x9b, hi: 0xa3}, - {value: 0x0008, lo: 0xa4, hi: 0xa4}, - {value: 
0x1308, lo: 0xa5, hi: 0xa7}, - {value: 0x0008, lo: 0xa8, hi: 0xa8}, - {value: 0x1308, lo: 0xa9, hi: 0xad}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x99}, + {value: 0x0808, lo: 0x9a, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa3}, + {value: 0x0808, lo: 0xa4, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa7}, + {value: 0x0808, lo: 0xa8, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xad}, {value: 0x0040, lo: 0xae, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xbe}, + {value: 0x0818, lo: 0xb0, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xe, offset 0x87 - {value: 0x0000, lo: 0x0d}, - {value: 0x0408, lo: 0x80, hi: 0x80}, - {value: 0x0208, lo: 0x81, hi: 0x85}, - {value: 0x0408, lo: 0x86, hi: 0x87}, - {value: 0x0208, lo: 0x88, hi: 0x88}, - {value: 0x0408, lo: 0x89, hi: 0x89}, - {value: 0x0208, lo: 0x8a, hi: 0x93}, - {value: 0x0408, lo: 0x94, hi: 0x94}, - {value: 0x0208, lo: 0x95, hi: 0x95}, - {value: 0x0008, lo: 0x96, hi: 0x98}, - {value: 0x1308, lo: 0x99, hi: 0x9b}, - {value: 0x0040, lo: 0x9c, hi: 0x9d}, - {value: 0x0018, lo: 0x9e, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0xbf}, - // Block 0xf, offset 0x95 + // Block 0xd, offset 0x77 {value: 0x0000, lo: 0x0d}, {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x0208, lo: 0xa0, hi: 0xa9}, - {value: 0x0408, lo: 0xaa, hi: 0xac}, - {value: 0x0008, lo: 0xad, hi: 0xad}, - {value: 0x0408, lo: 0xae, hi: 0xae}, - {value: 0x0208, lo: 0xaf, hi: 0xb0}, - {value: 0x0408, lo: 0xb1, hi: 0xb2}, - {value: 0x0208, lo: 0xb3, hi: 0xb4}, + {value: 0x0a08, lo: 0xa0, hi: 0xa9}, + {value: 0x0c08, lo: 0xaa, hi: 0xac}, + {value: 0x0808, lo: 0xad, hi: 0xad}, + {value: 0x0c08, lo: 0xae, hi: 0xae}, + {value: 0x0a08, lo: 0xaf, hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb2}, + {value: 0x0a08, lo: 0xb3, hi: 0xb4}, {value: 0x0040, lo: 0xb5, hi: 0xb5}, - {value: 0x0208, lo: 0xb6, hi: 0xb8}, - {value: 0x0408, lo: 0xb9, hi: 0xb9}, - {value: 0x0208, lo: 0xba, hi: 0xbd}, + {value: 0x0a08, lo: 0xb6, hi: 0xb8}, + {value: 0x0c08, lo: 0xb9, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbd}, {value: 0x0040, lo: 0xbe, hi: 0xbf}, - // Block 0x10, offset 0xa3 + // Block 0xe, offset 0x85 {value: 0x0000, lo: 0x04}, {value: 0x0040, lo: 0x80, hi: 0x93}, - {value: 0x1308, lo: 0x94, hi: 0xa1}, - {value: 0x0040, lo: 0xa2, hi: 0xa2}, - {value: 0x1308, lo: 0xa3, hi: 0xbf}, - // Block 0x11, offset 0xa8 + {value: 0x3308, lo: 0x94, hi: 0xa1}, + {value: 0x0840, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xbf}, + // Block 0xf, offset 0x8a {value: 0x0000, lo: 0x08}, - {value: 0x1308, lo: 0x80, hi: 0x82}, - {value: 0x1008, lo: 0x83, hi: 0x83}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, {value: 0x0008, lo: 0x84, hi: 0xb9}, - {value: 0x1308, lo: 0xba, hi: 0xba}, - {value: 0x1008, lo: 0xbb, hi: 0xbb}, - {value: 0x1308, lo: 0xbc, hi: 0xbc}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, {value: 0x0008, lo: 0xbd, hi: 0xbd}, - {value: 0x1008, lo: 0xbe, hi: 0xbf}, - // Block 0x12, offset 0xb1 + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x10, offset 0x93 {value: 0x0000, lo: 0x0f}, - {value: 0x1308, lo: 0x80, hi: 0x80}, - {value: 0x1008, lo: 0x81, hi: 0x82}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x82}, {value: 0x0040, lo: 0x83, hi: 0x85}, - {value: 0x1008, lo: 0x86, hi: 0x88}, + {value: 0x3008, lo: 0x86, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0x89}, - {value: 0x1008, lo: 0x8a, hi: 0x8c}, - {value: 0x1b08, lo: 0x8d, hi: 
0x8d}, + {value: 0x3008, lo: 0x8a, hi: 0x8c}, + {value: 0x3b08, lo: 0x8d, hi: 0x8d}, {value: 0x0040, lo: 0x8e, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x90}, {value: 0x0040, lo: 0x91, hi: 0x96}, - {value: 0x1008, lo: 0x97, hi: 0x97}, + {value: 0x3008, lo: 0x97, hi: 0x97}, {value: 0x0040, lo: 0x98, hi: 0xa5}, {value: 0x0008, lo: 0xa6, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xba}, {value: 0x0040, lo: 0xbb, hi: 0xbf}, - // Block 0x13, offset 0xc1 + // Block 0x11, offset 0xa3 {value: 0x0000, lo: 0x0d}, - {value: 0x1308, lo: 0x80, hi: 0x80}, - {value: 0x1008, lo: 0x81, hi: 0x83}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x83}, {value: 0x0040, lo: 0x84, hi: 0x84}, {value: 0x0008, lo: 0x85, hi: 0x8c}, {value: 0x0040, lo: 0x8d, hi: 0x8d}, @@ -2566,25 +2567,24 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0008, lo: 0xaa, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbc}, {value: 0x0008, lo: 0xbd, hi: 0xbd}, - {value: 0x1308, lo: 0xbe, hi: 0xbf}, - // Block 0x14, offset 0xcf - {value: 0x0000, lo: 0x0c}, - {value: 0x0040, lo: 0x80, hi: 0x80}, - {value: 0x1308, lo: 0x81, hi: 0x81}, - {value: 0x1008, lo: 0x82, hi: 0x83}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x12, offset 0xb1 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, {value: 0x0040, lo: 0x84, hi: 0x84}, {value: 0x0008, lo: 0x85, hi: 0x8c}, {value: 0x0040, lo: 0x8d, hi: 0x8d}, {value: 0x0008, lo: 0x8e, hi: 0x90}, {value: 0x0040, lo: 0x91, hi: 0x91}, {value: 0x0008, lo: 0x92, hi: 0xba}, - {value: 0x0040, lo: 0xbb, hi: 0xbc}, + {value: 0x3b08, lo: 0xbb, hi: 0xbc}, {value: 0x0008, lo: 0xbd, hi: 0xbd}, - {value: 0x1008, lo: 0xbe, hi: 0xbf}, - // Block 0x15, offset 0xdc + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x13, offset 0xbd {value: 0x0000, lo: 0x0b}, {value: 0x0040, lo: 0x80, hi: 0x81}, - {value: 0x1008, lo: 0x82, hi: 0x83}, + {value: 0x3008, lo: 0x82, hi: 0x83}, {value: 0x0040, lo: 0x84, hi: 0x84}, {value: 0x0008, lo: 0x85, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0x99}, @@ -2594,50 +2594,50 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0xbc, hi: 0xbc}, {value: 0x0008, lo: 0xbd, hi: 0xbd}, {value: 0x0040, lo: 0xbe, hi: 0xbf}, - // Block 0x16, offset 0xe8 + // Block 0x14, offset 0xc9 {value: 0x0000, lo: 0x10}, {value: 0x0008, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x89}, - {value: 0x1b08, lo: 0x8a, hi: 0x8a}, + {value: 0x3b08, lo: 0x8a, hi: 0x8a}, {value: 0x0040, lo: 0x8b, hi: 0x8e}, - {value: 0x1008, lo: 0x8f, hi: 0x91}, - {value: 0x1308, lo: 0x92, hi: 0x94}, + {value: 0x3008, lo: 0x8f, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x94}, {value: 0x0040, lo: 0x95, hi: 0x95}, - {value: 0x1308, lo: 0x96, hi: 0x96}, + {value: 0x3308, lo: 0x96, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0x97}, - {value: 0x1008, lo: 0x98, hi: 0x9f}, + {value: 0x3008, lo: 0x98, hi: 0x9f}, {value: 0x0040, lo: 0xa0, hi: 0xa5}, {value: 0x0008, lo: 0xa6, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xb1}, - {value: 0x1008, lo: 0xb2, hi: 0xb3}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, {value: 0x0018, lo: 0xb4, hi: 0xb4}, {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0x17, offset 0xf9 + // Block 0x15, offset 0xda {value: 0x0000, lo: 0x09}, {value: 0x0040, lo: 0x80, hi: 0x80}, {value: 0x0008, lo: 0x81, hi: 0xb0}, - {value: 0x1308, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb1, hi: 0xb1}, {value: 0x0008, lo: 0xb2, hi: 0xb2}, {value: 0x08f1, lo: 0xb3, hi: 0xb3}, - {value: 0x1308, lo: 0xb4, hi: 0xb9}, - {value: 0x1b08, lo: 0xba, hi: 0xba}, + 
{value: 0x3308, lo: 0xb4, hi: 0xb9}, + {value: 0x3b08, lo: 0xba, hi: 0xba}, {value: 0x0040, lo: 0xbb, hi: 0xbe}, {value: 0x0018, lo: 0xbf, hi: 0xbf}, - // Block 0x18, offset 0x103 + // Block 0x16, offset 0xe4 {value: 0x0000, lo: 0x06}, {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x1308, lo: 0x87, hi: 0x8e}, + {value: 0x3308, lo: 0x87, hi: 0x8e}, {value: 0x0018, lo: 0x8f, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0018, lo: 0x9a, hi: 0x9b}, {value: 0x0040, lo: 0x9c, hi: 0xbf}, - // Block 0x19, offset 0x10a + // Block 0x17, offset 0xeb {value: 0x0000, lo: 0x0c}, {value: 0x0008, lo: 0x80, hi: 0x84}, {value: 0x0040, lo: 0x85, hi: 0x85}, {value: 0x0008, lo: 0x86, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x87}, - {value: 0x1308, lo: 0x88, hi: 0x8d}, + {value: 0x3308, lo: 0x88, hi: 0x8d}, {value: 0x0040, lo: 0x8e, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9b}, @@ -2645,76 +2645,76 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0999, lo: 0x9d, hi: 0x9d}, {value: 0x0008, lo: 0x9e, hi: 0x9f}, {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0x1a, offset 0x117 + // Block 0x18, offset 0xf8 {value: 0x0000, lo: 0x10}, {value: 0x0008, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x8a}, {value: 0x0008, lo: 0x8b, hi: 0x8b}, {value: 0xe03d, lo: 0x8c, hi: 0x8c}, {value: 0x0018, lo: 0x8d, hi: 0x97}, - {value: 0x1308, lo: 0x98, hi: 0x99}, + {value: 0x3308, lo: 0x98, hi: 0x99}, {value: 0x0018, lo: 0x9a, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xa9}, {value: 0x0018, lo: 0xaa, hi: 0xb4}, - {value: 0x1308, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, {value: 0x0018, lo: 0xb6, hi: 0xb6}, - {value: 0x1308, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, {value: 0x0018, lo: 0xb8, hi: 0xb8}, - {value: 0x1308, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xb9, hi: 0xb9}, {value: 0x0018, lo: 0xba, hi: 0xbd}, - {value: 0x1008, lo: 0xbe, hi: 0xbf}, - // Block 0x1b, offset 0x128 + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x19, offset 0x109 {value: 0x0000, lo: 0x06}, {value: 0x0018, lo: 0x80, hi: 0x85}, - {value: 0x1308, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x86, hi: 0x86}, {value: 0x0018, lo: 0x87, hi: 0x8c}, {value: 0x0040, lo: 0x8d, hi: 0x8d}, {value: 0x0018, lo: 0x8e, hi: 0x9a}, {value: 0x0040, lo: 0x9b, hi: 0xbf}, - // Block 0x1c, offset 0x12f + // Block 0x1a, offset 0x110 {value: 0x0000, lo: 0x0a}, {value: 0x0008, lo: 0x80, hi: 0xaa}, - {value: 0x1008, lo: 0xab, hi: 0xac}, - {value: 0x1308, lo: 0xad, hi: 0xb0}, - {value: 0x1008, lo: 0xb1, hi: 0xb1}, - {value: 0x1308, lo: 0xb2, hi: 0xb7}, - {value: 0x1008, lo: 0xb8, hi: 0xb8}, - {value: 0x1b08, lo: 0xb9, hi: 0xba}, - {value: 0x1008, lo: 0xbb, hi: 0xbc}, - {value: 0x1308, lo: 0xbd, hi: 0xbe}, + {value: 0x3008, lo: 0xab, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbe}, {value: 0x0008, lo: 0xbf, hi: 0xbf}, - // Block 0x1d, offset 0x13a + // Block 0x1b, offset 0x11b {value: 0x0000, lo: 0x0e}, {value: 0x0008, lo: 0x80, hi: 0x89}, {value: 0x0018, lo: 0x8a, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x95}, - {value: 0x1008, lo: 0x96, hi: 0x97}, - {value: 0x1308, lo: 0x98, hi: 0x99}, + {value: 0x3008, lo: 0x96, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, {value: 0x0008, lo: 0x9a, hi: 0x9d}, - {value: 0x1308, lo: 0x9e, hi: 0xa0}, + 
{value: 0x3308, lo: 0x9e, hi: 0xa0}, {value: 0x0008, lo: 0xa1, hi: 0xa1}, - {value: 0x1008, lo: 0xa2, hi: 0xa4}, + {value: 0x3008, lo: 0xa2, hi: 0xa4}, {value: 0x0008, lo: 0xa5, hi: 0xa6}, - {value: 0x1008, lo: 0xa7, hi: 0xad}, + {value: 0x3008, lo: 0xa7, hi: 0xad}, {value: 0x0008, lo: 0xae, hi: 0xb0}, - {value: 0x1308, lo: 0xb1, hi: 0xb4}, + {value: 0x3308, lo: 0xb1, hi: 0xb4}, {value: 0x0008, lo: 0xb5, hi: 0xbf}, - // Block 0x1e, offset 0x149 + // Block 0x1c, offset 0x12a {value: 0x0000, lo: 0x0d}, {value: 0x0008, lo: 0x80, hi: 0x81}, - {value: 0x1308, lo: 0x82, hi: 0x82}, - {value: 0x1008, lo: 0x83, hi: 0x84}, - {value: 0x1308, lo: 0x85, hi: 0x86}, - {value: 0x1008, lo: 0x87, hi: 0x8c}, - {value: 0x1308, lo: 0x8d, hi: 0x8d}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x8c}, + {value: 0x3308, lo: 0x8d, hi: 0x8d}, {value: 0x0008, lo: 0x8e, hi: 0x8e}, - {value: 0x1008, lo: 0x8f, hi: 0x8f}, + {value: 0x3008, lo: 0x8f, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x1008, lo: 0x9a, hi: 0x9c}, - {value: 0x1308, lo: 0x9d, hi: 0x9d}, + {value: 0x3008, lo: 0x9a, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, {value: 0x0018, lo: 0x9e, hi: 0x9f}, {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0x1f, offset 0x157 + // Block 0x1d, offset 0x138 {value: 0x0000, lo: 0x09}, {value: 0x0040, lo: 0x80, hi: 0x86}, {value: 0x055d, lo: 0x87, hi: 0x87}, @@ -2725,18 +2725,27 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0018, lo: 0xbb, hi: 0xbb}, {value: 0xe105, lo: 0xbc, hi: 0xbc}, {value: 0x0008, lo: 0xbd, hi: 0xbf}, - // Block 0x20, offset 0x161 + // Block 0x1e, offset 0x142 {value: 0x0000, lo: 0x01}, {value: 0x0018, lo: 0x80, hi: 0xbf}, - // Block 0x21, offset 0x163 - {value: 0x0000, lo: 0x03}, + // Block 0x1f, offset 0x144 + {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x9e}, {value: 0x0040, lo: 0x9f, hi: 0xa0}, - {value: 0x0018, lo: 0xa1, hi: 0xbf}, - // Block 0x22, offset 0x167 + {value: 0x2018, lo: 0xa1, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x20, offset 0x149 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa7}, + {value: 0x2018, lo: 0xa8, hi: 0xbf}, + // Block 0x21, offset 0x14c + {value: 0x0000, lo: 0x02}, + {value: 0x2018, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0xbf}, + // Block 0x22, offset 0x14f {value: 0x0000, lo: 0x01}, {value: 0x0008, lo: 0x80, hi: 0xbf}, - // Block 0x23, offset 0x169 + // Block 0x23, offset 0x151 {value: 0x0000, lo: 0x0b}, {value: 0x0008, lo: 0x80, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0x89}, @@ -2749,7 +2758,7 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0008, lo: 0x9a, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x24, offset 0x175 + // Block 0x24, offset 0x15d {value: 0x0000, lo: 0x0a}, {value: 0x0008, lo: 0x80, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0x89}, @@ -2761,7 +2770,7 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0xb6, hi: 0xb7}, {value: 0x0008, lo: 0xb8, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0x25, offset 0x180 + // Block 0x25, offset 0x168 {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0x80}, {value: 0x0040, lo: 0x81, hi: 0x81}, @@ -2770,146 +2779,146 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0008, lo: 0x88, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0x97}, {value: 0x0008, lo: 0x98, hi: 0xbf}, - // Block 0x26, offset 0x188 + // Block 0x26, offset 0x170 
{value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0x90}, {value: 0x0040, lo: 0x91, hi: 0x91}, {value: 0x0008, lo: 0x92, hi: 0x95}, {value: 0x0040, lo: 0x96, hi: 0x97}, {value: 0x0008, lo: 0x98, hi: 0xbf}, - // Block 0x27, offset 0x18e + // Block 0x27, offset 0x176 {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0x9a}, {value: 0x0040, lo: 0x9b, hi: 0x9c}, - {value: 0x1308, lo: 0x9d, hi: 0x9f}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xbc}, {value: 0x0040, lo: 0xbd, hi: 0xbf}, - // Block 0x28, offset 0x194 + // Block 0x28, offset 0x17c {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x29, offset 0x199 + // Block 0x29, offset 0x181 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xb7}, {value: 0xe045, lo: 0xb8, hi: 0xbd}, {value: 0x0040, lo: 0xbe, hi: 0xbf}, - // Block 0x2a, offset 0x19e + // Block 0x2a, offset 0x186 {value: 0x0000, lo: 0x02}, {value: 0x0018, lo: 0x80, hi: 0x80}, {value: 0x0008, lo: 0x81, hi: 0xbf}, - // Block 0x2b, offset 0x1a1 + // Block 0x2b, offset 0x189 {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0xac}, {value: 0x0018, lo: 0xad, hi: 0xae}, {value: 0x0008, lo: 0xaf, hi: 0xbf}, - // Block 0x2c, offset 0x1a5 + // Block 0x2c, offset 0x18d {value: 0x0000, lo: 0x05}, {value: 0x0040, lo: 0x80, hi: 0x80}, {value: 0x0008, lo: 0x81, hi: 0x9a}, {value: 0x0018, lo: 0x9b, hi: 0x9c}, {value: 0x0040, lo: 0x9d, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x2d, offset 0x1ab + // Block 0x2d, offset 0x193 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0xaa}, {value: 0x0018, lo: 0xab, hi: 0xb0}, {value: 0x0008, lo: 0xb1, hi: 0xb8}, {value: 0x0040, lo: 0xb9, hi: 0xbf}, - // Block 0x2e, offset 0x1b0 + // Block 0x2e, offset 0x198 {value: 0x0000, lo: 0x0b}, {value: 0x0008, lo: 0x80, hi: 0x8c}, {value: 0x0040, lo: 0x8d, hi: 0x8d}, {value: 0x0008, lo: 0x8e, hi: 0x91}, - {value: 0x1308, lo: 0x92, hi: 0x93}, - {value: 0x1b08, lo: 0x94, hi: 0x94}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x3b08, lo: 0x94, hi: 0x94}, {value: 0x0040, lo: 0x95, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xb1}, - {value: 0x1308, lo: 0xb2, hi: 0xb3}, - {value: 0x1b08, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, {value: 0x0018, lo: 0xb5, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0x2f, offset 0x1bc + // Block 0x2f, offset 0x1a4 {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 0x80, hi: 0x91}, - {value: 0x1308, lo: 0x92, hi: 0x93}, + {value: 0x3308, lo: 0x92, hi: 0x93}, {value: 0x0040, lo: 0x94, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xac}, {value: 0x0040, lo: 0xad, hi: 0xad}, {value: 0x0008, lo: 0xae, hi: 0xb0}, {value: 0x0040, lo: 0xb1, hi: 0xb1}, - {value: 0x1308, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, {value: 0x0040, lo: 0xb4, hi: 0xbf}, - // Block 0x30, offset 0x1c6 + // Block 0x30, offset 0x1ae {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0xb3}, - {value: 0x1340, lo: 0xb4, hi: 0xb5}, - {value: 0x1008, lo: 0xb6, hi: 0xb6}, - {value: 0x1308, lo: 0xb7, hi: 0xbd}, - {value: 0x1008, lo: 0xbe, hi: 0xbf}, - // Block 0x31, offset 0x1cc + {value: 0x3340, lo: 0xb4, hi: 0xb5}, + {value: 0x3008, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x31, offset 0x1b4 {value: 0x0000, lo: 0x10}, - {value: 
0x1008, lo: 0x80, hi: 0x85}, - {value: 0x1308, lo: 0x86, hi: 0x86}, - {value: 0x1008, lo: 0x87, hi: 0x88}, - {value: 0x1308, lo: 0x89, hi: 0x91}, - {value: 0x1b08, lo: 0x92, hi: 0x92}, - {value: 0x1308, lo: 0x93, hi: 0x93}, + {value: 0x3008, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x91}, + {value: 0x3b08, lo: 0x92, hi: 0x92}, + {value: 0x3308, lo: 0x93, hi: 0x93}, {value: 0x0018, lo: 0x94, hi: 0x96}, {value: 0x0008, lo: 0x97, hi: 0x97}, {value: 0x0018, lo: 0x98, hi: 0x9b}, {value: 0x0008, lo: 0x9c, hi: 0x9c}, - {value: 0x1308, lo: 0x9d, hi: 0x9d}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xa9}, {value: 0x0040, lo: 0xaa, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0x32, offset 0x1dd + // Block 0x32, offset 0x1c5 {value: 0x0000, lo: 0x09}, {value: 0x0018, lo: 0x80, hi: 0x85}, {value: 0x0040, lo: 0x86, hi: 0x86}, {value: 0x0218, lo: 0x87, hi: 0x87}, {value: 0x0018, lo: 0x88, hi: 0x8a}, - {value: 0x13c0, lo: 0x8b, hi: 0x8d}, + {value: 0x33c0, lo: 0x8b, hi: 0x8d}, {value: 0x0040, lo: 0x8e, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9f}, {value: 0x0208, lo: 0xa0, hi: 0xbf}, - // Block 0x33, offset 0x1e7 + // Block 0x33, offset 0x1cf {value: 0x0000, lo: 0x02}, {value: 0x0208, lo: 0x80, hi: 0xb7}, {value: 0x0040, lo: 0xb8, hi: 0xbf}, - // Block 0x34, offset 0x1ea + // Block 0x34, offset 0x1d2 {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0x84}, - {value: 0x1308, lo: 0x85, hi: 0x86}, + {value: 0x3308, lo: 0x85, hi: 0x86}, {value: 0x0208, lo: 0x87, hi: 0xa8}, - {value: 0x1308, lo: 0xa9, hi: 0xa9}, + {value: 0x3308, lo: 0xa9, hi: 0xa9}, {value: 0x0208, lo: 0xaa, hi: 0xaa}, {value: 0x0040, lo: 0xab, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x35, offset 0x1f2 + // Block 0x35, offset 0x1da {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xbf}, - // Block 0x36, offset 0x1f5 + // Block 0x36, offset 0x1dd {value: 0x0000, lo: 0x0c}, {value: 0x0008, lo: 0x80, hi: 0x9e}, {value: 0x0040, lo: 0x9f, hi: 0x9f}, - {value: 0x1308, lo: 0xa0, hi: 0xa2}, - {value: 0x1008, lo: 0xa3, hi: 0xa6}, - {value: 0x1308, lo: 0xa7, hi: 0xa8}, - {value: 0x1008, lo: 0xa9, hi: 0xab}, + {value: 0x3308, lo: 0xa0, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xab}, {value: 0x0040, lo: 0xac, hi: 0xaf}, - {value: 0x1008, lo: 0xb0, hi: 0xb1}, - {value: 0x1308, lo: 0xb2, hi: 0xb2}, - {value: 0x1008, lo: 0xb3, hi: 0xb8}, - {value: 0x1308, lo: 0xb9, hi: 0xbb}, + {value: 0x3008, lo: 0xb0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xbb}, {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0x37, offset 0x202 + // Block 0x37, offset 0x1ea {value: 0x0000, lo: 0x07}, {value: 0x0018, lo: 0x80, hi: 0x80}, {value: 0x0040, lo: 0x81, hi: 0x83}, @@ -2918,12 +2927,12 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0xae, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xb4}, {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0x38, offset 0x20a + // Block 0x38, offset 0x1f2 {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0xab}, {value: 0x0040, lo: 0xac, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x39, offset 0x20e + // Block 0x39, offset 0x1f6 {value: 0x0000, lo: 
0x06}, {value: 0x0008, lo: 0x80, hi: 0x89}, {value: 0x0040, lo: 0x8a, hi: 0x8f}, @@ -2931,33 +2940,33 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0028, lo: 0x9a, hi: 0x9a}, {value: 0x0040, lo: 0x9b, hi: 0x9d}, {value: 0x0018, lo: 0x9e, hi: 0xbf}, - // Block 0x3a, offset 0x215 + // Block 0x3a, offset 0x1fd {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0x96}, - {value: 0x1308, lo: 0x97, hi: 0x98}, - {value: 0x1008, lo: 0x99, hi: 0x9a}, - {value: 0x1308, lo: 0x9b, hi: 0x9b}, + {value: 0x3308, lo: 0x97, hi: 0x98}, + {value: 0x3008, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9b}, {value: 0x0040, lo: 0x9c, hi: 0x9d}, {value: 0x0018, lo: 0x9e, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x3b, offset 0x21d + // Block 0x3b, offset 0x205 {value: 0x0000, lo: 0x0f}, {value: 0x0008, lo: 0x80, hi: 0x94}, - {value: 0x1008, lo: 0x95, hi: 0x95}, - {value: 0x1308, lo: 0x96, hi: 0x96}, - {value: 0x1008, lo: 0x97, hi: 0x97}, - {value: 0x1308, lo: 0x98, hi: 0x9e}, + {value: 0x3008, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x9e}, {value: 0x0040, lo: 0x9f, hi: 0x9f}, - {value: 0x1b08, lo: 0xa0, hi: 0xa0}, - {value: 0x1008, lo: 0xa1, hi: 0xa1}, - {value: 0x1308, lo: 0xa2, hi: 0xa2}, - {value: 0x1008, lo: 0xa3, hi: 0xa4}, - {value: 0x1308, lo: 0xa5, hi: 0xac}, - {value: 0x1008, lo: 0xad, hi: 0xb2}, - {value: 0x1308, lo: 0xb3, hi: 0xbc}, + {value: 0x3b08, lo: 0xa0, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xac}, + {value: 0x3008, lo: 0xad, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xbc}, {value: 0x0040, lo: 0xbd, hi: 0xbe}, - {value: 0x1308, lo: 0xbf, hi: 0xbf}, - // Block 0x3c, offset 0x22d + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x3c, offset 0x215 {value: 0x0000, lo: 0x0b}, {value: 0x0008, lo: 0x80, hi: 0x89}, {value: 0x0040, lo: 0x8a, hi: 0x8f}, @@ -2967,78 +2976,78 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0008, lo: 0xa7, hi: 0xa7}, {value: 0x0018, lo: 0xa8, hi: 0xad}, {value: 0x0040, lo: 0xae, hi: 0xaf}, - {value: 0x1308, lo: 0xb0, hi: 0xbd}, - {value: 0x1318, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xb0, hi: 0xbd}, + {value: 0x3318, lo: 0xbe, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0x3d, offset 0x239 + // Block 0x3d, offset 0x221 {value: 0x0000, lo: 0x01}, {value: 0x0040, lo: 0x80, hi: 0xbf}, - // Block 0x3e, offset 0x23b + // Block 0x3e, offset 0x223 {value: 0x0000, lo: 0x09}, - {value: 0x1308, lo: 0x80, hi: 0x83}, - {value: 0x1008, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3008, lo: 0x84, hi: 0x84}, {value: 0x0008, lo: 0x85, hi: 0xb3}, - {value: 0x1308, lo: 0xb4, hi: 0xb4}, - {value: 0x1008, lo: 0xb5, hi: 0xb5}, - {value: 0x1308, lo: 0xb6, hi: 0xba}, - {value: 0x1008, lo: 0xbb, hi: 0xbb}, - {value: 0x1308, lo: 0xbc, hi: 0xbc}, - {value: 0x1008, lo: 0xbd, hi: 0xbf}, - // Block 0x3f, offset 0x245 + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x3f, offset 0x22d {value: 0x0000, lo: 0x0b}, - {value: 0x1008, lo: 0x80, hi: 0x81}, - {value: 0x1308, lo: 0x82, hi: 0x82}, - {value: 0x1008, lo: 0x83, hi: 0x83}, - {value: 0x1808, lo: 0x84, hi: 0x84}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 
0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x3808, lo: 0x84, hi: 0x84}, {value: 0x0008, lo: 0x85, hi: 0x8b}, {value: 0x0040, lo: 0x8c, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0018, lo: 0x9a, hi: 0xaa}, - {value: 0x1308, lo: 0xab, hi: 0xb3}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, {value: 0x0018, lo: 0xb4, hi: 0xbc}, {value: 0x0040, lo: 0xbd, hi: 0xbf}, - // Block 0x40, offset 0x251 + // Block 0x40, offset 0x239 {value: 0x0000, lo: 0x0b}, - {value: 0x1308, lo: 0x80, hi: 0x81}, - {value: 0x1008, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, {value: 0x0008, lo: 0x83, hi: 0xa0}, - {value: 0x1008, lo: 0xa1, hi: 0xa1}, - {value: 0x1308, lo: 0xa2, hi: 0xa5}, - {value: 0x1008, lo: 0xa6, hi: 0xa7}, - {value: 0x1308, lo: 0xa8, hi: 0xa9}, - {value: 0x1808, lo: 0xaa, hi: 0xaa}, - {value: 0x1b08, lo: 0xab, hi: 0xab}, - {value: 0x1308, lo: 0xac, hi: 0xad}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3808, lo: 0xaa, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, {value: 0x0008, lo: 0xae, hi: 0xbf}, - // Block 0x41, offset 0x25d + // Block 0x41, offset 0x245 {value: 0x0000, lo: 0x0b}, {value: 0x0008, lo: 0x80, hi: 0xa5}, - {value: 0x1308, lo: 0xa6, hi: 0xa6}, - {value: 0x1008, lo: 0xa7, hi: 0xa7}, - {value: 0x1308, lo: 0xa8, hi: 0xa9}, - {value: 0x1008, lo: 0xaa, hi: 0xac}, - {value: 0x1308, lo: 0xad, hi: 0xad}, - {value: 0x1008, lo: 0xae, hi: 0xae}, - {value: 0x1308, lo: 0xaf, hi: 0xb1}, - {value: 0x1808, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xa6, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3008, lo: 0xaa, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3808, lo: 0xb2, hi: 0xb3}, {value: 0x0040, lo: 0xb4, hi: 0xbb}, {value: 0x0018, lo: 0xbc, hi: 0xbf}, - // Block 0x42, offset 0x269 + // Block 0x42, offset 0x251 {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0xa3}, - {value: 0x1008, lo: 0xa4, hi: 0xab}, - {value: 0x1308, lo: 0xac, hi: 0xb3}, - {value: 0x1008, lo: 0xb4, hi: 0xb5}, - {value: 0x1308, lo: 0xb6, hi: 0xb7}, + {value: 0x3008, lo: 0xa4, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, {value: 0x0040, lo: 0xb8, hi: 0xba}, {value: 0x0018, lo: 0xbb, hi: 0xbf}, - // Block 0x43, offset 0x271 + // Block 0x43, offset 0x259 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0x89}, {value: 0x0040, lo: 0x8a, hi: 0x8c}, {value: 0x0008, lo: 0x8d, hi: 0xbd}, {value: 0x0018, lo: 0xbe, hi: 0xbf}, - // Block 0x44, offset 0x276 + // Block 0x44, offset 0x25e {value: 0x0000, lo: 0x09}, {value: 0x0e29, lo: 0x80, hi: 0x80}, {value: 0x0e41, lo: 0x81, hi: 0x81}, @@ -3049,30 +3058,30 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0eb9, lo: 0x87, hi: 0x87}, {value: 0x057d, lo: 0x88, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0xbf}, - // Block 0x45, offset 0x280 + // Block 0x45, offset 0x268 {value: 0x0000, lo: 0x10}, {value: 0x0018, lo: 0x80, hi: 0x87}, {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x1308, lo: 0x90, hi: 0x92}, + {value: 0x3308, lo: 0x90, hi: 0x92}, {value: 0x0018, lo: 0x93, hi: 0x93}, - {value: 0x1308, lo: 0x94, hi: 0xa0}, - {value: 0x1008, lo: 0xa1, hi: 0xa1}, - {value: 0x1308, lo: 
0xa2, hi: 0xa8}, + {value: 0x3308, lo: 0x94, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa8}, {value: 0x0008, lo: 0xa9, hi: 0xac}, - {value: 0x1308, lo: 0xad, hi: 0xad}, + {value: 0x3308, lo: 0xad, hi: 0xad}, {value: 0x0008, lo: 0xae, hi: 0xb1}, - {value: 0x1008, lo: 0xb2, hi: 0xb3}, - {value: 0x1308, lo: 0xb4, hi: 0xb4}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, {value: 0x0008, lo: 0xb5, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xb7}, - {value: 0x1308, lo: 0xb8, hi: 0xb9}, + {value: 0x3008, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0x46, offset 0x291 + // Block 0x46, offset 0x279 {value: 0x0000, lo: 0x03}, - {value: 0x1308, lo: 0x80, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xba}, - {value: 0x1308, lo: 0xbb, hi: 0xbf}, - // Block 0x47, offset 0x295 + {value: 0x3308, lo: 0x80, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0x47, offset 0x27d {value: 0x0000, lo: 0x0a}, {value: 0x0008, lo: 0x80, hi: 0x87}, {value: 0xe045, lo: 0x88, hi: 0x8f}, @@ -3084,12 +3093,12 @@ var idnaSparseValues = [1876]valueRange{ {value: 0xe045, lo: 0xa8, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xb7}, {value: 0xe045, lo: 0xb8, hi: 0xbf}, - // Block 0x48, offset 0x2a0 + // Block 0x48, offset 0x288 {value: 0x0000, lo: 0x03}, {value: 0x0040, lo: 0x80, hi: 0x8f}, - {value: 0x1318, lo: 0x90, hi: 0xb0}, + {value: 0x3318, lo: 0x90, hi: 0xb0}, {value: 0x0040, lo: 0xb1, hi: 0xbf}, - // Block 0x49, offset 0x2a4 + // Block 0x49, offset 0x28c {value: 0x0000, lo: 0x08}, {value: 0x0018, lo: 0x80, hi: 0x82}, {value: 0x0040, lo: 0x83, hi: 0x83}, @@ -3099,7 +3108,7 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0018, lo: 0x8a, hi: 0x8b}, {value: 0x0040, lo: 0x8c, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0xbf}, - // Block 0x4a, offset 0x2ad + // Block 0x4a, offset 0x295 {value: 0x0000, lo: 0x07}, {value: 0x0018, lo: 0x80, hi: 0xab}, {value: 0x24f1, lo: 0xac, hi: 0xac}, @@ -3108,72 +3117,68 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x2579, lo: 0xaf, hi: 0xaf}, {value: 0x25b1, lo: 0xb0, hi: 0xb0}, {value: 0x0018, lo: 0xb1, hi: 0xbf}, - // Block 0x4b, offset 0x2b5 + // Block 0x4b, offset 0x29d {value: 0x0000, lo: 0x05}, {value: 0x0018, lo: 0x80, hi: 0x9f}, {value: 0x0080, lo: 0xa0, hi: 0xa0}, {value: 0x0018, lo: 0xa1, hi: 0xad}, {value: 0x0080, lo: 0xae, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xbf}, - // Block 0x4c, offset 0x2bb + // Block 0x4c, offset 0x2a3 {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0xa8}, {value: 0x09c5, lo: 0xa9, hi: 0xa9}, {value: 0x09e5, lo: 0xaa, hi: 0xaa}, {value: 0x0018, lo: 0xab, hi: 0xbf}, - // Block 0x4d, offset 0x2c0 - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0x4e, offset 0x2c3 + // Block 0x4d, offset 0x2a8 {value: 0x0000, lo: 0x02}, {value: 0x0018, lo: 0x80, hi: 0xa6}, {value: 0x0040, lo: 0xa7, hi: 0xbf}, - // Block 0x4f, offset 0x2c6 + // Block 0x4e, offset 0x2ab {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0x8b}, {value: 0x28c1, lo: 0x8c, hi: 0x8c}, {value: 0x0018, lo: 0x8d, hi: 0xbf}, - // Block 0x50, offset 0x2ca + // Block 0x4f, offset 0x2af {value: 0x0000, lo: 0x05}, {value: 0x0018, lo: 0x80, hi: 0xb3}, {value: 0x0e66, lo: 0xb4, hi: 0xb4}, {value: 0x292a, lo: 0xb5, hi: 0xb5}, {value: 0x0e86, lo: 0xb6, hi: 0xb6}, {value: 0x0018, lo: 0xb7, hi: 0xbf}, - // Block 0x51, offset 0x2d0 + // 
Block 0x50, offset 0x2b5 {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0x9b}, {value: 0x2941, lo: 0x9c, hi: 0x9c}, {value: 0x0018, lo: 0x9d, hi: 0xbf}, - // Block 0x52, offset 0x2d4 + // Block 0x51, offset 0x2b9 {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0xb3}, {value: 0x0040, lo: 0xb4, hi: 0xb5}, {value: 0x0018, lo: 0xb6, hi: 0xbf}, - // Block 0x53, offset 0x2d8 + // Block 0x52, offset 0x2bd {value: 0x0000, lo: 0x05}, {value: 0x0018, lo: 0x80, hi: 0x95}, {value: 0x0040, lo: 0x96, hi: 0x97}, {value: 0x0018, lo: 0x98, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbc}, {value: 0x0018, lo: 0xbd, hi: 0xbf}, - // Block 0x54, offset 0x2de + // Block 0x53, offset 0x2c3 {value: 0x0000, lo: 0x06}, {value: 0x0018, lo: 0x80, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0x89}, - {value: 0x0018, lo: 0x8a, hi: 0x91}, - {value: 0x0040, lo: 0x92, hi: 0xab}, + {value: 0x0018, lo: 0x8a, hi: 0x92}, + {value: 0x0040, lo: 0x93, hi: 0xab}, {value: 0x0018, lo: 0xac, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0x55, offset 0x2e5 + // Block 0x54, offset 0x2ca {value: 0x0000, lo: 0x05}, {value: 0xe185, lo: 0x80, hi: 0x8f}, {value: 0x03f5, lo: 0x90, hi: 0x9f}, {value: 0x0ea5, lo: 0xa0, hi: 0xae}, {value: 0x0040, lo: 0xaf, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x56, offset 0x2eb + // Block 0x55, offset 0x2d0 {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0xa5}, {value: 0x0040, lo: 0xa6, hi: 0xa6}, @@ -3182,15 +3187,15 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0008, lo: 0xad, hi: 0xad}, {value: 0x0040, lo: 0xae, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x57, offset 0x2f3 + // Block 0x56, offset 0x2d8 {value: 0x0000, lo: 0x06}, {value: 0x0008, lo: 0x80, hi: 0xa7}, {value: 0x0040, lo: 0xa8, hi: 0xae}, {value: 0xe075, lo: 0xaf, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb0}, {value: 0x0040, lo: 0xb1, hi: 0xbe}, - {value: 0x1b08, lo: 0xbf, hi: 0xbf}, - // Block 0x58, offset 0x2fa + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0x57, offset 0x2df {value: 0x0000, lo: 0x0a}, {value: 0x0008, lo: 0x80, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0x9f}, @@ -3202,7 +3207,7 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0xb7, hi: 0xb7}, {value: 0x0008, lo: 0xb8, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0x59, offset 0x305 + // Block 0x58, offset 0x2ea {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x87}, @@ -3212,62 +3217,62 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0x97, hi: 0x97}, {value: 0x0008, lo: 0x98, hi: 0x9e}, {value: 0x0040, lo: 0x9f, hi: 0x9f}, - {value: 0x1308, lo: 0xa0, hi: 0xbf}, - // Block 0x5a, offset 0x30f + {value: 0x3308, lo: 0xa0, hi: 0xbf}, + // Block 0x59, offset 0x2f4 {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0xae}, {value: 0x0008, lo: 0xaf, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xbf}, - // Block 0x5b, offset 0x313 + // Block 0x5a, offset 0x2f8 {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0x84}, - {value: 0x0040, lo: 0x85, hi: 0xbf}, - // Block 0x5c, offset 0x316 + {value: 0x0018, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xbf}, + // Block 0x5b, offset 0x2fb {value: 0x0000, lo: 0x05}, {value: 0x0018, lo: 0x80, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9a}, {value: 0x0018, lo: 0x9b, hi: 0x9e}, {value: 0x0edd, lo: 0x9f, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xbf}, - // Block 0x5d, offset 0x31c + // Block 0x5c, offset 0x301 {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 
0x80, hi: 0xb2}, {value: 0x0efd, lo: 0xb3, hi: 0xb3}, {value: 0x0040, lo: 0xb4, hi: 0xbf}, - // Block 0x5e, offset 0x320 + // Block 0x5d, offset 0x305 {value: 0x0020, lo: 0x01}, {value: 0x0f1d, lo: 0x80, hi: 0xbf}, - // Block 0x5f, offset 0x322 + // Block 0x5e, offset 0x307 {value: 0x0020, lo: 0x02}, {value: 0x171d, lo: 0x80, hi: 0x8f}, {value: 0x18fd, lo: 0x90, hi: 0xbf}, - // Block 0x60, offset 0x325 + // Block 0x5f, offset 0x30a {value: 0x0020, lo: 0x01}, {value: 0x1efd, lo: 0x80, hi: 0xbf}, - // Block 0x61, offset 0x327 + // Block 0x60, offset 0x30c {value: 0x0000, lo: 0x02}, {value: 0x0040, lo: 0x80, hi: 0x80}, {value: 0x0008, lo: 0x81, hi: 0xbf}, - // Block 0x62, offset 0x32a + // Block 0x61, offset 0x30f {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 0x80, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0x98}, - {value: 0x1308, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x99, hi: 0x9a}, {value: 0x29e2, lo: 0x9b, hi: 0x9b}, {value: 0x2a0a, lo: 0x9c, hi: 0x9c}, {value: 0x0008, lo: 0x9d, hi: 0x9e}, {value: 0x2a31, lo: 0x9f, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xa0}, {value: 0x0008, lo: 0xa1, hi: 0xbf}, - // Block 0x63, offset 0x334 + // Block 0x62, offset 0x319 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xbe}, {value: 0x2a69, lo: 0xbf, hi: 0xbf}, - // Block 0x64, offset 0x337 + // Block 0x63, offset 0x31c {value: 0x0000, lo: 0x0e}, {value: 0x0040, lo: 0x80, hi: 0x84}, - {value: 0x0008, lo: 0x85, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xb0}, + {value: 0x0008, lo: 0x85, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xb0}, {value: 0x2a1d, lo: 0xb1, hi: 0xb1}, {value: 0x2a3d, lo: 0xb2, hi: 0xb2}, {value: 0x2a5d, lo: 0xb3, hi: 0xb3}, @@ -3279,150 +3284,150 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x2afd, lo: 0xba, hi: 0xbb}, {value: 0x2b1d, lo: 0xbc, hi: 0xbd}, {value: 0x2afd, lo: 0xbe, hi: 0xbf}, - // Block 0x65, offset 0x346 + // Block 0x64, offset 0x32b {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0xa3}, {value: 0x0040, lo: 0xa4, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x66, offset 0x34a + // Block 0x65, offset 0x32f {value: 0x0030, lo: 0x04}, {value: 0x2aa2, lo: 0x80, hi: 0x9d}, {value: 0x305a, lo: 0x9e, hi: 0x9e}, {value: 0x0040, lo: 0x9f, hi: 0x9f}, {value: 0x30a2, lo: 0xa0, hi: 0xbf}, - // Block 0x67, offset 0x34f + // Block 0x66, offset 0x334 {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x95}, - {value: 0x0040, lo: 0x96, hi: 0xbf}, - // Block 0x68, offset 0x352 + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xbf}, + // Block 0x67, offset 0x337 {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0x8c}, {value: 0x0040, lo: 0x8d, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0xbf}, - // Block 0x69, offset 0x356 + // Block 0x68, offset 0x33b {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0xbd}, {value: 0x0018, lo: 0xbe, hi: 0xbf}, - // Block 0x6a, offset 0x35b + // Block 0x69, offset 0x340 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0x8c}, {value: 0x0018, lo: 0x8d, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0xab}, {value: 0x0040, lo: 0xac, hi: 0xbf}, - // Block 0x6b, offset 0x360 + // Block 0x6a, offset 0x345 {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0xa5}, {value: 0x0018, lo: 0xa6, hi: 0xaf}, - {value: 0x1308, lo: 0xb0, hi: 0xb1}, + {value: 0x3308, lo: 0xb0, hi: 0xb1}, {value: 0x0018, lo: 0xb2, hi: 0xb7}, {value: 0x0040, lo: 0xb8, hi: 0xbf}, - // Block 0x6c, offset 0x366 + // Block 
0x6b, offset 0x34b {value: 0x0000, lo: 0x05}, {value: 0x0040, lo: 0x80, hi: 0xb6}, {value: 0x0008, lo: 0xb7, hi: 0xb7}, {value: 0x2009, lo: 0xb8, hi: 0xb8}, {value: 0x6e89, lo: 0xb9, hi: 0xb9}, {value: 0x0008, lo: 0xba, hi: 0xbf}, - // Block 0x6d, offset 0x36c + // Block 0x6c, offset 0x351 {value: 0x0000, lo: 0x0e}, {value: 0x0008, lo: 0x80, hi: 0x81}, - {value: 0x1308, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x82, hi: 0x82}, {value: 0x0008, lo: 0x83, hi: 0x85}, - {value: 0x1b08, lo: 0x86, hi: 0x86}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, {value: 0x0008, lo: 0x87, hi: 0x8a}, - {value: 0x1308, lo: 0x8b, hi: 0x8b}, + {value: 0x3308, lo: 0x8b, hi: 0x8b}, {value: 0x0008, lo: 0x8c, hi: 0xa2}, - {value: 0x1008, lo: 0xa3, hi: 0xa4}, - {value: 0x1308, lo: 0xa5, hi: 0xa6}, - {value: 0x1008, lo: 0xa7, hi: 0xa7}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, {value: 0x0018, lo: 0xa8, hi: 0xab}, {value: 0x0040, lo: 0xac, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0x6e, offset 0x37b + // Block 0x6d, offset 0x360 {value: 0x0000, lo: 0x05}, {value: 0x0208, lo: 0x80, hi: 0xb1}, {value: 0x0108, lo: 0xb2, hi: 0xb2}, {value: 0x0008, lo: 0xb3, hi: 0xb3}, {value: 0x0018, lo: 0xb4, hi: 0xb7}, {value: 0x0040, lo: 0xb8, hi: 0xbf}, - // Block 0x6f, offset 0x381 + // Block 0x6e, offset 0x366 {value: 0x0000, lo: 0x03}, - {value: 0x1008, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x80, hi: 0x81}, {value: 0x0008, lo: 0x82, hi: 0xb3}, - {value: 0x1008, lo: 0xb4, hi: 0xbf}, - // Block 0x70, offset 0x385 + {value: 0x3008, lo: 0xb4, hi: 0xbf}, + // Block 0x6f, offset 0x36a {value: 0x0000, lo: 0x0e}, - {value: 0x1008, lo: 0x80, hi: 0x83}, - {value: 0x1b08, lo: 0x84, hi: 0x84}, - {value: 0x1308, lo: 0x85, hi: 0x85}, + {value: 0x3008, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x85}, {value: 0x0040, lo: 0x86, hi: 0x8d}, {value: 0x0018, lo: 0x8e, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9f}, - {value: 0x1308, lo: 0xa0, hi: 0xb1}, + {value: 0x3308, lo: 0xa0, hi: 0xb1}, {value: 0x0008, lo: 0xb2, hi: 0xb7}, {value: 0x0018, lo: 0xb8, hi: 0xba}, {value: 0x0008, lo: 0xbb, hi: 0xbb}, {value: 0x0018, lo: 0xbc, hi: 0xbc}, {value: 0x0008, lo: 0xbd, hi: 0xbd}, {value: 0x0040, lo: 0xbe, hi: 0xbf}, - // Block 0x71, offset 0x394 + // Block 0x70, offset 0x379 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0xa5}, - {value: 0x1308, lo: 0xa6, hi: 0xad}, + {value: 0x3308, lo: 0xa6, hi: 0xad}, {value: 0x0018, lo: 0xae, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x72, offset 0x399 + // Block 0x71, offset 0x37e {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x1308, lo: 0x87, hi: 0x91}, - {value: 0x1008, lo: 0x92, hi: 0x92}, - {value: 0x1808, lo: 0x93, hi: 0x93}, + {value: 0x3308, lo: 0x87, hi: 0x91}, + {value: 0x3008, lo: 0x92, hi: 0x92}, + {value: 0x3808, lo: 0x93, hi: 0x93}, {value: 0x0040, lo: 0x94, hi: 0x9e}, {value: 0x0018, lo: 0x9f, hi: 0xbc}, {value: 0x0040, lo: 0xbd, hi: 0xbf}, - // Block 0x73, offset 0x3a1 + // Block 0x72, offset 0x386 {value: 0x0000, lo: 0x09}, - {value: 0x1308, lo: 0x80, hi: 0x82}, - {value: 0x1008, lo: 0x83, hi: 0x83}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, {value: 0x0008, lo: 0x84, hi: 0xb2}, - {value: 0x1308, lo: 0xb3, hi: 0xb3}, - {value: 0x1008, lo: 0xb4, hi: 0xb5}, - {value: 0x1308, lo: 0xb6, hi: 0xb9}, - {value: 
0x1008, lo: 0xba, hi: 0xbb}, - {value: 0x1308, lo: 0xbc, hi: 0xbc}, - {value: 0x1008, lo: 0xbd, hi: 0xbf}, - // Block 0x74, offset 0x3ab + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb9}, + {value: 0x3008, lo: 0xba, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x73, offset 0x390 {value: 0x0000, lo: 0x0a}, - {value: 0x1808, lo: 0x80, hi: 0x80}, + {value: 0x3808, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x8d}, {value: 0x0040, lo: 0x8e, hi: 0x8e}, {value: 0x0008, lo: 0x8f, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9d}, {value: 0x0018, lo: 0x9e, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xa4}, - {value: 0x1308, lo: 0xa5, hi: 0xa5}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, {value: 0x0008, lo: 0xa6, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0x75, offset 0x3b6 + // Block 0x74, offset 0x39b {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0xa8}, - {value: 0x1308, lo: 0xa9, hi: 0xae}, - {value: 0x1008, lo: 0xaf, hi: 0xb0}, - {value: 0x1308, lo: 0xb1, hi: 0xb2}, - {value: 0x1008, lo: 0xb3, hi: 0xb4}, - {value: 0x1308, lo: 0xb5, hi: 0xb6}, + {value: 0x3308, lo: 0xa9, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0x76, offset 0x3be + // Block 0x75, offset 0x3a3 {value: 0x0000, lo: 0x10}, {value: 0x0008, lo: 0x80, hi: 0x82}, - {value: 0x1308, lo: 0x83, hi: 0x83}, + {value: 0x3308, lo: 0x83, hi: 0x83}, {value: 0x0008, lo: 0x84, hi: 0x8b}, - {value: 0x1308, lo: 0x8c, hi: 0x8c}, - {value: 0x1008, lo: 0x8d, hi: 0x8d}, + {value: 0x3308, lo: 0x8c, hi: 0x8c}, + {value: 0x3008, lo: 0x8d, hi: 0x8d}, {value: 0x0040, lo: 0x8e, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9b}, @@ -3430,38 +3435,38 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0008, lo: 0xa0, hi: 0xb6}, {value: 0x0018, lo: 0xb7, hi: 0xb9}, {value: 0x0008, lo: 0xba, hi: 0xba}, - {value: 0x1008, lo: 0xbb, hi: 0xbb}, - {value: 0x1308, lo: 0xbc, hi: 0xbc}, - {value: 0x1008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbd}, {value: 0x0008, lo: 0xbe, hi: 0xbf}, - // Block 0x77, offset 0x3cf + // Block 0x76, offset 0x3b4 {value: 0x0000, lo: 0x08}, {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x1308, lo: 0xb0, hi: 0xb0}, + {value: 0x3308, lo: 0xb0, hi: 0xb0}, {value: 0x0008, lo: 0xb1, hi: 0xb1}, - {value: 0x1308, lo: 0xb2, hi: 0xb4}, + {value: 0x3308, lo: 0xb2, hi: 0xb4}, {value: 0x0008, lo: 0xb5, hi: 0xb6}, - {value: 0x1308, lo: 0xb7, hi: 0xb8}, + {value: 0x3308, lo: 0xb7, hi: 0xb8}, {value: 0x0008, lo: 0xb9, hi: 0xbd}, - {value: 0x1308, lo: 0xbe, hi: 0xbf}, - // Block 0x78, offset 0x3d8 + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x77, offset 0x3bd {value: 0x0000, lo: 0x0f}, {value: 0x0008, lo: 0x80, hi: 0x80}, - {value: 0x1308, lo: 0x81, hi: 0x81}, + {value: 0x3308, lo: 0x81, hi: 0x81}, {value: 0x0008, lo: 0x82, hi: 0x82}, {value: 0x0040, lo: 0x83, hi: 0x9a}, {value: 0x0008, lo: 0x9b, hi: 0x9d}, {value: 0x0018, lo: 0x9e, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xaa}, - {value: 0x1008, lo: 0xab, hi: 0xab}, - {value: 0x1308, lo: 0xac, hi: 0xad}, - {value: 0x1008, lo: 0xae, hi: 0xaf}, + {value: 0x3008, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, 
{value: 0x0018, lo: 0xb0, hi: 0xb1}, {value: 0x0008, lo: 0xb2, hi: 0xb4}, - {value: 0x1008, lo: 0xb5, hi: 0xb5}, - {value: 0x1b08, lo: 0xb6, hi: 0xb6}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3b08, lo: 0xb6, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0x79, offset 0x3e8 + // Block 0x78, offset 0x3cd {value: 0x0000, lo: 0x0c}, {value: 0x0040, lo: 0x80, hi: 0x80}, {value: 0x0008, lo: 0x81, hi: 0x86}, @@ -3475,7 +3480,7 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0008, lo: 0xa8, hi: 0xae}, {value: 0x0040, lo: 0xaf, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x7a, offset 0x3f5 + // Block 0x79, offset 0x3da {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 0x80, hi: 0x9a}, {value: 0x0018, lo: 0x9b, hi: 0x9b}, @@ -3486,54 +3491,54 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0008, lo: 0xa0, hi: 0xa5}, {value: 0x0040, lo: 0xa6, hi: 0xaf}, {value: 0x4495, lo: 0xb0, hi: 0xbf}, - // Block 0x7b, offset 0x3ff + // Block 0x7a, offset 0x3e4 {value: 0x0000, lo: 0x04}, {value: 0x44b5, lo: 0x80, hi: 0x8f}, {value: 0x44d5, lo: 0x90, hi: 0x9f}, {value: 0x44f5, lo: 0xa0, hi: 0xaf}, {value: 0x44d5, lo: 0xb0, hi: 0xbf}, - // Block 0x7c, offset 0x404 + // Block 0x7b, offset 0x3e9 {value: 0x0000, lo: 0x0c}, {value: 0x0008, lo: 0x80, hi: 0xa2}, - {value: 0x1008, lo: 0xa3, hi: 0xa4}, - {value: 0x1308, lo: 0xa5, hi: 0xa5}, - {value: 0x1008, lo: 0xa6, hi: 0xa7}, - {value: 0x1308, lo: 0xa8, hi: 0xa8}, - {value: 0x1008, lo: 0xa9, hi: 0xaa}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xaa}, {value: 0x0018, lo: 0xab, hi: 0xab}, - {value: 0x1008, lo: 0xac, hi: 0xac}, - {value: 0x1b08, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3b08, lo: 0xad, hi: 0xad}, {value: 0x0040, lo: 0xae, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0x7d, offset 0x411 + // Block 0x7c, offset 0x3f6 {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0xa3}, {value: 0x0040, lo: 0xa4, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xbf}, - // Block 0x7e, offset 0x415 + // Block 0x7d, offset 0x3fa {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x8a}, {value: 0x0018, lo: 0x8b, hi: 0xbb}, {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0x7f, offset 0x41a + // Block 0x7e, offset 0x3ff {value: 0x0020, lo: 0x01}, {value: 0x4515, lo: 0x80, hi: 0xbf}, - // Block 0x80, offset 0x41c + // Block 0x7f, offset 0x401 {value: 0x0020, lo: 0x03}, {value: 0x4d15, lo: 0x80, hi: 0x94}, {value: 0x4ad5, lo: 0x95, hi: 0x95}, {value: 0x4fb5, lo: 0x96, hi: 0xbf}, - // Block 0x81, offset 0x420 + // Block 0x80, offset 0x405 {value: 0x0020, lo: 0x01}, {value: 0x54f5, lo: 0x80, hi: 0xbf}, - // Block 0x82, offset 0x422 + // Block 0x81, offset 0x407 {value: 0x0020, lo: 0x03}, {value: 0x5cf5, lo: 0x80, hi: 0x84}, {value: 0x5655, lo: 0x85, hi: 0x85}, {value: 0x5d95, lo: 0x86, hi: 0xbf}, - // Block 0x83, offset 0x426 + // Block 0x82, offset 0x40b {value: 0x0020, lo: 0x08}, {value: 0x6b55, lo: 0x80, hi: 0x8f}, {value: 0x6d15, lo: 0x90, hi: 0x90}, @@ -3543,19 +3548,19 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0xae, hi: 0xae}, {value: 0x0040, lo: 0xaf, hi: 0xaf}, {value: 0x70d5, lo: 0xb0, hi: 0xbf}, - // Block 0x84, offset 0x42f + // Block 0x83, offset 0x414 {value: 0x0020, lo: 0x05}, {value: 0x72d5, lo: 0x80, hi: 0xad}, {value: 0x6535, lo: 
0xae, hi: 0xae}, {value: 0x7895, lo: 0xaf, hi: 0xb5}, {value: 0x6f55, lo: 0xb6, hi: 0xb6}, {value: 0x7975, lo: 0xb7, hi: 0xbf}, - // Block 0x85, offset 0x435 + // Block 0x84, offset 0x41a {value: 0x0028, lo: 0x03}, {value: 0x7c21, lo: 0x80, hi: 0x82}, {value: 0x7be1, lo: 0x83, hi: 0x83}, {value: 0x7c99, lo: 0x84, hi: 0xbf}, - // Block 0x86, offset 0x439 + // Block 0x85, offset 0x41e {value: 0x0038, lo: 0x0f}, {value: 0x9db1, lo: 0x80, hi: 0x83}, {value: 0x9e59, lo: 0x84, hi: 0x85}, @@ -3572,7 +3577,7 @@ var idnaSparseValues = [1876]valueRange{ {value: 0xa869, lo: 0xbc, hi: 0xbc}, {value: 0xa7f9, lo: 0xbd, hi: 0xbd}, {value: 0xa8d9, lo: 0xbe, hi: 0xbf}, - // Block 0x87, offset 0x449 + // Block 0x86, offset 0x42e {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 0x80, hi: 0x8b}, {value: 0x0040, lo: 0x8c, hi: 0x8c}, @@ -3583,24 +3588,24 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0008, lo: 0xbc, hi: 0xbd}, {value: 0x0040, lo: 0xbe, hi: 0xbe}, {value: 0x0008, lo: 0xbf, hi: 0xbf}, - // Block 0x88, offset 0x453 + // Block 0x87, offset 0x438 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0x8d}, {value: 0x0040, lo: 0x8e, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0xbf}, - // Block 0x89, offset 0x458 + // Block 0x88, offset 0x43d {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xba}, {value: 0x0040, lo: 0xbb, hi: 0xbf}, - // Block 0x8a, offset 0x45b + // Block 0x89, offset 0x440 {value: 0x0000, lo: 0x05}, {value: 0x0018, lo: 0x80, hi: 0x82}, {value: 0x0040, lo: 0x83, hi: 0x86}, {value: 0x0018, lo: 0x87, hi: 0xb3}, {value: 0x0040, lo: 0xb4, hi: 0xb6}, {value: 0x0018, lo: 0xb7, hi: 0xbf}, - // Block 0x8b, offset 0x461 + // Block 0x8a, offset 0x446 {value: 0x0000, lo: 0x06}, {value: 0x0018, lo: 0x80, hi: 0x8e}, {value: 0x0040, lo: 0x8f, hi: 0x8f}, @@ -3608,31 +3613,31 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0x9c, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xa0}, {value: 0x0040, lo: 0xa1, hi: 0xbf}, - // Block 0x8c, offset 0x468 + // Block 0x8b, offset 0x44d {value: 0x0000, lo: 0x04}, {value: 0x0040, lo: 0x80, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0xbc}, - {value: 0x1308, lo: 0xbd, hi: 0xbd}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, {value: 0x0040, lo: 0xbe, hi: 0xbf}, - // Block 0x8d, offset 0x46d + // Block 0x8c, offset 0x452 {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0x9c}, {value: 0x0040, lo: 0x9d, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x8e, offset 0x471 + // Block 0x8d, offset 0x456 {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0x90}, {value: 0x0040, lo: 0x91, hi: 0x9f}, - {value: 0x1308, lo: 0xa0, hi: 0xa0}, + {value: 0x3308, lo: 0xa0, hi: 0xa0}, {value: 0x0018, lo: 0xa1, hi: 0xbb}, {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0x8f, offset 0x477 + // Block 0x8e, offset 0x45c {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xa3}, - {value: 0x0040, lo: 0xa4, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x90, offset 0x47c + {value: 0x0040, lo: 0xa4, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xbf}, + // Block 0x8f, offset 0x461 {value: 0x0000, lo: 0x08}, {value: 0x0008, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x81}, @@ -3640,22 +3645,22 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0018, lo: 0x8a, hi: 0x8a}, {value: 0x0040, lo: 0x8b, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0xb5}, - {value: 0x1308, lo: 0xb6, hi: 0xba}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, {value: 0x0040, lo: 0xbb, hi: 
0xbf}, - // Block 0x91, offset 0x485 + // Block 0x90, offset 0x46a {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0x9e}, {value: 0x0018, lo: 0x9f, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x92, offset 0x48a + // Block 0x91, offset 0x46f {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0x83}, {value: 0x0040, lo: 0x84, hi: 0x87}, {value: 0x0008, lo: 0x88, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0x95}, {value: 0x0040, lo: 0x96, hi: 0xbf}, - // Block 0x93, offset 0x490 + // Block 0x92, offset 0x475 {value: 0x0000, lo: 0x06}, {value: 0xe145, lo: 0x80, hi: 0x87}, {value: 0xe1c5, lo: 0x88, hi: 0x8f}, @@ -3663,7 +3668,7 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x8ad5, lo: 0x98, hi: 0x9f}, {value: 0x8aed, lo: 0xa0, hi: 0xa7}, {value: 0x0008, lo: 0xa8, hi: 0xbf}, - // Block 0x94, offset 0x497 + // Block 0x93, offset 0x47c {value: 0x0000, lo: 0x06}, {value: 0x0008, lo: 0x80, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0x9f}, @@ -3671,7 +3676,7 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0xaa, hi: 0xaf}, {value: 0x8aed, lo: 0xb0, hi: 0xb7}, {value: 0x8ad5, lo: 0xb8, hi: 0xbf}, - // Block 0x95, offset 0x49e + // Block 0x94, offset 0x483 {value: 0x0000, lo: 0x06}, {value: 0xe145, lo: 0x80, hi: 0x87}, {value: 0xe1c5, lo: 0x88, hi: 0x8f}, @@ -3679,173 +3684,176 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0x94, hi: 0x97}, {value: 0x0008, lo: 0x98, hi: 0xbb}, {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0x96, offset 0x4a5 + // Block 0x95, offset 0x48a {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0xa7}, {value: 0x0040, lo: 0xa8, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x97, offset 0x4a9 + // Block 0x96, offset 0x48e {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0xa3}, {value: 0x0040, lo: 0xa4, hi: 0xae}, {value: 0x0018, lo: 0xaf, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0x98, offset 0x4ae + // Block 0x97, offset 0x493 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0x99, offset 0x4b1 + // Block 0x98, offset 0x496 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0x95}, {value: 0x0040, lo: 0x96, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xa7}, {value: 0x0040, lo: 0xa8, hi: 0xbf}, - // Block 0x9a, offset 0x4b6 + // Block 0x99, offset 0x49b {value: 0x0000, lo: 0x0b}, - {value: 0x0008, lo: 0x80, hi: 0x85}, + {value: 0x0808, lo: 0x80, hi: 0x85}, {value: 0x0040, lo: 0x86, hi: 0x87}, - {value: 0x0008, lo: 0x88, hi: 0x88}, + {value: 0x0808, lo: 0x88, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0x89}, - {value: 0x0008, lo: 0x8a, hi: 0xb5}, + {value: 0x0808, lo: 0x8a, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xb6}, - {value: 0x0008, lo: 0xb7, hi: 0xb8}, + {value: 0x0808, lo: 0xb7, hi: 0xb8}, {value: 0x0040, lo: 0xb9, hi: 0xbb}, - {value: 0x0008, lo: 0xbc, hi: 0xbc}, + {value: 0x0808, lo: 0xbc, hi: 0xbc}, {value: 0x0040, lo: 0xbd, hi: 0xbe}, - {value: 0x0008, lo: 0xbf, hi: 0xbf}, - // Block 0x9b, offset 0x4c2 + {value: 0x0808, lo: 0xbf, hi: 0xbf}, + // Block 0x9a, offset 0x4a7 {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0808, lo: 0x80, hi: 0x95}, {value: 0x0040, lo: 0x96, hi: 0x96}, - {value: 0x0018, lo: 0x97, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xb6}, - {value: 0x0018, lo: 0xb7, hi: 0xbf}, - // Block 0x9c, offset 0x4c8 + {value: 0x0818, lo: 0x97, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb6}, + {value: 0x0818, lo: 0xb7, hi: 
0xbf}, + // Block 0x9b, offset 0x4ad {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0808, lo: 0x80, hi: 0x9e}, {value: 0x0040, lo: 0x9f, hi: 0xa6}, - {value: 0x0018, lo: 0xa7, hi: 0xaf}, + {value: 0x0818, lo: 0xa7, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0x9d, offset 0x4cd + // Block 0x9c, offset 0x4b2 {value: 0x0000, lo: 0x06}, {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xb2}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, {value: 0x0040, lo: 0xb3, hi: 0xb3}, - {value: 0x0008, lo: 0xb4, hi: 0xb5}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xba}, - {value: 0x0018, lo: 0xbb, hi: 0xbf}, - // Block 0x9e, offset 0x4d4 + {value: 0x0818, lo: 0xbb, hi: 0xbf}, + // Block 0x9d, offset 0x4b9 {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0x95}, - {value: 0x0018, lo: 0x96, hi: 0x9b}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0818, lo: 0x96, hi: 0x9b}, {value: 0x0040, lo: 0x9c, hi: 0x9e}, {value: 0x0018, lo: 0x9f, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xb9}, + {value: 0x0808, lo: 0xa0, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbe}, - {value: 0x0018, lo: 0xbf, hi: 0xbf}, - // Block 0x9f, offset 0x4dc + {value: 0x0818, lo: 0xbf, hi: 0xbf}, + // Block 0x9e, offset 0x4c1 {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0xb7}, + {value: 0x0808, lo: 0x80, hi: 0xb7}, {value: 0x0040, lo: 0xb8, hi: 0xbb}, - {value: 0x0018, lo: 0xbc, hi: 0xbd}, - {value: 0x0008, lo: 0xbe, hi: 0xbf}, - // Block 0xa0, offset 0x4e1 + {value: 0x0818, lo: 0xbc, hi: 0xbd}, + {value: 0x0808, lo: 0xbe, hi: 0xbf}, + // Block 0x9f, offset 0x4c6 {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0x8f}, + {value: 0x0818, lo: 0x80, hi: 0x8f}, {value: 0x0040, lo: 0x90, hi: 0x91}, - {value: 0x0018, lo: 0x92, hi: 0xbf}, - // Block 0xa1, offset 0x4e5 + {value: 0x0818, lo: 0x92, hi: 0xbf}, + // Block 0xa0, offset 0x4ca {value: 0x0000, lo: 0x0f}, - {value: 0x0008, lo: 0x80, hi: 0x80}, - {value: 0x1308, lo: 0x81, hi: 0x83}, + {value: 0x0808, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x83}, {value: 0x0040, lo: 0x84, hi: 0x84}, - {value: 0x1308, lo: 0x85, hi: 0x86}, + {value: 0x3308, lo: 0x85, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x8b}, - {value: 0x1308, lo: 0x8c, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x93}, + {value: 0x3308, lo: 0x8c, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x93}, {value: 0x0040, lo: 0x94, hi: 0x94}, - {value: 0x0008, lo: 0x95, hi: 0x97}, + {value: 0x0808, lo: 0x95, hi: 0x97}, {value: 0x0040, lo: 0x98, hi: 0x98}, - {value: 0x0008, lo: 0x99, hi: 0xb3}, + {value: 0x0808, lo: 0x99, hi: 0xb3}, {value: 0x0040, lo: 0xb4, hi: 0xb7}, - {value: 0x1308, lo: 0xb8, hi: 0xba}, + {value: 0x3308, lo: 0xb8, hi: 0xba}, {value: 0x0040, lo: 0xbb, hi: 0xbe}, - {value: 0x1b08, lo: 0xbf, hi: 0xbf}, - // Block 0xa2, offset 0x4f5 + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xa1, offset 0x4da {value: 0x0000, lo: 0x06}, - {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0818, lo: 0x80, hi: 0x87}, {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0x98}, + {value: 0x0818, lo: 0x90, hi: 0x98}, {value: 0x0040, lo: 0x99, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xbc}, - {value: 0x0018, lo: 0xbd, hi: 0xbf}, - // Block 0xa3, offset 0x4fc + {value: 0x0808, lo: 0xa0, hi: 0xbc}, + {value: 0x0818, lo: 0xbd, hi: 0xbf}, + // Block 0xa2, offset 0x4e1 {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0x9c}, - {value: 0x0018, lo: 0x9d, hi: 0x9f}, + {value: 0x0808, lo: 0x80, hi: 
0x9c}, + {value: 0x0818, lo: 0x9d, hi: 0x9f}, {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0xa4, offset 0x500 + // Block 0xa3, offset 0x4e5 {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0808, lo: 0x80, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xb8}, {value: 0x0018, lo: 0xb9, hi: 0xbf}, - // Block 0xa5, offset 0x504 + // Block 0xa4, offset 0x4e9 {value: 0x0000, lo: 0x06}, - {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0808, lo: 0x80, hi: 0x95}, {value: 0x0040, lo: 0x96, hi: 0x97}, - {value: 0x0018, lo: 0x98, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xb2}, + {value: 0x0818, lo: 0x98, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, {value: 0x0040, lo: 0xb3, hi: 0xb7}, - {value: 0x0018, lo: 0xb8, hi: 0xbf}, - // Block 0xa6, offset 0x50b + {value: 0x0818, lo: 0xb8, hi: 0xbf}, + // Block 0xa5, offset 0x4f0 + {value: 0x0000, lo: 0x01}, + {value: 0x0808, lo: 0x80, hi: 0xbf}, + // Block 0xa6, offset 0x4f2 {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0808, lo: 0x80, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0xbf}, - // Block 0xa7, offset 0x50e + // Block 0xa7, offset 0x4f5 {value: 0x0000, lo: 0x02}, {value: 0x03dd, lo: 0x80, hi: 0xb2}, {value: 0x0040, lo: 0xb3, hi: 0xbf}, - // Block 0xa8, offset 0x511 + // Block 0xa8, offset 0x4f8 {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0xb2}, + {value: 0x0808, lo: 0x80, hi: 0xb2}, {value: 0x0040, lo: 0xb3, hi: 0xb9}, - {value: 0x0018, lo: 0xba, hi: 0xbf}, - // Block 0xa9, offset 0x515 + {value: 0x0818, lo: 0xba, hi: 0xbf}, + // Block 0xa9, offset 0x4fc {value: 0x0000, lo: 0x03}, {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xbe}, + {value: 0x0818, lo: 0xa0, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xaa, offset 0x519 + // Block 0xaa, offset 0x500 {value: 0x0000, lo: 0x05}, - {value: 0x1008, lo: 0x80, hi: 0x80}, - {value: 0x1308, lo: 0x81, hi: 0x81}, - {value: 0x1008, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, {value: 0x0008, lo: 0x83, hi: 0xb7}, - {value: 0x1308, lo: 0xb8, hi: 0xbf}, - // Block 0xab, offset 0x51f + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xab, offset 0x506 {value: 0x0000, lo: 0x08}, - {value: 0x1308, lo: 0x80, hi: 0x85}, - {value: 0x1b08, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x80, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, {value: 0x0018, lo: 0x87, hi: 0x8d}, {value: 0x0040, lo: 0x8e, hi: 0x91}, {value: 0x0018, lo: 0x92, hi: 0xa5}, {value: 0x0008, lo: 0xa6, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbe}, - {value: 0x1b08, lo: 0xbf, hi: 0xbf}, - // Block 0xac, offset 0x528 + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xac, offset 0x50f {value: 0x0000, lo: 0x0b}, - {value: 0x1308, lo: 0x80, hi: 0x81}, - {value: 0x1008, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, {value: 0x0008, lo: 0x83, hi: 0xaf}, - {value: 0x1008, lo: 0xb0, hi: 0xb2}, - {value: 0x1308, lo: 0xb3, hi: 0xb6}, - {value: 0x1008, lo: 0xb7, hi: 0xb8}, - {value: 0x1b08, lo: 0xb9, hi: 0xb9}, - {value: 0x1308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, {value: 0x0018, lo: 0xbb, hi: 0xbc}, {value: 0x0340, lo: 0xbd, hi: 0xbd}, {value: 0x0018, lo: 0xbe, hi: 0xbf}, - // Block 0xad, offset 0x534 + // Block 0xad, offset 
0x51b {value: 0x0000, lo: 0x06}, {value: 0x0018, lo: 0x80, hi: 0x81}, {value: 0x0040, lo: 0x82, hi: 0x8f}, @@ -3853,39 +3861,39 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0xa9, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0xae, offset 0x53b + // Block 0xae, offset 0x522 {value: 0x0000, lo: 0x08}, - {value: 0x1308, lo: 0x80, hi: 0x82}, + {value: 0x3308, lo: 0x80, hi: 0x82}, {value: 0x0008, lo: 0x83, hi: 0xa6}, - {value: 0x1308, lo: 0xa7, hi: 0xab}, - {value: 0x1008, lo: 0xac, hi: 0xac}, - {value: 0x1308, lo: 0xad, hi: 0xb2}, - {value: 0x1b08, lo: 0xb3, hi: 0xb4}, + {value: 0x3308, lo: 0xa7, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb2}, + {value: 0x3b08, lo: 0xb3, hi: 0xb4}, {value: 0x0040, lo: 0xb5, hi: 0xb5}, {value: 0x0008, lo: 0xb6, hi: 0xbf}, - // Block 0xaf, offset 0x544 + // Block 0xaf, offset 0x52b {value: 0x0000, lo: 0x07}, {value: 0x0018, lo: 0x80, hi: 0x83}, {value: 0x0040, lo: 0x84, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0xb2}, - {value: 0x1308, lo: 0xb3, hi: 0xb3}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, {value: 0x0018, lo: 0xb4, hi: 0xb5}, {value: 0x0008, lo: 0xb6, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0xb0, offset 0x54c + // Block 0xb0, offset 0x533 {value: 0x0000, lo: 0x06}, - {value: 0x1308, lo: 0x80, hi: 0x81}, - {value: 0x1008, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, {value: 0x0008, lo: 0x83, hi: 0xb2}, - {value: 0x1008, lo: 0xb3, hi: 0xb5}, - {value: 0x1308, lo: 0xb6, hi: 0xbe}, - {value: 0x1008, lo: 0xbf, hi: 0xbf}, - // Block 0xb1, offset 0x553 + {value: 0x3008, lo: 0xb3, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xbe}, + {value: 0x3008, lo: 0xbf, hi: 0xbf}, + // Block 0xb1, offset 0x53a {value: 0x0000, lo: 0x0d}, - {value: 0x1808, lo: 0x80, hi: 0x80}, + {value: 0x3808, lo: 0x80, hi: 0x80}, {value: 0x0008, lo: 0x81, hi: 0x84}, {value: 0x0018, lo: 0x85, hi: 0x89}, - {value: 0x1308, lo: 0x8a, hi: 0x8c}, + {value: 0x3308, lo: 0x8a, hi: 0x8c}, {value: 0x0018, lo: 0x8d, hi: 0x8d}, {value: 0x0040, lo: 0x8e, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x9a}, @@ -3895,21 +3903,21 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0xa0, hi: 0xa0}, {value: 0x0018, lo: 0xa1, hi: 0xb4}, {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0xb2, offset 0x561 + // Block 0xb2, offset 0x548 {value: 0x0000, lo: 0x0c}, {value: 0x0008, lo: 0x80, hi: 0x91}, {value: 0x0040, lo: 0x92, hi: 0x92}, {value: 0x0008, lo: 0x93, hi: 0xab}, - {value: 0x1008, lo: 0xac, hi: 0xae}, - {value: 0x1308, lo: 0xaf, hi: 0xb1}, - {value: 0x1008, lo: 0xb2, hi: 0xb3}, - {value: 0x1308, lo: 0xb4, hi: 0xb4}, - {value: 0x1808, lo: 0xb5, hi: 0xb5}, - {value: 0x1308, lo: 0xb6, hi: 0xb7}, + {value: 0x3008, lo: 0xac, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3808, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, {value: 0x0018, lo: 0xb8, hi: 0xbd}, - {value: 0x1308, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbe, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xb3, offset 0x56e + // Block 0xb3, offset 0x555 {value: 0x0000, lo: 0x0c}, {value: 0x0008, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x87}, @@ -3923,28 +3931,28 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0018, lo: 0xa9, hi: 0xa9}, {value: 0x0040, lo: 0xaa, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0xb4, offset 
0x57b + // Block 0xb4, offset 0x562 {value: 0x0000, lo: 0x08}, {value: 0x0008, lo: 0x80, hi: 0x9e}, - {value: 0x1308, lo: 0x9f, hi: 0x9f}, - {value: 0x1008, lo: 0xa0, hi: 0xa2}, - {value: 0x1308, lo: 0xa3, hi: 0xa9}, - {value: 0x1b08, lo: 0xaa, hi: 0xaa}, + {value: 0x3308, lo: 0x9f, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa9}, + {value: 0x3b08, lo: 0xaa, hi: 0xaa}, {value: 0x0040, lo: 0xab, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0xb5, offset 0x584 + // Block 0xb5, offset 0x56b {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0xb4}, - {value: 0x1008, lo: 0xb5, hi: 0xb7}, - {value: 0x1308, lo: 0xb8, hi: 0xbf}, - // Block 0xb6, offset 0x588 + {value: 0x3008, lo: 0xb5, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xb6, offset 0x56f {value: 0x0000, lo: 0x0d}, - {value: 0x1008, lo: 0x80, hi: 0x81}, - {value: 0x1b08, lo: 0x82, hi: 0x82}, - {value: 0x1308, lo: 0x83, hi: 0x84}, - {value: 0x1008, lo: 0x85, hi: 0x85}, - {value: 0x1308, lo: 0x86, hi: 0x86}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x84}, + {value: 0x3008, lo: 0x85, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, {value: 0x0008, lo: 0x87, hi: 0x8a}, {value: 0x0018, lo: 0x8b, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, @@ -3953,56 +3961,56 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0x9c, hi: 0x9c}, {value: 0x0018, lo: 0x9d, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0xbf}, - // Block 0xb7, offset 0x596 + // Block 0xb7, offset 0x57d {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x1008, lo: 0xb0, hi: 0xb2}, - {value: 0x1308, lo: 0xb3, hi: 0xb8}, - {value: 0x1008, lo: 0xb9, hi: 0xb9}, - {value: 0x1308, lo: 0xba, hi: 0xba}, - {value: 0x1008, lo: 0xbb, hi: 0xbe}, - {value: 0x1308, lo: 0xbf, hi: 0xbf}, - // Block 0xb8, offset 0x59e + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xb8, offset 0x585 {value: 0x0000, lo: 0x0a}, - {value: 0x1308, lo: 0x80, hi: 0x80}, - {value: 0x1008, lo: 0x81, hi: 0x81}, - {value: 0x1b08, lo: 0x82, hi: 0x82}, - {value: 0x1308, lo: 0x83, hi: 0x83}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, {value: 0x0008, lo: 0x84, hi: 0x85}, {value: 0x0018, lo: 0x86, hi: 0x86}, {value: 0x0008, lo: 0x87, hi: 0x87}, {value: 0x0040, lo: 0x88, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0xbf}, - // Block 0xb9, offset 0x5a9 + // Block 0xb9, offset 0x590 {value: 0x0000, lo: 0x08}, {value: 0x0008, lo: 0x80, hi: 0xae}, - {value: 0x1008, lo: 0xaf, hi: 0xb1}, - {value: 0x1308, lo: 0xb2, hi: 0xb5}, + {value: 0x3008, lo: 0xaf, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xb7}, - {value: 0x1008, lo: 0xb8, hi: 0xbb}, - {value: 0x1308, lo: 0xbc, hi: 0xbd}, - {value: 0x1008, lo: 0xbe, hi: 0xbe}, - {value: 0x1b08, lo: 0xbf, hi: 0xbf}, - // Block 0xba, offset 0x5b2 + {value: 0x3008, lo: 0xb8, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xba, offset 0x599 {value: 0x0000, lo: 0x05}, - {value: 0x1308, lo: 0x80, hi: 0x80}, + {value: 0x3308, 
lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x97}, {value: 0x0008, lo: 0x98, hi: 0x9b}, - {value: 0x1308, lo: 0x9c, hi: 0x9d}, + {value: 0x3308, lo: 0x9c, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0xbf}, - // Block 0xbb, offset 0x5b8 + // Block 0xbb, offset 0x59f {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x1008, lo: 0xb0, hi: 0xb2}, - {value: 0x1308, lo: 0xb3, hi: 0xba}, - {value: 0x1008, lo: 0xbb, hi: 0xbc}, - {value: 0x1308, lo: 0xbd, hi: 0xbd}, - {value: 0x1008, lo: 0xbe, hi: 0xbe}, - {value: 0x1b08, lo: 0xbf, hi: 0xbf}, - // Block 0xbc, offset 0x5c0 + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xbc, offset 0x5a7 {value: 0x0000, lo: 0x08}, - {value: 0x1308, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x83}, {value: 0x0008, lo: 0x84, hi: 0x84}, {value: 0x0040, lo: 0x85, hi: 0x8f}, @@ -4010,60 +4018,97 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0x9a, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xac}, {value: 0x0040, lo: 0xad, hi: 0xbf}, - // Block 0xbd, offset 0x5c9 + // Block 0xbd, offset 0x5b0 {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 0x80, hi: 0xaa}, - {value: 0x1308, lo: 0xab, hi: 0xab}, - {value: 0x1008, lo: 0xac, hi: 0xac}, - {value: 0x1308, lo: 0xad, hi: 0xad}, - {value: 0x1008, lo: 0xae, hi: 0xaf}, - {value: 0x1308, lo: 0xb0, hi: 0xb5}, - {value: 0x1808, lo: 0xb6, hi: 0xb6}, - {value: 0x1308, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb5}, + {value: 0x3808, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, {value: 0x0040, lo: 0xb8, hi: 0xbf}, - // Block 0xbe, offset 0x5d3 + // Block 0xbe, offset 0x5ba {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x89}, {value: 0x0040, lo: 0x8a, hi: 0xbf}, - // Block 0xbf, offset 0x5d6 + // Block 0xbf, offset 0x5bd {value: 0x0000, lo: 0x0b}, {value: 0x0008, lo: 0x80, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9c}, - {value: 0x1308, lo: 0x9d, hi: 0x9f}, - {value: 0x1008, lo: 0xa0, hi: 0xa1}, - {value: 0x1308, lo: 0xa2, hi: 0xa5}, - {value: 0x1008, lo: 0xa6, hi: 0xa6}, - {value: 0x1308, lo: 0xa7, hi: 0xaa}, - {value: 0x1b08, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, {value: 0x0040, lo: 0xac, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xb9}, {value: 0x0018, lo: 0xba, hi: 0xbf}, - // Block 0xc0, offset 0x5e2 + // Block 0xc0, offset 0x5c9 {value: 0x0000, lo: 0x02}, {value: 0x0040, lo: 0x80, hi: 0x9f}, {value: 0x049d, lo: 0xa0, hi: 0xbf}, - // Block 0xc1, offset 0x5e5 + // Block 0xc1, offset 0x5cc {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0xa9}, {value: 0x0018, lo: 0xaa, hi: 0xb2}, {value: 0x0040, lo: 0xb3, hi: 0xbe}, {value: 0x0008, lo: 0xbf, hi: 0xbf}, - // Block 0xc2, offset 0x5ea + // Block 0xc2, offset 0x5d1 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb2}, + {value: 
0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0xc3, offset 0x5de + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x3b08, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3308, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0xbf}, + // Block 0xc4, offset 0x5e7 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x89}, + {value: 0x3308, lo: 0x8a, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x98}, + {value: 0x3b08, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xa2}, + {value: 0x0040, lo: 0xa3, hi: 0xbf}, + // Block 0xc5, offset 0x5f3 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xb8}, {value: 0x0040, lo: 0xb9, hi: 0xbf}, - // Block 0xc3, offset 0x5ed + // Block 0xc6, offset 0x5f6 {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 0x80, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0x89}, {value: 0x0008, lo: 0x8a, hi: 0xae}, - {value: 0x1008, lo: 0xaf, hi: 0xaf}, - {value: 0x1308, lo: 0xb0, hi: 0xb6}, + {value: 0x3008, lo: 0xaf, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xb7}, - {value: 0x1308, lo: 0xb8, hi: 0xbd}, - {value: 0x1008, lo: 0xbe, hi: 0xbe}, - {value: 0x1b08, lo: 0xbf, hi: 0xbf}, - // Block 0xc4, offset 0x5f7 + {value: 0x3308, lo: 0xb8, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xc7, offset 0x600 {value: 0x0000, lo: 0x08}, {value: 0x0008, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x85}, @@ -4073,42 +4118,65 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0xad, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb1}, {value: 0x0008, lo: 0xb2, hi: 0xbf}, - // Block 0xc5, offset 0x600 + // Block 0xc8, offset 0x609 {value: 0x0000, lo: 0x0b}, {value: 0x0008, lo: 0x80, hi: 0x8f}, {value: 0x0040, lo: 0x90, hi: 0x91}, - {value: 0x1308, lo: 0x92, hi: 0xa7}, + {value: 0x3308, lo: 0x92, hi: 0xa7}, {value: 0x0040, lo: 0xa8, hi: 0xa8}, - {value: 0x1008, lo: 0xa9, hi: 0xa9}, - {value: 0x1308, lo: 0xaa, hi: 0xb0}, - {value: 0x1008, lo: 0xb1, hi: 0xb1}, - {value: 0x1308, lo: 0xb2, hi: 0xb3}, - {value: 0x1008, lo: 0xb4, hi: 0xb4}, - {value: 0x1308, lo: 0xb5, hi: 0xb6}, + {value: 0x3008, lo: 0xa9, hi: 0xa9}, + {value: 0x3308, lo: 0xaa, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0xc6, offset 0x60c + // Block 0xc9, offset 0x615 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + 
// Block 0xca, offset 0x622 + {value: 0x0000, lo: 0x07}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xcb, offset 0x62a {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0xbf}, - // Block 0xc7, offset 0x60f + // Block 0xcc, offset 0x62d {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0xae}, {value: 0x0040, lo: 0xaf, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb4}, {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0xc8, offset 0x614 + // Block 0xcd, offset 0x632 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x83}, {value: 0x0040, lo: 0x84, hi: 0xbf}, - // Block 0xc9, offset 0x617 + // Block 0xce, offset 0x635 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xae}, {value: 0x0040, lo: 0xaf, hi: 0xbf}, - // Block 0xca, offset 0x61a + // Block 0xcf, offset 0x638 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0xbf}, - // Block 0xcb, offset 0x61d + // Block 0xd0, offset 0x63b {value: 0x0000, lo: 0x06}, {value: 0x0008, lo: 0x80, hi: 0x9e}, {value: 0x0040, lo: 0x9f, hi: 0x9f}, @@ -4116,20 +4184,20 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0xaa, hi: 0xad}, {value: 0x0018, lo: 0xae, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0xcc, offset 0x624 + // Block 0xd1, offset 0x642 {value: 0x0000, lo: 0x06}, {value: 0x0040, lo: 0x80, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0xad}, {value: 0x0040, lo: 0xae, hi: 0xaf}, - {value: 0x1308, lo: 0xb0, hi: 0xb4}, + {value: 0x3308, lo: 0xb0, hi: 0xb4}, {value: 0x0018, lo: 0xb5, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xbf}, - // Block 0xcd, offset 0x62b + // Block 0xd2, offset 0x649 {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x1308, lo: 0xb0, hi: 0xb6}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, {value: 0x0018, lo: 0xb7, hi: 0xbf}, - // Block 0xce, offset 0x62f + // Block 0xd3, offset 0x64d {value: 0x0000, lo: 0x0a}, {value: 0x0008, lo: 0x80, hi: 0x83}, {value: 0x0018, lo: 0x84, hi: 0x85}, @@ -4141,67 +4209,75 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0008, lo: 0xa3, hi: 0xb7}, {value: 0x0040, lo: 0xb8, hi: 0xbc}, {value: 0x0008, lo: 0xbd, hi: 0xbf}, - // Block 0xcf, offset 0x63a + // Block 0xd4, offset 0x658 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x8f}, {value: 0x0040, lo: 0x90, hi: 0xbf}, - // Block 0xd0, offset 0x63d + // Block 0xd5, offset 0x65b {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0x84}, {value: 0x0040, lo: 0x85, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x90}, - {value: 0x1008, lo: 0x91, hi: 0xbe}, + {value: 0x3008, lo: 0x91, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xd1, offset 0x643 + // Block 0xd6, offset 0x661 {value: 0x0000, lo: 0x04}, {value: 0x0040, lo: 0x80, hi: 0x8e}, - {value: 0x1308, lo: 0x8f, hi: 0x92}, + {value: 0x3308, lo: 0x8f, hi: 0x92}, {value: 0x0008, lo: 0x93, hi: 0x9f}, {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0xd2, offset 0x648 + // Block 0xd7, offset 0x666 {value: 0x0000, lo: 0x03}, {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa0}, - {value: 0x0040, lo: 0xa1, hi: 0xbf}, - // Block 0xd3, offset 0x64c + {value: 0x0008, lo: 0xa0, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xbf}, + // Block 0xd8, offset 0x66a {value: 0x0000, lo: 
0x02}, {value: 0x0008, lo: 0x80, hi: 0xac}, {value: 0x0040, lo: 0xad, hi: 0xbf}, - // Block 0xd4, offset 0x64f + // Block 0xd9, offset 0x66d {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xb2}, {value: 0x0040, lo: 0xb3, hi: 0xbf}, - // Block 0xd5, offset 0x652 + // Block 0xda, offset 0x670 {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x81}, - {value: 0x0040, lo: 0x82, hi: 0xbf}, - // Block 0xd6, offset 0x655 + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xbf}, + // Block 0xdb, offset 0x673 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xdc, offset 0x676 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0xdd, offset 0x679 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0xaa}, {value: 0x0040, lo: 0xab, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbc}, {value: 0x0040, lo: 0xbd, hi: 0xbf}, - // Block 0xd7, offset 0x65a + // Block 0xde, offset 0x67e {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 0x80, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9b}, {value: 0x0018, lo: 0x9c, hi: 0x9c}, - {value: 0x1308, lo: 0x9d, hi: 0x9e}, + {value: 0x3308, lo: 0x9d, hi: 0x9e}, {value: 0x0018, lo: 0x9f, hi: 0x9f}, {value: 0x03c0, lo: 0xa0, hi: 0xa3}, {value: 0x0040, lo: 0xa4, hi: 0xbf}, - // Block 0xd8, offset 0x664 + // Block 0xdf, offset 0x688 {value: 0x0000, lo: 0x02}, {value: 0x0018, lo: 0x80, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xbf}, - // Block 0xd9, offset 0x667 + // Block 0xe0, offset 0x68b {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0xa6}, {value: 0x0040, lo: 0xa7, hi: 0xa8}, {value: 0x0018, lo: 0xa9, hi: 0xbf}, - // Block 0xda, offset 0x66b + // Block 0xe1, offset 0x68f {value: 0x0000, lo: 0x0e}, {value: 0x0018, lo: 0x80, hi: 0x9d}, {value: 0xb5b9, lo: 0x9e, hi: 0x9e}, @@ -4211,127 +4287,127 @@ var idnaSparseValues = [1876]valueRange{ {value: 0xb719, lo: 0xa2, hi: 0xa2}, {value: 0xb781, lo: 0xa3, hi: 0xa3}, {value: 0xb7e9, lo: 0xa4, hi: 0xa4}, - {value: 0x1018, lo: 0xa5, hi: 0xa6}, - {value: 0x1318, lo: 0xa7, hi: 0xa9}, + {value: 0x3018, lo: 0xa5, hi: 0xa6}, + {value: 0x3318, lo: 0xa7, hi: 0xa9}, {value: 0x0018, lo: 0xaa, hi: 0xac}, - {value: 0x1018, lo: 0xad, hi: 0xb2}, + {value: 0x3018, lo: 0xad, hi: 0xb2}, {value: 0x0340, lo: 0xb3, hi: 0xba}, - {value: 0x1318, lo: 0xbb, hi: 0xbf}, - // Block 0xdb, offset 0x67a + {value: 0x3318, lo: 0xbb, hi: 0xbf}, + // Block 0xe2, offset 0x69e {value: 0x0000, lo: 0x0b}, - {value: 0x1318, lo: 0x80, hi: 0x82}, + {value: 0x3318, lo: 0x80, hi: 0x82}, {value: 0x0018, lo: 0x83, hi: 0x84}, - {value: 0x1318, lo: 0x85, hi: 0x8b}, + {value: 0x3318, lo: 0x85, hi: 0x8b}, {value: 0x0018, lo: 0x8c, hi: 0xa9}, - {value: 0x1318, lo: 0xaa, hi: 0xad}, + {value: 0x3318, lo: 0xaa, hi: 0xad}, {value: 0x0018, lo: 0xae, hi: 0xba}, {value: 0xb851, lo: 0xbb, hi: 0xbb}, {value: 0xb899, lo: 0xbc, hi: 0xbc}, {value: 0xb8e1, lo: 0xbd, hi: 0xbd}, {value: 0xb949, lo: 0xbe, hi: 0xbe}, {value: 0xb9b1, lo: 0xbf, hi: 0xbf}, - // Block 0xdc, offset 0x686 + // Block 0xe3, offset 0x6aa {value: 0x0000, lo: 0x03}, {value: 0xba19, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0xa8}, {value: 0x0040, lo: 0xa9, hi: 0xbf}, - // Block 0xdd, offset 0x68a + // Block 0xe4, offset 0x6ae {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x81}, - {value: 0x1318, lo: 0x82, hi: 0x84}, + {value: 0x3318, lo: 0x82, hi: 
0x84}, {value: 0x0018, lo: 0x85, hi: 0x85}, {value: 0x0040, lo: 0x86, hi: 0xbf}, - // Block 0xde, offset 0x68f + // Block 0xe5, offset 0x6b3 {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xb1}, {value: 0x0040, lo: 0xb2, hi: 0xbf}, - // Block 0xdf, offset 0x694 + // Block 0xe6, offset 0x6b8 {value: 0x0000, lo: 0x03}, - {value: 0x1308, lo: 0x80, hi: 0xb6}, + {value: 0x3308, lo: 0x80, hi: 0xb6}, {value: 0x0018, lo: 0xb7, hi: 0xba}, - {value: 0x1308, lo: 0xbb, hi: 0xbf}, - // Block 0xe0, offset 0x698 + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0xe7, offset 0x6bc {value: 0x0000, lo: 0x04}, - {value: 0x1308, lo: 0x80, hi: 0xac}, + {value: 0x3308, lo: 0x80, hi: 0xac}, {value: 0x0018, lo: 0xad, hi: 0xb4}, - {value: 0x1308, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, {value: 0x0018, lo: 0xb6, hi: 0xbf}, - // Block 0xe1, offset 0x69d + // Block 0xe8, offset 0x6c1 {value: 0x0000, lo: 0x08}, {value: 0x0018, lo: 0x80, hi: 0x83}, - {value: 0x1308, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x84, hi: 0x84}, {value: 0x0018, lo: 0x85, hi: 0x8b}, {value: 0x0040, lo: 0x8c, hi: 0x9a}, - {value: 0x1308, lo: 0x9b, hi: 0x9f}, + {value: 0x3308, lo: 0x9b, hi: 0x9f}, {value: 0x0040, lo: 0xa0, hi: 0xa0}, - {value: 0x1308, lo: 0xa1, hi: 0xaf}, + {value: 0x3308, lo: 0xa1, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0xe2, offset 0x6a6 + // Block 0xe9, offset 0x6ca {value: 0x0000, lo: 0x0a}, - {value: 0x1308, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x87}, - {value: 0x1308, lo: 0x88, hi: 0x98}, + {value: 0x3308, lo: 0x88, hi: 0x98}, {value: 0x0040, lo: 0x99, hi: 0x9a}, - {value: 0x1308, lo: 0x9b, hi: 0xa1}, + {value: 0x3308, lo: 0x9b, hi: 0xa1}, {value: 0x0040, lo: 0xa2, hi: 0xa2}, - {value: 0x1308, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa3, hi: 0xa4}, {value: 0x0040, lo: 0xa5, hi: 0xa5}, - {value: 0x1308, lo: 0xa6, hi: 0xaa}, + {value: 0x3308, lo: 0xa6, hi: 0xaa}, {value: 0x0040, lo: 0xab, hi: 0xbf}, - // Block 0xe3, offset 0x6b1 + // Block 0xea, offset 0x6d5 {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0808, lo: 0x80, hi: 0x84}, {value: 0x0040, lo: 0x85, hi: 0x86}, - {value: 0x0018, lo: 0x87, hi: 0x8f}, - {value: 0x1308, lo: 0x90, hi: 0x96}, + {value: 0x0818, lo: 0x87, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0xbf}, - // Block 0xe4, offset 0x6b7 + // Block 0xeb, offset 0x6db {value: 0x0000, lo: 0x07}, - {value: 0x0208, lo: 0x80, hi: 0x83}, - {value: 0x1308, lo: 0x84, hi: 0x8a}, + {value: 0x0a08, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x8a}, {value: 0x0040, lo: 0x8b, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0808, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9d}, - {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0818, lo: 0x9e, hi: 0x9f}, {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0xe5, offset 0x6bf + // Block 0xec, offset 0x6e3 {value: 0x0000, lo: 0x03}, {value: 0x0040, lo: 0x80, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb1}, {value: 0x0040, lo: 0xb2, hi: 0xbf}, - // Block 0xe6, offset 0x6c3 + // Block 0xed, offset 0x6e7 {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0xab}, {value: 0x0040, lo: 0xac, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xbf}, - // Block 0xe7, offset 0x6c7 + // Block 0xee, offset 0x6eb {value: 0x0000, lo: 0x05}, {value: 0x0018, lo: 0x80, hi: 0x93}, {value: 0x0040, lo: 0x94, hi: 0x9f}, {value: 
0x0018, lo: 0xa0, hi: 0xae}, {value: 0x0040, lo: 0xaf, hi: 0xb0}, {value: 0x0018, lo: 0xb1, hi: 0xbf}, - // Block 0xe8, offset 0x6cd + // Block 0xef, offset 0x6f1 {value: 0x0000, lo: 0x05}, {value: 0x0040, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x8f}, {value: 0x0040, lo: 0x90, hi: 0x90}, {value: 0x0018, lo: 0x91, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xbf}, - // Block 0xe9, offset 0x6d3 + // Block 0xf0, offset 0x6f7 {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x8f}, {value: 0xc1c1, lo: 0x90, hi: 0x90}, {value: 0x0018, lo: 0x91, hi: 0xac}, {value: 0x0040, lo: 0xad, hi: 0xbf}, - // Block 0xea, offset 0x6d8 + // Block 0xf1, offset 0x6fc {value: 0x0000, lo: 0x02}, {value: 0x0040, lo: 0x80, hi: 0xa5}, {value: 0x0018, lo: 0xa6, hi: 0xbf}, - // Block 0xeb, offset 0x6db - {value: 0x0000, lo: 0x0d}, + // Block 0xf2, offset 0x6ff + {value: 0x0000, lo: 0x0f}, {value: 0xc7e9, lo: 0x80, hi: 0x80}, {value: 0xc839, lo: 0x81, hi: 0x81}, {value: 0xc889, lo: 0x82, hi: 0x82}, @@ -4344,84 +4420,88 @@ var idnaSparseValues = [1876]valueRange{ {value: 0x0040, lo: 0x89, hi: 0x8f}, {value: 0xcab9, lo: 0x90, hi: 0x90}, {value: 0xcad9, lo: 0x91, hi: 0x91}, - {value: 0x0040, lo: 0x92, hi: 0xbf}, - // Block 0xec, offset 0x6e9 + {value: 0x0040, lo: 0x92, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xbf}, + // Block 0xf3, offset 0x70f {value: 0x0000, lo: 0x06}, - {value: 0x0018, lo: 0x80, hi: 0x92}, - {value: 0x0040, lo: 0x93, hi: 0x9f}, + {value: 0x0018, lo: 0x80, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xac}, {value: 0x0040, lo: 0xad, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0xed, offset 0x6f0 + {value: 0x0018, lo: 0xb0, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xf4, offset 0x716 {value: 0x0000, lo: 0x02}, {value: 0x0018, lo: 0x80, hi: 0xb3}, {value: 0x0040, lo: 0xb4, hi: 0xbf}, - // Block 0xee, offset 0x6f3 + // Block 0xf5, offset 0x719 {value: 0x0000, lo: 0x02}, {value: 0x0018, lo: 0x80, hi: 0x94}, {value: 0x0040, lo: 0x95, hi: 0xbf}, - // Block 0xef, offset 0x6f6 + // Block 0xf6, offset 0x71c {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0x8b}, {value: 0x0040, lo: 0x8c, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0xbf}, - // Block 0xf0, offset 0x6fa + // Block 0xf7, offset 0x720 {value: 0x0000, lo: 0x05}, {value: 0x0018, lo: 0x80, hi: 0x87}, {value: 0x0040, lo: 0x88, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xbf}, - // Block 0xf1, offset 0x700 + // Block 0xf8, offset 0x726 {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x87}, {value: 0x0040, lo: 0x88, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0xad}, {value: 0x0040, lo: 0xae, hi: 0xbf}, - // Block 0xf2, offset 0x705 - {value: 0x0000, lo: 0x09}, - {value: 0x0040, lo: 0x80, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xa7}, - {value: 0x0040, lo: 0xa8, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb0}, - {value: 0x0040, lo: 0xb1, hi: 0xb2}, - {value: 0x0018, lo: 0xb3, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xf3, offset 0x70f + // Block 0xf9, offset 0x72b {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x8b}, {value: 0x0040, lo: 0x8c, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0xbf}, - // Block 0xf4, offset 0x714 - {value: 0x0000, lo: 0x02}, - {value: 
0x0018, lo: 0x80, hi: 0x91}, - {value: 0x0040, lo: 0x92, hi: 0xbf}, - // Block 0xf5, offset 0x717 + {value: 0x0018, lo: 0x90, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xfa, offset 0x730 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xbf}, + // Block 0xfb, offset 0x735 {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0xbf}, + // Block 0xfc, offset 0x738 + {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x80}, - {value: 0x0040, lo: 0x81, hi: 0xbf}, - // Block 0xf6, offset 0x71a + {value: 0x0040, lo: 0x81, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xbf}, + // Block 0xfd, offset 0x73d {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0xbf}, - // Block 0xf7, offset 0x71d + // Block 0xfe, offset 0x740 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xb4}, {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0xf8, offset 0x720 + // Block 0xff, offset 0x743 {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0xf9, offset 0x724 - {value: 0x0000, lo: 0x02}, + // Block 0x100, offset 0x747 + {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0xa1}, - {value: 0x0040, lo: 0xa2, hi: 0xbf}, - // Block 0xfa, offset 0x727 + {value: 0x0040, lo: 0xa2, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x101, offset 0x74b + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x102, offset 0x74e {value: 0x0020, lo: 0x0f}, {value: 0xdeb9, lo: 0x80, hi: 0x89}, {value: 0x8dfd, lo: 0x8a, hi: 0x8a}, @@ -4438,7 +4518,7 @@ var idnaSparseValues = [1876]valueRange{ {value: 0xe4f9, lo: 0xba, hi: 0xba}, {value: 0x8edd, lo: 0xbb, hi: 0xbb}, {value: 0xe519, lo: 0xbc, hi: 0xbf}, - // Block 0xfb, offset 0x737 + // Block 0x103, offset 0x75e {value: 0x0020, lo: 0x10}, {value: 0x937d, lo: 0x80, hi: 0x80}, {value: 0xf099, lo: 0x81, hi: 0x86}, @@ -4455,23 +4535,23 @@ var idnaSparseValues = [1876]valueRange{ {value: 0xf4d9, lo: 0xae, hi: 0xaf}, {value: 0x94dd, lo: 0xb0, hi: 0xb1}, {value: 0xf519, lo: 0xb2, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xfc, offset 0x748 + {value: 0x2040, lo: 0xbf, hi: 0xbf}, + // Block 0x104, offset 0x76f {value: 0x0000, lo: 0x04}, {value: 0x0040, lo: 0x80, hi: 0x80}, {value: 0x0340, lo: 0x81, hi: 0x81}, {value: 0x0040, lo: 0x82, hi: 0x9f}, {value: 0x0340, lo: 0xa0, hi: 0xbf}, - // Block 0xfd, offset 0x74d + // Block 0x105, offset 0x774 {value: 0x0000, lo: 0x01}, {value: 0x0340, lo: 0x80, hi: 0xbf}, - // Block 0xfe, offset 0x74f + // Block 0x106, offset 0x776 {value: 0x0000, lo: 0x01}, - {value: 0x13c0, lo: 0x80, hi: 0xbf}, - // Block 0xff, offset 0x751 + {value: 0x33c0, lo: 0x80, hi: 0xbf}, + // Block 0x107, offset 0x778 {value: 0x0000, lo: 0x02}, - {value: 0x13c0, lo: 0x80, hi: 0xaf}, + {value: 0x33c0, lo: 0x80, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbf}, } -// Total table size 41559 bytes (40KiB); checksum: F4A1FA4E +// Total table size 42115 bytes (41KiB); checksum: F4A1FA4E diff --git a/vendor/golang.org/x/net/idna/trieval.go b/vendor/golang.org/x/net/idna/trieval.go index 63cb03b59b..7a8cf889b5 100644 --- a/vendor/golang.org/x/net/idna/trieval.go +++ b/vendor/golang.org/x/net/idna/trieval.go @@ -26,9 +26,9 
@@ package idna // 15..3 index into xor or mapping table // } // } else { -// 15..13 unused -// 12 modifier (including virama) -// 11 virama modifier +// 15..14 unused +// 13 mayNeedNorm +// 12..11 attributes // 10..8 joining type // 7..3 category type // } @@ -49,15 +49,20 @@ const ( joinShift = 8 joinMask = 0x07 - viramaModifier = 0x0800 + // Attributes + attributesMask = 0x1800 + viramaModifier = 0x1800 modifier = 0x1000 + rtl = 0x0800 + + mayNeedNorm = 0x2000 ) // A category corresponds to a category defined in the IDNA mapping table. type category uint16 const ( - unknown category = 0 // not defined currently in unicode. + unknown category = 0 // not currently defined in unicode. mapped category = 1 disallowedSTD3Mapped category = 2 deviation category = 3 @@ -110,5 +115,5 @@ func (c info) isModifier() bool { } func (c info) isViramaModifier() bool { - return c&(viramaModifier|catSmallMask) == viramaModifier + return c&(attributesMask|catSmallMask) == viramaModifier } diff --git a/vendor/golang.org/x/oauth2/internal/doc.go b/vendor/golang.org/x/oauth2/internal/doc.go new file mode 100644 index 0000000000..03265e888a --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/doc.go @@ -0,0 +1,6 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package. +package internal diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go index e31541b39e..6978192a99 100644 --- a/vendor/golang.org/x/oauth2/internal/oauth2.go +++ b/vendor/golang.org/x/oauth2/internal/oauth2.go @@ -2,7 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package internal contains support packages for oauth2 package. package internal import ( diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go index 53ec23cee0..cf959ea69f 100644 --- a/vendor/golang.org/x/oauth2/internal/token.go +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -2,7 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package internal contains support packages for oauth2 package. package internal import ( diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go index f1f173e345..783bd98c8b 100644 --- a/vendor/golang.org/x/oauth2/internal/transport.go +++ b/vendor/golang.org/x/oauth2/internal/transport.go @@ -2,7 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package internal contains support packages for oauth2 package. package internal import ( diff --git a/vendor/golang.org/x/sys/unix/env_unix.go b/vendor/golang.org/x/sys/unix/env_unix.go index 45e281a047..2e06b33f2e 100644 --- a/vendor/golang.org/x/sys/unix/env_unix.go +++ b/vendor/golang.org/x/sys/unix/env_unix.go @@ -1,4 +1,4 @@ -// Copyright 2010 The Go Authors. All rights reserved. +// Copyright 2010 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
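An aside on the trieval.go hunk above: bits 12..11 of the info value now form a two-bit attribute field in which rtl, modifier and virama are mutually exclusive values, and bit 13 carries the new mayNeedNorm flag. A minimal standalone sketch, with the constant values copied from that hunk, illustrates why isViramaModifier can compare against attributesMask rather than test a single flag (the real method additionally masks in catSmallMask so the low category bits cannot alias the comparison; that part is omitted here):

package main

import "fmt"

// Constants copied from the trieval.go hunk above; info is a uint16 in that package.
const (
	attributesMask = 0x1800 // bits 12..11
	viramaModifier = 0x1800 // attribute value 3: virama modifier
	modifier       = 0x1000 // attribute value 2: modifier
	rtl            = 0x0800 // attribute value 1: right-to-left
	mayNeedNorm    = 0x2000 // bit 13, independent of the attribute field
)

func isVirama(c uint16) bool {
	// Masking with attributesMask isolates bits 12..11, so the three attribute
	// states can be distinguished with a single equality check.
	return c&attributesMask == viramaModifier
}

func main() {
	for _, c := range []uint16{rtl, modifier, viramaModifier, viramaModifier | mayNeedNorm} {
		fmt.Printf("%#04x virama=%v\n", c, isVirama(c))
	}
}

Running this prints false for the rtl and modifier values and true for the two virama cases, matching the new two-bit encoding that replaced the old single viramaModifier flag.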
diff --git a/vendor/golang.org/x/sys/unix/env_unset.go b/vendor/golang.org/x/sys/unix/env_unset.go index 9222262559..c44fdc4afa 100644 --- a/vendor/golang.org/x/sys/unix/env_unset.go +++ b/vendor/golang.org/x/sys/unix/env_unset.go @@ -1,4 +1,4 @@ -// Copyright 2014 The Go Authors. All rights reserved. +// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/golang.org/x/sys/unix/gccgo.go b/vendor/golang.org/x/sys/unix/gccgo.go index 94c8232124..40bed3fa80 100644 --- a/vendor/golang.org/x/sys/unix/gccgo.go +++ b/vendor/golang.org/x/sys/unix/gccgo.go @@ -1,4 +1,4 @@ -// Copyright 2015 The Go Authors. All rights reserved. +// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -8,7 +8,7 @@ package unix import "syscall" -// We can't use the gc-syntax .s files for gccgo. On the plus side +// We can't use the gc-syntax .s files for gccgo. On the plus side // much of the functionality can be written directly in Go. //extern gccgoRealSyscall diff --git a/vendor/golang.org/x/sys/unix/gccgo_c.c b/vendor/golang.org/x/sys/unix/gccgo_c.c index 07f6be0392..99a774f2be 100644 --- a/vendor/golang.org/x/sys/unix/gccgo_c.c +++ b/vendor/golang.org/x/sys/unix/gccgo_c.c @@ -1,4 +1,4 @@ -// Copyright 2015 The Go Authors. All rights reserved. +// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go index bffe1a77db..251a977a81 100644 --- a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go @@ -1,4 +1,4 @@ -// Copyright 2015 The Go Authors. All rights reserved. +// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh index c3a0809268..00b7ce7ac1 100755 --- a/vendor/golang.org/x/sys/unix/mkall.sh +++ b/vendor/golang.org/x/sys/unix/mkall.sh @@ -142,7 +142,6 @@ openbsd_386) mkerrors="$mkerrors -m32" mksyscall="./mksyscall.pl -l32 -openbsd" mksysctl="./mksysctl_openbsd.pl" - zsysctl="zsysctl_openbsd.go" mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; @@ -150,7 +149,6 @@ openbsd_amd64) mkerrors="$mkerrors -m64" mksyscall="./mksyscall.pl -openbsd" mksysctl="./mksysctl_openbsd.pl" - zsysctl="zsysctl_openbsd.go" mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; @@ -158,7 +156,6 @@ openbsd_arm) mkerrors="$mkerrors" mksyscall="./mksyscall.pl -l32 -openbsd -arm" mksysctl="./mksysctl_openbsd.pl" - zsysctl="zsysctl_openbsd.go" mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl" # Let the type of C char be signed for making the bare syscall # API consistent across platforms. 
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 22bf41d852..9a799b2f5b 100755 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -84,6 +84,7 @@ includes_FreeBSD=' #include #include #include +#include #include #include #include @@ -183,6 +184,7 @@ struct ltchars { #include #include #include +#include #include #include @@ -283,6 +285,7 @@ includes_SunOS=' #include #include #include +#include #include #include #include @@ -347,6 +350,7 @@ ccflags="$@" $2 !~ /^EXPR_/ && $2 ~ /^E[A-Z0-9_]+$/ || $2 ~ /^B[0-9_]+$/ || + $2 ~ /^(OLD|NEW)DEV$/ || $2 == "BOTHER" || $2 ~ /^CI?BAUD(EX)?$/ || $2 == "IBSHIFT" || @@ -417,7 +421,9 @@ ccflags="$@" $2 ~ /^(VM|VMADDR)_/ || $2 ~ /^(TASKSTATS|TS)_/ || $2 ~ /^GENL_/ || + $2 ~ /^UTIME_/ || $2 ~ /^XATTR_(CREATE|REPLACE)/ || + $2 ~ /^WDIOC_/ || $2 !~ "WMESGLEN" && $2 ~ /^W[A-Z0-9]+$/ || $2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE)/ {printf("\t%s = C.%s\n", $2, $2)} diff --git a/vendor/golang.org/x/sys/unix/pagesize_unix.go b/vendor/golang.org/x/sys/unix/pagesize_unix.go index 45afcf72d6..83c85e0196 100644 --- a/vendor/golang.org/x/sys/unix/pagesize_unix.go +++ b/vendor/golang.org/x/sys/unix/pagesize_unix.go @@ -1,4 +1,4 @@ -// Copyright 2017 The Go Authors. All rights reserved. +// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/golang.org/x/sys/unix/race.go b/vendor/golang.org/x/sys/unix/race.go index 3c7627eb5c..61712b51c9 100644 --- a/vendor/golang.org/x/sys/unix/race.go +++ b/vendor/golang.org/x/sys/unix/race.go @@ -1,4 +1,4 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/golang.org/x/sys/unix/race0.go b/vendor/golang.org/x/sys/unix/race0.go index f8678e0d21..dd0820431e 100644 --- a/vendor/golang.org/x/sys/unix/race0.go +++ b/vendor/golang.org/x/sys/unix/race0.go @@ -1,4 +1,4 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_linux.go b/vendor/golang.org/x/sys/unix/sockcmsg_linux.go index d9ff4731a2..6079eb4ac1 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_linux.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_linux.go @@ -1,4 +1,4 @@ -// Copyright 2011 The Go Authors. All rights reserved. +// Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/golang.org/x/sys/unix/syscall.go b/vendor/golang.org/x/sys/unix/syscall.go index 85e35020e2..857d2a42d4 100644 --- a/vendor/golang.org/x/sys/unix/syscall.go +++ b/vendor/golang.org/x/sys/unix/syscall.go @@ -5,10 +5,10 @@ // +build darwin dragonfly freebsd linux netbsd openbsd solaris // Package unix contains an interface to the low-level operating system -// primitives. OS details vary depending on the underlying system, and +// primitives. OS details vary depending on the underlying system, and // by default, godoc will display OS-specific documentation for the current -// system. 
If you want godoc to display OS documentation for another -// system, set $GOOS and $GOARCH to the desired system. For example, if +// system. If you want godoc to display OS documentation for another +// system, set $GOOS and $GOARCH to the desired system. For example, if // you want to view documentation for freebsd/arm on linux/amd64, set $GOOS // to freebsd and $GOARCH to arm. // The primary use of this package is inside other packages that provide a more @@ -49,21 +49,3 @@ func BytePtrFromString(s string) (*byte, error) { // Single-word zero for use when we need a valid pointer to 0 bytes. // See mkunix.pl. var _zero uintptr - -func (ts *Timespec) Unix() (sec int64, nsec int64) { - return int64(ts.Sec), int64(ts.Nsec) -} - -func (tv *Timeval) Unix() (sec int64, nsec int64) { - return int64(tv.Sec), int64(tv.Usec) * 1000 -} - -func (ts *Timespec) Nano() int64 { - return int64(ts.Sec)*1e9 + int64(ts.Nsec) -} - -func (tv *Timeval) Nano() int64 { - return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000 -} - -func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index c2846b32d6..695914e2a0 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -34,7 +34,7 @@ func Getgroups() (gids []int, err error) { return nil, nil } - // Sanity check group count. Max is 16 on BSD. + // Sanity check group count. Max is 16 on BSD. if n < 0 || n > 1000 { return nil, EINVAL } diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index ad74a11fb3..6ecb66eae3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -54,7 +54,7 @@ func nametomib(name string) (mib []_C_int, err error) { // NOTE(rsc): It seems strange to set the buffer to have // size CTL_MAXNAME+2 but use only CTL_MAXNAME - // as the size. I don't know why the +2 is here, but the + // as the size. I don't know why the +2 is here, but the // kernel uses +2 for its own implementation of this function. 
// I am scared that if we don't include the +2 here, the kernel // will silently write 2 words farther than we specify @@ -239,6 +239,15 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) { return &value, err } +//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) + +func Poll(fds []PollFd, timeout int) (n int, err error) { + if len(fds) == 0 { + return poll(nil, 0, timeout) + } + return poll(&fds[0], len(fds), timeout) +} + /* * Exposed directly */ @@ -377,7 +386,6 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) { // Searchfs // Delete // Copyfile -// Poll // Watchevent // Waitevent // Modwatch diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go index 76634f7ab1..b3ac109a2f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go @@ -11,25 +11,18 @@ import ( "unsafe" ) -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = int32(nsec / 1e9) - ts.Nsec = int32(nsec % 1e9) - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: int32(sec), Nsec: int32(nsec)} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Usec = int32(nsec % 1e9 / 1e3) - tv.Sec = int32(nsec / 1e9) - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: int32(sec), Usec: int32(usec)} } //sysnb gettimeofday(tp *Timeval) (sec int32, usec int32, err error) func Gettimeofday(tv *Timeval) (err error) { // The tv passed to gettimeofday must be non-nil - // but is otherwise unused. The answers come back + // but is otherwise unused. The answers come back // in the two registers. sec, usec, err := gettimeofday(tv) tv.Sec = int32(sec) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go index 7be02dab9d..75219444a8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -11,25 +11,18 @@ import ( "unsafe" ) -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = nsec / 1e9 - ts.Nsec = nsec % 1e9 - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Usec = int32(nsec % 1e9 / 1e3) - tv.Sec = int64(nsec / 1e9) - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: int32(usec)} } //sysnb gettimeofday(tp *Timeval) (sec int64, usec int32, err error) func Gettimeofday(tv *Timeval) (err error) { // The tv passed to gettimeofday must be non-nil - // but is otherwise unused. The answers come back + // but is otherwise unused. The answers come back // in the two registers. 
sec, usec, err := gettimeofday(tv) tv.Sec = sec diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go index 26b66972f0..47ab664859 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go @@ -9,25 +9,18 @@ import ( "unsafe" ) -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = int32(nsec / 1e9) - ts.Nsec = int32(nsec % 1e9) - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: int32(sec), Nsec: int32(nsec)} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Usec = int32(nsec % 1e9 / 1e3) - tv.Sec = int32(nsec / 1e9) - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: int32(sec), Usec: int32(usec)} } //sysnb gettimeofday(tp *Timeval) (sec int32, usec int32, err error) func Gettimeofday(tv *Timeval) (err error) { // The tv passed to gettimeofday must be non-nil - // but is otherwise unused. The answers come back + // but is otherwise unused. The answers come back // in the two registers. sec, usec, err := gettimeofday(tv) tv.Sec = int32(sec) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go index 4d67a87427..d6d9628014 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -11,25 +11,18 @@ import ( "unsafe" ) -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = nsec / 1e9 - ts.Nsec = nsec % 1e9 - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Usec = int32(nsec % 1e9 / 1e3) - tv.Sec = int64(nsec / 1e9) - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: int32(usec)} } //sysnb gettimeofday(tp *Timeval) (sec int64, usec int32, err error) func Gettimeofday(tv *Timeval) (err error) { // The tv passed to gettimeofday must be non-nil - // but is otherwise unused. The answers come back + // but is otherwise unused. The answers come back // in the two registers. 
sec, usec, err := gettimeofday(tv) tv.Sec = sec diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 3a483373dc..42d5c8b74b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -125,6 +125,15 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { return } +//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) + +func Poll(fds []PollFd, timeout int) (n int, err error) { + if len(fds) == 0 { + return poll(nil, 0, timeout) + } + return poll(&fds[0], len(fds), timeout) +} + /* * Exposed directly */ @@ -257,7 +266,6 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { // Searchfs // Delete // Copyfile -// Poll // Watchevent // Waitevent // Modwatch @@ -403,7 +411,6 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { // Pread_nocancel // Pwrite_nocancel // Waitid_nocancel -// Poll_nocancel // Msgsnd_nocancel // Msgrcv_nocancel // Sem_wait_nocancel diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go index 6d8952d5a1..9babb31ea7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go @@ -11,19 +11,12 @@ import ( "unsafe" ) -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = nsec / 1e9 - ts.Nsec = nsec % 1e9 - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Usec = nsec % 1e9 / 1e3 - tv.Sec = int64(nsec / 1e9) - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} } func SetKevent(k *Kevent_t, fd, mode, flags int) { diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index d26e52eaef..597808f998 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -32,7 +32,7 @@ func nametomib(name string) (mib []_C_int, err error) { // NOTE(rsc): It seems strange to set the buffer to have // size CTL_MAXNAME+2 but use only CTL_MAXNAME - // as the size. I don't know why the +2 is here, but the + // as the size. I don't know why the +2 is here, but the // kernel uses +2 for its own implementation of this function. 
// I am scared that if we don't include the +2 here, the kernel // will silently write 2 words farther than we specify @@ -391,6 +391,15 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) { return &value, err } +//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) + +func Poll(fds []PollFd, timeout int) (n int, err error) { + if len(fds) == 0 { + return poll(nil, 0, timeout) + } + return poll(&fds[0], len(fds), timeout) +} + /* * Exposed directly */ diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go index 4cf5f453f5..21e03958cd 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go @@ -11,19 +11,12 @@ import ( "unsafe" ) -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = int32(nsec / 1e9) - ts.Nsec = int32(nsec % 1e9) - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: int32(sec), Nsec: int32(nsec)} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Usec = int32(nsec % 1e9 / 1e3) - tv.Sec = int32(nsec / 1e9) - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: int32(sec), Usec: int32(usec)} } func SetKevent(k *Kevent_t, fd, mode, flags int) { diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go index b8036e7268..9c945a6579 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go @@ -11,19 +11,12 @@ import ( "unsafe" ) -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = nsec / 1e9 - ts.Nsec = nsec % 1e9 - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Usec = nsec % 1e9 / 1e3 - tv.Sec = int64(nsec / 1e9) - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} } func SetKevent(k *Kevent_t, fd, mode, flags int) { diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go index 5a3bb6a154..5cd6243f2a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go @@ -11,19 +11,12 @@ import ( "unsafe" ) -func TimespecToNsec(ts Timespec) int64 { return ts.Sec*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = nsec / 1e9 - ts.Nsec = int32(nsec % 1e9) - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: int32(nsec)} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Usec = int32(nsec % 1e9 / 1e3) - tv.Sec = nsec / 1e9 - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: int32(usec)} } func SetKevent(k *Kevent_t, fd, mode, flags int) { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 1b7d59d896..b98a7e1544 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -255,7 +255,7 @@ func Getgroups() (gids []int, err error) { return nil, nil } - // Sanity check group count. 
Max is 1<<16 on Linux. + // Sanity check group count. Max is 1<<16 on Linux. if n < 0 || n > 1<<20 { return nil, EINVAL } @@ -290,8 +290,8 @@ type WaitStatus uint32 // 0x7F (stopped), or a signal number that caused an exit. // The 0x80 bit is whether there was a core dump. // An extra number (exit code, signal causing a stop) -// is in the high bits. At least that's the idea. -// There are various irregularities. For example, the +// is in the high bits. At least that's the idea. +// There are various irregularities. For example, the // "continued" status is 0xFFFF, distinguishing itself // from stopped via the core dump bit. @@ -926,7 +926,7 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from msg.Namelen = uint32(SizeofSockaddrAny) var iov Iovec if len(p) > 0 { - iov.Base = (*byte)(unsafe.Pointer(&p[0])) + iov.Base = &p[0] iov.SetLen(len(p)) } var dummy byte @@ -941,7 +941,7 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from iov.Base = &dummy iov.SetLen(1) } - msg.Control = (*byte)(unsafe.Pointer(&oob[0])) + msg.Control = &oob[0] msg.SetControllen(len(oob)) } msg.Iov = &iov @@ -974,11 +974,11 @@ func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) } } var msg Msghdr - msg.Name = (*byte)(unsafe.Pointer(ptr)) + msg.Name = (*byte)(ptr) msg.Namelen = uint32(salen) var iov Iovec if len(p) > 0 { - iov.Base = (*byte)(unsafe.Pointer(&p[0])) + iov.Base = &p[0] iov.SetLen(len(p)) } var dummy byte @@ -993,7 +993,7 @@ func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) iov.Base = &dummy iov.SetLen(1) } - msg.Control = (*byte)(unsafe.Pointer(&oob[0])) + msg.Control = &oob[0] msg.SetControllen(len(oob)) } msg.Iov = &iov @@ -1023,7 +1023,7 @@ func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err erro var buf [sizeofPtr]byte - // Leading edge. PEEKTEXT/PEEKDATA don't require aligned + // Leading edge. 
PEEKTEXT/PEEKDATA don't require aligned // access (PEEKUSER warns that it might), but if we don't // align our reads, we might straddle an unmapped page // boundary and not get the bytes leading up to the page @@ -1262,6 +1262,7 @@ func Getpgrp() (pid int) { //sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT //sysnb prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT64 //sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) +//sys Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) = SYS_PSELECT6 //sys read(fd int, p []byte) (n int, err error) //sys Removexattr(path string, attr string) (err error) //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index f4c826a456..4774fa363e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -14,19 +14,12 @@ import ( "unsafe" ) -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = int32(nsec / 1e9) - ts.Nsec = int32(nsec % 1e9) - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: int32(sec), Nsec: int32(nsec)} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Sec = int32(nsec / 1e9) - tv.Usec = int32(nsec % 1e9 / 1e3) - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: int32(sec), Usec: int32(usec)} } //sysnb pipe(p *[2]_C_int) (err error) @@ -183,9 +176,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // On x86 Linux, all the socket calls go through an extra indirection, // I think because the 5-register system call interface can't handle -// the 6-argument calls like sendto and recvfrom. Instead the +// the 6-argument calls like sendto and recvfrom. Instead the // arguments to the underlying system call are the number below -// and a pointer to an array of uintptr. We hide the pointer in the +// and a pointer to an array of uintptr. We hide the pointer in the // socketcall assembly to avoid allocation on every system call. 
const ( diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index 0715200dcf..3707f6b7c9 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -83,19 +83,12 @@ func Time(t *Time_t) (tt Time_t, err error) { //sys Utime(path string, buf *Utimbuf) (err error) -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = nsec / 1e9 - ts.Nsec = nsec % 1e9 - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Sec = nsec / 1e9 - tv.Usec = nsec % 1e9 / 1e3 - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} } //sysnb pipe(p *[2]_C_int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index 2b79c84a67..226be100f5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -11,19 +11,12 @@ import ( "unsafe" ) -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = int32(nsec / 1e9) - ts.Nsec = int32(nsec % 1e9) - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: int32(sec), Nsec: int32(nsec)} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Sec = int32(nsec / 1e9) - tv.Usec = int32(nsec % 1e9 / 1e3) - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: int32(sec), Usec: int32(usec)} } func Pipe(p []int) (err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index 68cc975dbb..9a8e6e4117 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -21,7 +21,12 @@ package unix //sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 //sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK -//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS_PSELECT6 + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + ts := Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} + return Pselect(nfd, r, w, e, &ts, nil) +} + //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys Setfsgid(gid int) (err error) //sys Setfsuid(uid int) (err error) @@ -68,19 +73,12 @@ func Lstat(path string, stat *Stat_t) (err error) { //sysnb Gettimeofday(tv *Timeval) (err error) -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = nsec / 1e9 - ts.Nsec = nsec % 1e9 - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Sec = nsec / 1e9 - tv.Usec = nsec % 1e9 / 1e3 - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} } func Time(t *Time_t) (Time_t, error) { diff --git 
a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index 977df441be..cdda11a9fa 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -23,7 +23,12 @@ package unix //sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 //sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK -//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS_PSELECT6 + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + ts := Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} + return Pselect(nfd, r, w, e, &ts, nil) +} + //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys Setfsgid(gid int) (err error) //sys Setfsuid(uid int) (err error) @@ -71,19 +76,12 @@ func Time(t *Time_t) (tt Time_t, err error) { //sys Utime(path string, buf *Utimbuf) (err error) -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = nsec / 1e9 - ts.Nsec = nsec % 1e9 - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Sec = nsec / 1e9 - tv.Usec = nsec % 1e9 / 1e3 - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} } func Pipe(p []int) (err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index 25a5a0da5a..a114ba8cb3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -99,19 +99,12 @@ func Seek(fd int, offset int64, whence int) (off int64, err error) { return } -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = int32(nsec / 1e9) - ts.Nsec = int32(nsec % 1e9) - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: int32(sec), Nsec: int32(nsec)} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Sec = int32(nsec / 1e9) - tv.Usec = int32(nsec % 1e9 / 1e3) - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: int32(sec), Usec: int32(usec)} } //sysnb pipe2(p *[2]_C_int, flags int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index 28b7f350d1..7cae936c45 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -28,7 +28,7 @@ package unix //sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 //sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK -//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) +//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys Setfsgid(gid int) (err error) //sys Setfsuid(uid int) (err error) @@ 
-66,19 +66,12 @@ package unix //sys Utime(path string, buf *Utimbuf) (err error) -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = nsec / 1e9 - ts.Nsec = nsec % 1e9 - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Sec = nsec / 1e9 - tv.Usec = nsec % 1e9 / 1e3 - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} } func (r *PtraceRegs) PC() uint64 { return r.Nip } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go index 3845fc9c43..e96a40cb21 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go @@ -62,19 +62,12 @@ func Time(t *Time_t) (tt Time_t, err error) { //sys Utime(path string, buf *Utimbuf) (err error) -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = nsec / 1e9 - ts.Nsec = nsec % 1e9 - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Sec = nsec / 1e9 - tv.Usec = nsec % 1e9 / 1e3 - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} } //sysnb pipe2(p *[2]_C_int, flags int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go index bd9de3e9d0..012a3285ef 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go @@ -82,19 +82,12 @@ func Time(t *Time_t) (tt Time_t, err error) { //sys Utime(path string, buf *Utimbuf) (err error) -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = nsec / 1e9 - ts.Nsec = nsec % 1e9 - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Sec = nsec / 1e9 - tv.Usec = int32(nsec % 1e9 / 1e3) - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: int32(usec)} } func (r *PtraceRegs) PC() uint64 { return r.Tpc } diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index e129668459..f0370bfafa 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -124,6 +124,15 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e return -1, ENOSYS } +//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) + +func Poll(fds []PollFd, timeout int) (n int, err error) { + if len(fds) == 0 { + return poll(nil, 0, timeout) + } + return poll(&fds[0], len(fds), timeout) +} + /* * Exposed directly */ @@ -422,7 +431,6 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e // ntp_adjtime // pmc_control // pmc_get_info -// poll // pollts // preadv // profil diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go index baefa411ec..24f74e58ce 100644 --- 
a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go @@ -6,19 +6,12 @@ package unix -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = int64(nsec / 1e9) - ts.Nsec = int32(nsec % 1e9) - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: int32(nsec)} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Usec = int32(nsec % 1e9 / 1e3) - tv.Sec = int64(nsec / 1e9) - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: int32(usec)} } func SetKevent(k *Kevent_t, fd, mode, flags int) { diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go index 59c2ab7eba..6878bf7ff9 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go @@ -6,19 +6,12 @@ package unix -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = int64(nsec / 1e9) - ts.Nsec = int64(nsec % 1e9) - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Usec = int32(nsec % 1e9 / 1e3) - tv.Sec = int64(nsec / 1e9) - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: int32(usec)} } func SetKevent(k *Kevent_t, fd, mode, flags int) { diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go index 7208108a31..dbbfcf71db 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go @@ -6,19 +6,12 @@ package unix -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = int64(nsec / 1e9) - ts.Nsec = int32(nsec % 1e9) - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: int32(nsec)} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Usec = int32(nsec % 1e9 / 1e3) - tv.Sec = int64(nsec / 1e9) - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: int32(usec)} } func SetKevent(k *Kevent_t, fd, mode, flags int) { diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 408e63081c..a159fc4b64 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -102,6 +102,15 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { return } +//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) + +func Poll(fds []PollFd, timeout int) (n int, err error) { + if len(fds) == 0 { + return poll(nil, 0, timeout) + } + return poll(&fds[0], len(fds), timeout) +} + /* * Exposed directly */ @@ -243,7 +252,6 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { // nfssvc // nnpfspioctl // openat -// poll // preadv // profil // pwritev diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go index d3809b426c..994964a916 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go +++ 
b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go @@ -6,19 +6,12 @@ package unix -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = int64(nsec / 1e9) - ts.Nsec = int32(nsec % 1e9) - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: int32(nsec)} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Usec = int32(nsec % 1e9 / 1e3) - tv.Sec = int64(nsec / 1e9) - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: int32(usec)} } func SetKevent(k *Kevent_t, fd, mode, flags int) { diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go index 9a9dfceffd..649e67fccc 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go @@ -6,19 +6,12 @@ package unix -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = nsec / 1e9 - ts.Nsec = nsec % 1e9 - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Usec = nsec % 1e9 / 1e3 - tv.Sec = nsec / 1e9 - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} } func SetKevent(k *Kevent_t, fd, mode, flags int) { diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go index ba8649056f..59844f5041 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go @@ -6,19 +6,12 @@ package unix -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = int64(nsec / 1e9) - ts.Nsec = int32(nsec % 1e9) - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: int32(nsec)} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Usec = int32(nsec % 1e9 / 1e3) - tv.Sec = int64(nsec / 1e9) - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: int32(usec)} } func SetKevent(k *Kevent_t, fd, mode, flags int) { diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index dbb166e00a..80be7902b0 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -166,7 +166,7 @@ func Getwd() (wd string, err error) { func Getgroups() (gids []int, err error) { n, err := getgroups(0, nil) - // Check for error and sanity check group count. Newer versions of + // Check for error and sanity check group count. Newer versions of // Solaris allow up to 1024 (NGROUPS_MAX). if n < 0 || n > 1024 { if err != nil { @@ -350,7 +350,7 @@ func Futimesat(dirfd int, path string, tv []Timeval) error { } // Solaris doesn't have an futimes function because it allows NULL to be -// specified as the path for futimesat. However, Go doesn't like +// specified as the path for futimesat. However, Go doesn't like // NULL-style string interfaces, so this simple wrapper is provided. 
func Futimes(fd int, tv []Timeval) error { if tv == nil { @@ -514,6 +514,24 @@ func Acct(path string) (err error) { return acct(pathp) } +//sys __makedev(version int, major uint, minor uint) (val uint64) + +func Mkdev(major, minor uint32) uint64 { + return __makedev(NEWDEV, uint(major), uint(minor)) +} + +//sys __major(version int, dev uint64) (val uint) + +func Major(dev uint64) uint32 { + return uint32(__major(NEWDEV, dev)) +} + +//sys __minor(version int, dev uint64) (val uint) + +func Minor(dev uint64) uint32 { + return uint32(__minor(NEWDEV, dev)) +} + /* * Expose the ioctl function */ @@ -612,6 +630,7 @@ func IoctlGetTermio(fd int, req uint) (*Termio, error) { //sys Mlock(b []byte) (err error) //sys Mlockall(flags int) (err error) //sys Mprotect(b []byte, prot int) (err error) +//sys Msync(b []byte, flags int) (err error) //sys Munlock(b []byte) (err error) //sys Munlockall() (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go index 5aff62c3bb..9d4e7a678f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go @@ -6,19 +6,12 @@ package unix -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = nsec / 1e9 - ts.Nsec = nsec % 1e9 - return +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} } -func NsecToTimeval(nsec int64) (tv Timeval) { - nsec += 999 // round up to microsecond - tv.Usec = nsec % 1e9 / 1e3 - tv.Sec = int64(nsec / 1e9) - return +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} } func (iov *Iovec) SetLen(length int) { diff --git a/vendor/golang.org/x/sys/unix/timestruct.go b/vendor/golang.org/x/sys/unix/timestruct.go new file mode 100644 index 0000000000..139fbbebbb --- /dev/null +++ b/vendor/golang.org/x/sys/unix/timestruct.go @@ -0,0 +1,62 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package unix + +// TimespecToNsec converts a Timespec value into a number of +// nanoseconds since the Unix epoch. +func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } + +// NsecToTimespec takes a number of nanoseconds since the Unix epoch +// and returns the corresponding Timespec value. +func NsecToTimespec(nsec int64) Timespec { + sec := nsec / 1e9 + nsec = nsec % 1e9 + if nsec < 0 { + nsec += 1e9 + sec-- + } + return setTimespec(sec, nsec) +} + +// TimevalToNsec converts a Timeval value into a number of nanoseconds +// since the Unix epoch. +func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } + +// NsecToTimeval takes a number of nanoseconds since the Unix epoch +// and returns the corresponding Timeval value. +func NsecToTimeval(nsec int64) Timeval { + nsec += 999 // round up to microsecond + usec := nsec % 1e9 / 1e3 + sec := nsec / 1e9 + if usec < 0 { + usec += 1e6 + sec-- + } + return setTimeval(sec, usec) +} + +// Unix returns ts as the number of seconds and nanoseconds elapsed since the +// Unix epoch. 
+func (ts *Timespec) Unix() (sec int64, nsec int64) { + return int64(ts.Sec), int64(ts.Nsec) +} + +// Unix returns tv as the number of seconds and nanoseconds elapsed since the +// Unix epoch. +func (tv *Timeval) Unix() (sec int64, nsec int64) { + return int64(tv.Sec), int64(tv.Usec) * 1000 +} + +// Nano returns ts as the number of nanoseconds elapsed since the Unix epoch. +func (ts *Timespec) Nano() int64 { + return int64(ts.Sec)*1e9 + int64(ts.Nsec) +} + +// Nano returns tv as the number of nanoseconds elapsed since the Unix epoch. +func (tv *Timeval) Nano() int64 { + return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000 +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go index 1d3eec44d4..adf5eef0f8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go @@ -981,6 +981,49 @@ const ( MAP_STACK = 0x400 MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 + MNT_ACLS = 0x8000000 + MNT_ASYNC = 0x40 + MNT_AUTOMOUNTED = 0x200000000 + MNT_BYFSID = 0x8000000 + MNT_CMDFLAGS = 0xd0f0000 + MNT_DEFEXPORTED = 0x200 + MNT_DELEXPORT = 0x20000 + MNT_EXKERB = 0x800 + MNT_EXPORTANON = 0x400 + MNT_EXPORTED = 0x100 + MNT_EXPUBLIC = 0x20000000 + MNT_EXRDONLY = 0x80 + MNT_FORCE = 0x80000 + MNT_GJOURNAL = 0x2000000 + MNT_IGNORE = 0x800000 + MNT_LAZY = 0x3 + MNT_LOCAL = 0x1000 + MNT_MULTILABEL = 0x4000000 + MNT_NFS4ACLS = 0x10 + MNT_NOATIME = 0x10000000 + MNT_NOCLUSTERR = 0x40000000 + MNT_NOCLUSTERW = 0x80000000 + MNT_NOEXEC = 0x4 + MNT_NONBUSY = 0x4000000 + MNT_NOSUID = 0x8 + MNT_NOSYMFOLLOW = 0x400000 + MNT_NOWAIT = 0x2 + MNT_QUOTA = 0x2000 + MNT_RDONLY = 0x1 + MNT_RELOAD = 0x40000 + MNT_ROOTFS = 0x4000 + MNT_SNAPSHOT = 0x1000000 + MNT_SOFTDEP = 0x200000 + MNT_SUIDDIR = 0x100000 + MNT_SUJ = 0x100000000 + MNT_SUSPEND = 0x4 + MNT_SYNCHRONOUS = 0x2 + MNT_UNION = 0x20 + MNT_UPDATE = 0x10000 + MNT_UPDATEMASK = 0x2d8d0807e + MNT_USER = 0x8000 + MNT_VISFLAGMASK = 0x3fef0ffff + MNT_WAIT = 0x1 MSG_CMSG_CLOEXEC = 0x40000 MSG_COMPAT = 0x8000 MSG_CTRUNC = 0x20 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go index ac094f9cf3..360caff4f9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go @@ -982,6 +982,49 @@ const ( MAP_STACK = 0x400 MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 + MNT_ACLS = 0x8000000 + MNT_ASYNC = 0x40 + MNT_AUTOMOUNTED = 0x200000000 + MNT_BYFSID = 0x8000000 + MNT_CMDFLAGS = 0xd0f0000 + MNT_DEFEXPORTED = 0x200 + MNT_DELEXPORT = 0x20000 + MNT_EXKERB = 0x800 + MNT_EXPORTANON = 0x400 + MNT_EXPORTED = 0x100 + MNT_EXPUBLIC = 0x20000000 + MNT_EXRDONLY = 0x80 + MNT_FORCE = 0x80000 + MNT_GJOURNAL = 0x2000000 + MNT_IGNORE = 0x800000 + MNT_LAZY = 0x3 + MNT_LOCAL = 0x1000 + MNT_MULTILABEL = 0x4000000 + MNT_NFS4ACLS = 0x10 + MNT_NOATIME = 0x10000000 + MNT_NOCLUSTERR = 0x40000000 + MNT_NOCLUSTERW = 0x80000000 + MNT_NOEXEC = 0x4 + MNT_NONBUSY = 0x4000000 + MNT_NOSUID = 0x8 + MNT_NOSYMFOLLOW = 0x400000 + MNT_NOWAIT = 0x2 + MNT_QUOTA = 0x2000 + MNT_RDONLY = 0x1 + MNT_RELOAD = 0x40000 + MNT_ROOTFS = 0x4000 + MNT_SNAPSHOT = 0x1000000 + MNT_SOFTDEP = 0x200000 + MNT_SUIDDIR = 0x100000 + MNT_SUJ = 0x100000000 + MNT_SUSPEND = 0x4 + MNT_SYNCHRONOUS = 0x2 + MNT_UNION = 0x20 + MNT_UPDATE = 0x10000 + MNT_UPDATEMASK = 0x2d8d0807e + MNT_USER = 0x8000 + MNT_VISFLAGMASK = 0x3fef0ffff + MNT_WAIT = 0x1 MSG_CMSG_CLOEXEC = 0x40000 MSG_COMPAT = 0x8000 MSG_CTRUNC = 0x20 diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go index c5c6f13e53..87deda950e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go @@ -989,6 +989,49 @@ const ( MAP_STACK = 0x400 MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 + MNT_ACLS = 0x8000000 + MNT_ASYNC = 0x40 + MNT_AUTOMOUNTED = 0x200000000 + MNT_BYFSID = 0x8000000 + MNT_CMDFLAGS = 0xd0f0000 + MNT_DEFEXPORTED = 0x200 + MNT_DELEXPORT = 0x20000 + MNT_EXKERB = 0x800 + MNT_EXPORTANON = 0x400 + MNT_EXPORTED = 0x100 + MNT_EXPUBLIC = 0x20000000 + MNT_EXRDONLY = 0x80 + MNT_FORCE = 0x80000 + MNT_GJOURNAL = 0x2000000 + MNT_IGNORE = 0x800000 + MNT_LAZY = 0x3 + MNT_LOCAL = 0x1000 + MNT_MULTILABEL = 0x4000000 + MNT_NFS4ACLS = 0x10 + MNT_NOATIME = 0x10000000 + MNT_NOCLUSTERR = 0x40000000 + MNT_NOCLUSTERW = 0x80000000 + MNT_NOEXEC = 0x4 + MNT_NONBUSY = 0x4000000 + MNT_NOSUID = 0x8 + MNT_NOSYMFOLLOW = 0x400000 + MNT_NOWAIT = 0x2 + MNT_QUOTA = 0x2000 + MNT_RDONLY = 0x1 + MNT_RELOAD = 0x40000 + MNT_ROOTFS = 0x4000 + MNT_SNAPSHOT = 0x1000000 + MNT_SOFTDEP = 0x200000 + MNT_SUIDDIR = 0x100000 + MNT_SUJ = 0x100000000 + MNT_SUSPEND = 0x4 + MNT_SYNCHRONOUS = 0x2 + MNT_UNION = 0x20 + MNT_UPDATE = 0x10000 + MNT_UPDATEMASK = 0x2d8d0807e + MNT_USER = 0x8000 + MNT_VISFLAGMASK = 0x3fef0ffff + MNT_WAIT = 0x1 MSG_CMSG_CLOEXEC = 0x40000 MSG_COMPAT = 0x8000 MSG_CTRUNC = 0x20 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 4066ad1e0f..bb8a7724bc 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -1277,7 +1277,7 @@ const ( RLIMIT_RTTIME = 0xf RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x1 + RLIM_INFINITY = 0xffffffffffffffff RTAX_ADVMSS = 0x8 RTAX_CC_ALGO = 0x10 RTAX_CWND = 0x7 @@ -1842,6 +1842,8 @@ const ( TUNSETVNETHDRSZ = 0x400454d8 TUNSETVNETLE = 0x400454dc UMOUNT_NOFOLLOW = 0x8 + UTIME_NOW = 0x3fffffff + UTIME_OMIT = 0x3ffffffe VDISCARD = 0xd VEOF = 0x4 VEOL = 0xb @@ -1871,6 +1873,17 @@ const ( WALL = 0x40000000 WCLONE = 0x80000000 WCONTINUED = 0x8 + WDIOC_GETBOOTSTATUS = 0x80045702 + WDIOC_GETPRETIMEOUT = 0x80045709 + WDIOC_GETSTATUS = 0x80045701 + WDIOC_GETSUPPORT = 0x80285700 + WDIOC_GETTEMP = 0x80045703 + WDIOC_GETTIMELEFT = 0x8004570a + WDIOC_GETTIMEOUT = 0x80045707 + WDIOC_KEEPALIVE = 0x80045705 + WDIOC_SETOPTIONS = 0x80045704 + WDIOC_SETPRETIMEOUT = 0xc0045708 + WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 WNOHANG = 0x1 WNOTHREAD = 0x20000000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index c9f53b0b37..cf0b2249f7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -1187,7 +1187,7 @@ const ( PR_SET_NO_NEW_PRIVS = 0x26 PR_SET_PDEATHSIG = 0x1 PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = -0x1 + PR_SET_PTRACER_ANY = 0xffffffffffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c PR_SET_THP_DISABLE = 0x29 @@ -1278,7 +1278,7 @@ const ( RLIMIT_RTTIME = 0xf RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x1 + RLIM_INFINITY = 0xffffffffffffffff RTAX_ADVMSS = 0x8 RTAX_CC_ALGO = 0x10 RTAX_CWND = 0x7 @@ -1843,6 +1843,8 @@ const ( TUNSETVNETHDRSZ = 0x400454d8 TUNSETVNETLE = 0x400454dc UMOUNT_NOFOLLOW = 0x8 + UTIME_NOW = 0x3fffffff + UTIME_OMIT = 0x3ffffffe VDISCARD = 0xd VEOF = 0x4 VEOL = 0xb @@ -1872,6 +1874,17 @@ const ( 
WALL = 0x40000000 WCLONE = 0x80000000 WCONTINUED = 0x8 + WDIOC_GETBOOTSTATUS = 0x80045702 + WDIOC_GETPRETIMEOUT = 0x80045709 + WDIOC_GETSTATUS = 0x80045701 + WDIOC_GETSUPPORT = 0x80285700 + WDIOC_GETTEMP = 0x80045703 + WDIOC_GETTIMELEFT = 0x8004570a + WDIOC_GETTIMEOUT = 0x80045707 + WDIOC_KEEPALIVE = 0x80045705 + WDIOC_SETOPTIONS = 0x80045704 + WDIOC_SETPRETIMEOUT = 0xc0045708 + WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 WNOHANG = 0x1 WNOTHREAD = 0x20000000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 3e8c2c7aa6..57cfcf3fe0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -1282,7 +1282,7 @@ const ( RLIMIT_RTTIME = 0xf RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x1 + RLIM_INFINITY = 0xffffffffffffffff RTAX_ADVMSS = 0x8 RTAX_CC_ALGO = 0x10 RTAX_CWND = 0x7 @@ -1847,6 +1847,8 @@ const ( TUNSETVNETHDRSZ = 0x400454d8 TUNSETVNETLE = 0x400454dc UMOUNT_NOFOLLOW = 0x8 + UTIME_NOW = 0x3fffffff + UTIME_OMIT = 0x3ffffffe VDISCARD = 0xd VEOF = 0x4 VEOL = 0xb @@ -1876,6 +1878,17 @@ const ( WALL = 0x40000000 WCLONE = 0x80000000 WCONTINUED = 0x8 + WDIOC_GETBOOTSTATUS = 0x80045702 + WDIOC_GETPRETIMEOUT = 0x80045709 + WDIOC_GETSTATUS = 0x80045701 + WDIOC_GETSUPPORT = 0x80285700 + WDIOC_GETTEMP = 0x80045703 + WDIOC_GETTIMELEFT = 0x8004570a + WDIOC_GETTIMEOUT = 0x80045707 + WDIOC_KEEPALIVE = 0x80045705 + WDIOC_SETOPTIONS = 0x80045704 + WDIOC_SETPRETIMEOUT = 0xc0045708 + WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 WNOHANG = 0x1 WNOTHREAD = 0x20000000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 383453349f..b6e5b090ec 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -1188,7 +1188,7 @@ const ( PR_SET_NO_NEW_PRIVS = 0x26 PR_SET_PDEATHSIG = 0x1 PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = -0x1 + PR_SET_PTRACER_ANY = 0xffffffffffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c PR_SET_THP_DISABLE = 0x29 @@ -1268,7 +1268,7 @@ const ( RLIMIT_RTTIME = 0xf RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x1 + RLIM_INFINITY = 0xffffffffffffffff RTAX_ADVMSS = 0x8 RTAX_CC_ALGO = 0x10 RTAX_CWND = 0x7 @@ -1833,6 +1833,8 @@ const ( TUNSETVNETHDRSZ = 0x400454d8 TUNSETVNETLE = 0x400454dc UMOUNT_NOFOLLOW = 0x8 + UTIME_NOW = 0x3fffffff + UTIME_OMIT = 0x3ffffffe VDISCARD = 0xd VEOF = 0x4 VEOL = 0xb @@ -1862,6 +1864,17 @@ const ( WALL = 0x40000000 WCLONE = 0x80000000 WCONTINUED = 0x8 + WDIOC_GETBOOTSTATUS = 0x80045702 + WDIOC_GETPRETIMEOUT = 0x80045709 + WDIOC_GETSTATUS = 0x80045701 + WDIOC_GETSUPPORT = 0x80285700 + WDIOC_GETTEMP = 0x80045703 + WDIOC_GETTIMELEFT = 0x8004570a + WDIOC_GETTIMEOUT = 0x80045707 + WDIOC_KEEPALIVE = 0x80045705 + WDIOC_SETOPTIONS = 0x80045704 + WDIOC_SETPRETIMEOUT = 0xc0045708 + WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 WNOHANG = 0x1 WNOTHREAD = 0x20000000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index bde8f7d023..0113e1f6ab 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -1279,7 +1279,7 @@ const ( RLIMIT_RTTIME = 0xf RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x1 + RLIM_INFINITY = 0xffffffffffffffff RTAX_ADVMSS = 0x8 RTAX_CC_ALGO = 0x10 RTAX_CWND = 0x7 @@ -1846,6 +1846,8 @@ const ( 
TUNSETVNETHDRSZ = 0x800454d8 TUNSETVNETLE = 0x800454dc UMOUNT_NOFOLLOW = 0x8 + UTIME_NOW = 0x3fffffff + UTIME_OMIT = 0x3ffffffe VDISCARD = 0xd VEOF = 0x10 VEOL = 0x11 @@ -1876,6 +1878,17 @@ const ( WALL = 0x40000000 WCLONE = 0x80000000 WCONTINUED = 0x8 + WDIOC_GETBOOTSTATUS = 0x40045702 + WDIOC_GETPRETIMEOUT = 0x40045709 + WDIOC_GETSTATUS = 0x40045701 + WDIOC_GETSUPPORT = 0x40285700 + WDIOC_GETTEMP = 0x40045703 + WDIOC_GETTIMELEFT = 0x4004570a + WDIOC_GETTIMEOUT = 0x40045707 + WDIOC_KEEPALIVE = 0x40045705 + WDIOC_SETOPTIONS = 0x40045704 + WDIOC_SETPRETIMEOUT = 0xc0045708 + WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 WNOHANG = 0x1 WNOTHREAD = 0x20000000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 42b6397d5d..6857657a50 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -1187,7 +1187,7 @@ const ( PR_SET_NO_NEW_PRIVS = 0x26 PR_SET_PDEATHSIG = 0x1 PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = -0x1 + PR_SET_PTRACER_ANY = 0xffffffffffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c PR_SET_THP_DISABLE = 0x29 @@ -1279,7 +1279,7 @@ const ( RLIMIT_RTTIME = 0xf RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x1 + RLIM_INFINITY = 0xffffffffffffffff RTAX_ADVMSS = 0x8 RTAX_CC_ALGO = 0x10 RTAX_CWND = 0x7 @@ -1846,6 +1846,8 @@ const ( TUNSETVNETHDRSZ = 0x800454d8 TUNSETVNETLE = 0x800454dc UMOUNT_NOFOLLOW = 0x8 + UTIME_NOW = 0x3fffffff + UTIME_OMIT = 0x3ffffffe VDISCARD = 0xd VEOF = 0x10 VEOL = 0x11 @@ -1876,6 +1878,17 @@ const ( WALL = 0x40000000 WCLONE = 0x80000000 WCONTINUED = 0x8 + WDIOC_GETBOOTSTATUS = 0x40045702 + WDIOC_GETPRETIMEOUT = 0x40045709 + WDIOC_GETSTATUS = 0x40045701 + WDIOC_GETSUPPORT = 0x40285700 + WDIOC_GETTEMP = 0x40045703 + WDIOC_GETTIMELEFT = 0x4004570a + WDIOC_GETTIMEOUT = 0x40045707 + WDIOC_KEEPALIVE = 0x40045705 + WDIOC_SETOPTIONS = 0x40045704 + WDIOC_SETPRETIMEOUT = 0xc0045708 + WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 WNOHANG = 0x1 WNOTHREAD = 0x20000000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index bd4ff81474..14f7e0e056 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -1187,7 +1187,7 @@ const ( PR_SET_NO_NEW_PRIVS = 0x26 PR_SET_PDEATHSIG = 0x1 PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = -0x1 + PR_SET_PTRACER_ANY = 0xffffffffffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c PR_SET_THP_DISABLE = 0x29 @@ -1279,7 +1279,7 @@ const ( RLIMIT_RTTIME = 0xf RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x1 + RLIM_INFINITY = 0xffffffffffffffff RTAX_ADVMSS = 0x8 RTAX_CC_ALGO = 0x10 RTAX_CWND = 0x7 @@ -1846,6 +1846,8 @@ const ( TUNSETVNETHDRSZ = 0x800454d8 TUNSETVNETLE = 0x800454dc UMOUNT_NOFOLLOW = 0x8 + UTIME_NOW = 0x3fffffff + UTIME_OMIT = 0x3ffffffe VDISCARD = 0xd VEOF = 0x10 VEOL = 0x11 @@ -1876,6 +1878,17 @@ const ( WALL = 0x40000000 WCLONE = 0x80000000 WCONTINUED = 0x8 + WDIOC_GETBOOTSTATUS = 0x40045702 + WDIOC_GETPRETIMEOUT = 0x40045709 + WDIOC_GETSTATUS = 0x40045701 + WDIOC_GETSUPPORT = 0x40285700 + WDIOC_GETTEMP = 0x40045703 + WDIOC_GETTIMELEFT = 0x4004570a + WDIOC_GETTIMEOUT = 0x40045707 + WDIOC_KEEPALIVE = 0x40045705 + WDIOC_SETOPTIONS = 0x40045704 + WDIOC_SETPRETIMEOUT = 0xc0045708 + WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 WNOHANG = 0x1 WNOTHREAD = 0x20000000 diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 6dfc95c40f..f795862d87 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -1279,7 +1279,7 @@ const ( RLIMIT_RTTIME = 0xf RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x1 + RLIM_INFINITY = 0xffffffffffffffff RTAX_ADVMSS = 0x8 RTAX_CC_ALGO = 0x10 RTAX_CWND = 0x7 @@ -1846,6 +1846,8 @@ const ( TUNSETVNETHDRSZ = 0x800454d8 TUNSETVNETLE = 0x800454dc UMOUNT_NOFOLLOW = 0x8 + UTIME_NOW = 0x3fffffff + UTIME_OMIT = 0x3ffffffe VDISCARD = 0xd VEOF = 0x10 VEOL = 0x11 @@ -1876,6 +1878,17 @@ const ( WALL = 0x40000000 WCLONE = 0x80000000 WCONTINUED = 0x8 + WDIOC_GETBOOTSTATUS = 0x40045702 + WDIOC_GETPRETIMEOUT = 0x40045709 + WDIOC_GETSTATUS = 0x40045701 + WDIOC_GETSUPPORT = 0x40285700 + WDIOC_GETTEMP = 0x40045703 + WDIOC_GETTIMELEFT = 0x4004570a + WDIOC_GETTIMEOUT = 0x40045707 + WDIOC_KEEPALIVE = 0x40045705 + WDIOC_SETOPTIONS = 0x40045704 + WDIOC_SETPRETIMEOUT = 0xc0045708 + WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 WNOHANG = 0x1 WNOTHREAD = 0x20000000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 46b09d320d..2544c4b632 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -1189,7 +1189,7 @@ const ( PR_SET_NO_NEW_PRIVS = 0x26 PR_SET_PDEATHSIG = 0x1 PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = -0x1 + PR_SET_PTRACER_ANY = 0xffffffffffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c PR_SET_THP_DISABLE = 0x29 @@ -1335,7 +1335,7 @@ const ( RLIMIT_RTTIME = 0xf RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x1 + RLIM_INFINITY = 0xffffffffffffffff RTAX_ADVMSS = 0x8 RTAX_CC_ALGO = 0x10 RTAX_CWND = 0x7 @@ -1904,6 +1904,8 @@ const ( TUNSETVNETHDRSZ = 0x800454d8 TUNSETVNETLE = 0x800454dc UMOUNT_NOFOLLOW = 0x8 + UTIME_NOW = 0x3fffffff + UTIME_OMIT = 0x3ffffffe VDISCARD = 0x10 VEOF = 0x4 VEOL = 0x6 @@ -1933,6 +1935,17 @@ const ( WALL = 0x40000000 WCLONE = 0x80000000 WCONTINUED = 0x8 + WDIOC_GETBOOTSTATUS = 0x40045702 + WDIOC_GETPRETIMEOUT = 0x40045709 + WDIOC_GETSTATUS = 0x40045701 + WDIOC_GETSUPPORT = 0x40285700 + WDIOC_GETTEMP = 0x40045703 + WDIOC_GETTIMELEFT = 0x4004570a + WDIOC_GETTIMEOUT = 0x40045707 + WDIOC_KEEPALIVE = 0x40045705 + WDIOC_SETOPTIONS = 0x40045704 + WDIOC_SETPRETIMEOUT = 0xc0045708 + WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 WNOHANG = 0x1 WNOTHREAD = 0x20000000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 08adb1d8fc..133bdf5847 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -1189,7 +1189,7 @@ const ( PR_SET_NO_NEW_PRIVS = 0x26 PR_SET_PDEATHSIG = 0x1 PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = -0x1 + PR_SET_PTRACER_ANY = 0xffffffffffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c PR_SET_THP_DISABLE = 0x29 @@ -1335,7 +1335,7 @@ const ( RLIMIT_RTTIME = 0xf RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x1 + RLIM_INFINITY = 0xffffffffffffffff RTAX_ADVMSS = 0x8 RTAX_CC_ALGO = 0x10 RTAX_CWND = 0x7 @@ -1904,6 +1904,8 @@ const ( TUNSETVNETHDRSZ = 0x800454d8 TUNSETVNETLE = 0x800454dc UMOUNT_NOFOLLOW = 0x8 + UTIME_NOW = 0x3fffffff + UTIME_OMIT = 0x3ffffffe VDISCARD = 0x10 VEOF = 0x4 VEOL = 0x6 @@ -1933,6 +1935,17 @@ const ( WALL = 
0x40000000 WCLONE = 0x80000000 WCONTINUED = 0x8 + WDIOC_GETBOOTSTATUS = 0x40045702 + WDIOC_GETPRETIMEOUT = 0x40045709 + WDIOC_GETSTATUS = 0x40045701 + WDIOC_GETSUPPORT = 0x40285700 + WDIOC_GETTEMP = 0x40045703 + WDIOC_GETTIMELEFT = 0x4004570a + WDIOC_GETTIMEOUT = 0x40045707 + WDIOC_KEEPALIVE = 0x40045705 + WDIOC_SETOPTIONS = 0x40045704 + WDIOC_SETPRETIMEOUT = 0xc0045708 + WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 WNOHANG = 0x1 WNOTHREAD = 0x20000000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 70bc1a2fc5..b921fb17a4 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -1186,7 +1186,7 @@ const ( PR_SET_NO_NEW_PRIVS = 0x26 PR_SET_PDEATHSIG = 0x1 PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = -0x1 + PR_SET_PTRACER_ANY = 0xffffffffffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c PR_SET_THP_DISABLE = 0x29 @@ -1339,7 +1339,7 @@ const ( RLIMIT_RTTIME = 0xf RLIMIT_SIGPENDING = 0xb RLIMIT_STACK = 0x3 - RLIM_INFINITY = -0x1 + RLIM_INFINITY = 0xffffffffffffffff RTAX_ADVMSS = 0x8 RTAX_CC_ALGO = 0x10 RTAX_CWND = 0x7 @@ -1904,6 +1904,8 @@ const ( TUNSETVNETHDRSZ = 0x400454d8 TUNSETVNETLE = 0x400454dc UMOUNT_NOFOLLOW = 0x8 + UTIME_NOW = 0x3fffffff + UTIME_OMIT = 0x3ffffffe VDISCARD = 0xd VEOF = 0x4 VEOL = 0xb @@ -1933,6 +1935,17 @@ const ( WALL = 0x40000000 WCLONE = 0x80000000 WCONTINUED = 0x8 + WDIOC_GETBOOTSTATUS = 0x80045702 + WDIOC_GETPRETIMEOUT = 0x80045709 + WDIOC_GETSTATUS = 0x80045701 + WDIOC_GETSUPPORT = 0x80285700 + WDIOC_GETTEMP = 0x80045703 + WDIOC_GETTIMELEFT = 0x8004570a + WDIOC_GETTIMEOUT = 0x80045707 + WDIOC_KEEPALIVE = 0x80045705 + WDIOC_SETOPTIONS = 0x80045704 + WDIOC_SETPRETIMEOUT = 0xc0045708 + WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 WNOHANG = 0x1 WNOTHREAD = 0x20000000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go index 81e83d78fc..09eedb0093 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go @@ -664,6 +664,8 @@ const ( MS_OLDSYNC = 0x0 MS_SYNC = 0x4 M_FLUSH = 0x86 + NAME_MAX = 0xff + NEWDEV = 0x1 NL0 = 0x0 NL1 = 0x100 NLDLY = 0x100 @@ -672,6 +674,9 @@ const ( OFDEL = 0x80 OFILL = 0x40 OLCUC = 0x2 + OLDDEV = 0x0 + ONBITSMAJOR = 0x7 + ONBITSMINOR = 0x8 ONLCR = 0x4 ONLRET = 0x20 ONOCR = 0x10 @@ -1105,6 +1110,7 @@ const ( VEOL = 0x5 VEOL2 = 0x6 VERASE = 0x2 + VERASE2 = 0x11 VINTR = 0x0 VKILL = 0x3 VLNEXT = 0xf diff --git a/vendor/golang.org/x/sys/unix/zptrace386_linux.go b/vendor/golang.org/x/sys/unix/zptrace386_linux.go new file mode 100644 index 0000000000..2d21c49e12 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zptrace386_linux.go @@ -0,0 +1,80 @@ +// Code generated by linux/mkall.go generatePtracePair(386, amd64). DO NOT EDIT. + +// +build linux +// +build 386 amd64 + +package unix + +import "unsafe" + +// PtraceRegs386 is the registers used by 386 binaries. +type PtraceRegs386 struct { + Ebx int32 + Ecx int32 + Edx int32 + Esi int32 + Edi int32 + Ebp int32 + Eax int32 + Xds int32 + Xes int32 + Xfs int32 + Xgs int32 + Orig_eax int32 + Eip int32 + Xcs int32 + Eflags int32 + Esp int32 + Xss int32 +} + +// PtraceGetRegs386 fetches the registers used by 386 binaries. 
+func PtraceGetRegs386(pid int, regsout *PtraceRegs386) error { + return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) +} + +// PtraceSetRegs386 sets the registers used by 386 binaries. +func PtraceSetRegs386(pid int, regs *PtraceRegs386) error { + return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) +} + +// PtraceRegsAmd64 is the registers used by amd64 binaries. +type PtraceRegsAmd64 struct { + R15 uint64 + R14 uint64 + R13 uint64 + R12 uint64 + Rbp uint64 + Rbx uint64 + R11 uint64 + R10 uint64 + R9 uint64 + R8 uint64 + Rax uint64 + Rcx uint64 + Rdx uint64 + Rsi uint64 + Rdi uint64 + Orig_rax uint64 + Rip uint64 + Cs uint64 + Eflags uint64 + Rsp uint64 + Ss uint64 + Fs_base uint64 + Gs_base uint64 + Ds uint64 + Es uint64 + Fs uint64 + Gs uint64 +} + +// PtraceGetRegsAmd64 fetches the registers used by amd64 binaries. +func PtraceGetRegsAmd64(pid int, regsout *PtraceRegsAmd64) error { + return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) +} + +// PtraceSetRegsAmd64 sets the registers used by amd64 binaries. +func PtraceSetRegsAmd64(pid int, regs *PtraceRegsAmd64) error { + return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) +} diff --git a/vendor/golang.org/x/sys/unix/zptracearm_linux.go b/vendor/golang.org/x/sys/unix/zptracearm_linux.go new file mode 100644 index 0000000000..faf23bbed9 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zptracearm_linux.go @@ -0,0 +1,41 @@ +// Code generated by linux/mkall.go generatePtracePair(arm, arm64). DO NOT EDIT. + +// +build linux +// +build arm arm64 + +package unix + +import "unsafe" + +// PtraceRegsArm is the registers used by arm binaries. +type PtraceRegsArm struct { + Uregs [18]uint32 +} + +// PtraceGetRegsArm fetches the registers used by arm binaries. +func PtraceGetRegsArm(pid int, regsout *PtraceRegsArm) error { + return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) +} + +// PtraceSetRegsArm sets the registers used by arm binaries. +func PtraceSetRegsArm(pid int, regs *PtraceRegsArm) error { + return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) +} + +// PtraceRegsArm64 is the registers used by arm64 binaries. +type PtraceRegsArm64 struct { + Regs [31]uint64 + Sp uint64 + Pc uint64 + Pstate uint64 +} + +// PtraceGetRegsArm64 fetches the registers used by arm64 binaries. +func PtraceGetRegsArm64(pid int, regsout *PtraceRegsArm64) error { + return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) +} + +// PtraceSetRegsArm64 sets the registers used by arm64 binaries. +func PtraceSetRegsArm64(pid int, regs *PtraceRegsArm64) error { + return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) +} diff --git a/vendor/golang.org/x/sys/unix/zptracemips_linux.go b/vendor/golang.org/x/sys/unix/zptracemips_linux.go new file mode 100644 index 0000000000..c431131e63 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zptracemips_linux.go @@ -0,0 +1,50 @@ +// Code generated by linux/mkall.go generatePtracePair(mips, mips64). DO NOT EDIT. + +// +build linux +// +build mips mips64 + +package unix + +import "unsafe" + +// PtraceRegsMips is the registers used by mips binaries. +type PtraceRegsMips struct { + Regs [32]uint64 + Lo uint64 + Hi uint64 + Epc uint64 + Badvaddr uint64 + Status uint64 + Cause uint64 +} + +// PtraceGetRegsMips fetches the registers used by mips binaries. 
+func PtraceGetRegsMips(pid int, regsout *PtraceRegsMips) error { + return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) +} + +// PtraceSetRegsMips sets the registers used by mips binaries. +func PtraceSetRegsMips(pid int, regs *PtraceRegsMips) error { + return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) +} + +// PtraceRegsMips64 is the registers used by mips64 binaries. +type PtraceRegsMips64 struct { + Regs [32]uint64 + Lo uint64 + Hi uint64 + Epc uint64 + Badvaddr uint64 + Status uint64 + Cause uint64 +} + +// PtraceGetRegsMips64 fetches the registers used by mips64 binaries. +func PtraceGetRegsMips64(pid int, regsout *PtraceRegsMips64) error { + return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) +} + +// PtraceSetRegsMips64 sets the registers used by mips64 binaries. +func PtraceSetRegsMips64(pid int, regs *PtraceRegsMips64) error { + return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) +} diff --git a/vendor/golang.org/x/sys/unix/zptracemipsle_linux.go b/vendor/golang.org/x/sys/unix/zptracemipsle_linux.go new file mode 100644 index 0000000000..dc3d6d3732 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zptracemipsle_linux.go @@ -0,0 +1,50 @@ +// Code generated by linux/mkall.go generatePtracePair(mipsle, mips64le). DO NOT EDIT. + +// +build linux +// +build mipsle mips64le + +package unix + +import "unsafe" + +// PtraceRegsMipsle is the registers used by mipsle binaries. +type PtraceRegsMipsle struct { + Regs [32]uint64 + Lo uint64 + Hi uint64 + Epc uint64 + Badvaddr uint64 + Status uint64 + Cause uint64 +} + +// PtraceGetRegsMipsle fetches the registers used by mipsle binaries. +func PtraceGetRegsMipsle(pid int, regsout *PtraceRegsMipsle) error { + return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) +} + +// PtraceSetRegsMipsle sets the registers used by mipsle binaries. +func PtraceSetRegsMipsle(pid int, regs *PtraceRegsMipsle) error { + return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) +} + +// PtraceRegsMips64le is the registers used by mips64le binaries. +type PtraceRegsMips64le struct { + Regs [32]uint64 + Lo uint64 + Hi uint64 + Epc uint64 + Badvaddr uint64 + Status uint64 + Cause uint64 +} + +// PtraceGetRegsMips64le fetches the registers used by mips64le binaries. +func PtraceGetRegsMips64le(pid int, regsout *PtraceRegsMips64le) error { + return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) +} + +// PtraceSetRegsMips64le sets the registers used by mips64le binaries. 
+func PtraceSetRegsMips64le(pid int, regs *PtraceRegsMips64le) error { + return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go index 10491e9ed3..9fb1b31f48 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go @@ -408,6 +408,17 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index 5f1f6bfef7..1e0fb46b01 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -408,6 +408,17 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go index 7a40974594..e1026a88a5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go @@ -408,6 +408,17 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 07c6ebc9f4..37fb210a08 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -408,6 +408,17 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index 
7fa205cd03..3738c1b5df 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -412,6 +412,17 @@ func extpwrite(fd int, p []byte, flags int, offset int64) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index 1a0bb4cb0e..be00d5d4fb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -388,6 +388,17 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index ac1e8e0136..22c3db7658 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -388,6 +388,17 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index 2b4e6acf04..83ada5739b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -388,6 +388,17 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index 000a46833f..85a2907e5d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -1035,6 +1035,17 @@ func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) ( 
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func read(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 712dffde4e..8e2be97d36 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -1035,6 +1035,17 @@ func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func read(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index 338796d909..5ff0637fde 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -1035,6 +1035,17 @@ func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func read(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index fc3006d97c..40760110f3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -1035,6 +1035,17 @@ func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func read(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ 
-1667,17 +1678,6 @@ func Seek(fd int, offset int64, whence int) (off int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) written = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 4b0ef20762..984e561733 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -1035,6 +1035,17 @@ func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func read(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index 31eb98c7d9..f98194e245 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -1035,6 +1035,17 @@ func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func read(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1677,17 +1688,6 @@ func Seek(fd int, offset int64, whence int) (off int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) written = int(r0) diff --git 
a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index 875ffa33f5..f30267019b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -1035,6 +1035,17 @@ func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func read(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1677,17 +1688,6 @@ func Seek(fd int, offset int64, whence int) (off int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) written = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index 6863e81aa6..f18c5e4a76 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -1035,6 +1035,17 @@ func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func read(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index 39eacd630d..bc268243cf 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -1035,6 +1035,17 @@ func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = 
errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func read(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1734,7 +1745,7 @@ func Seek(fd int, offset int64, whence int) (off int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + r0, _, e1 := Syscall6(SYS__NEWSELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 2a79746bf5..8d874cbcdc 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -1035,6 +1035,17 @@ func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func read(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1734,7 +1745,7 @@ func Seek(fd int, offset int64, whence int) (off int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + r0, _, e1 := Syscall6(SYS__NEWSELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index 49021966f0..169321273d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -1035,6 +1035,17 @@ func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func read(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go 
index db99fd0c99..32d9656bbb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -395,6 +395,17 @@ func getdents(fd int, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index 7b6c2c87e6..6d6e10a943 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -395,6 +395,17 @@ func getdents(fd int, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 0f4cc3b528..936a0c5917 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -395,6 +395,17 @@ func getdents(fd int, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 7baea87c7b..f135b65feb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -393,6 +393,17 @@ func getdents(fd int, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 0d69ce6b52..d2bd72ddc2 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -393,6 +393,17 @@ func getdents(fd int, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE 
TOP; DO NOT EDIT +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index 41572c26e4..317f8c24cc 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -393,6 +393,17 @@ func getdents(fd int, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 32b0209a59..98b2665500 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -25,6 +25,9 @@ import ( //go:cgo_import_dynamic libc___xnet_recvmsg __xnet_recvmsg "libsocket.so" //go:cgo_import_dynamic libc___xnet_sendmsg __xnet_sendmsg "libsocket.so" //go:cgo_import_dynamic libc_acct acct "libc.so" +//go:cgo_import_dynamic libc___makedev __makedev "libc.so" +//go:cgo_import_dynamic libc___major __major "libc.so" +//go:cgo_import_dynamic libc___minor __minor "libc.so" //go:cgo_import_dynamic libc_ioctl ioctl "libc.so" //go:cgo_import_dynamic libc_access access "libc.so" //go:cgo_import_dynamic libc_adjtime adjtime "libc.so" @@ -75,6 +78,7 @@ import ( //go:cgo_import_dynamic libc_mlock mlock "libc.so" //go:cgo_import_dynamic libc_mlockall mlockall "libc.so" //go:cgo_import_dynamic libc_mprotect mprotect "libc.so" +//go:cgo_import_dynamic libc_msync msync "libc.so" //go:cgo_import_dynamic libc_munlock munlock "libc.so" //go:cgo_import_dynamic libc_munlockall munlockall "libc.so" //go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so" @@ -145,6 +149,9 @@ import ( //go:linkname proc__xnet_recvmsg libc___xnet_recvmsg //go:linkname proc__xnet_sendmsg libc___xnet_sendmsg //go:linkname procacct libc_acct +//go:linkname proc__makedev libc___makedev +//go:linkname proc__major libc___major +//go:linkname proc__minor libc___minor //go:linkname procioctl libc_ioctl //go:linkname procAccess libc_access //go:linkname procAdjtime libc_adjtime @@ -195,6 +202,7 @@ import ( //go:linkname procMlock libc_mlock //go:linkname procMlockall libc_mlockall //go:linkname procMprotect libc_mprotect +//go:linkname procMsync libc_msync //go:linkname procMunlock libc_munlock //go:linkname procMunlockall libc_munlockall //go:linkname procNanosleep libc_nanosleep @@ -266,6 +274,9 @@ var ( proc__xnet_recvmsg, proc__xnet_sendmsg, procacct, + proc__makedev, + proc__major, + proc__minor, procioctl, procAccess, procAdjtime, @@ -316,6 +327,7 @@ var ( procMlock, procMlockall, procMprotect, + procMsync, procMunlock, procMunlockall, procNanosleep, @@ -519,6 +531,24 @@ func acct(path *byte) (err error) { return } 
+func __makedev(version int, major uint, minor uint) (val uint64) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&proc__makedev)), 3, uintptr(version), uintptr(major), uintptr(minor), 0, 0, 0) + val = uint64(r0) + return +} + +func __major(version int, dev uint64) (val uint) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&proc__major)), 2, uintptr(version), uintptr(dev), 0, 0, 0, 0) + val = uint(r0) + return +} + +func __minor(version int, dev uint64) (val uint) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&proc__minor)), 2, uintptr(version), uintptr(dev), 0, 0, 0, 0) + val = uint(r0) + return +} + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) if e1 != 0 { @@ -1017,6 +1047,18 @@ func Mprotect(b []byte, prot int) (err error) { return } +func Msync(b []byte, flags int) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMsync)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(flags), 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + func Munlock(b []byte) (err error) { var _p0 *byte if len(b) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsysctl_openbsd.go rename to vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go new file mode 100644 index 0000000000..83bb935b91 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go @@ -0,0 +1,270 @@ +// mksysctl_openbsd.pl +// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT + +package unix + +type mibentry struct { + ctlname string + ctloid []_C_int +} + +var sysctlMib = []mibentry{ + {"ddb.console", []_C_int{9, 6}}, + {"ddb.log", []_C_int{9, 7}}, + {"ddb.max_line", []_C_int{9, 3}}, + {"ddb.max_width", []_C_int{9, 2}}, + {"ddb.panic", []_C_int{9, 5}}, + {"ddb.radix", []_C_int{9, 1}}, + {"ddb.tab_stop_width", []_C_int{9, 4}}, + {"ddb.trigger", []_C_int{9, 8}}, + {"fs.posix.setuid", []_C_int{3, 1, 1}}, + {"hw.allowpowerdown", []_C_int{6, 22}}, + {"hw.byteorder", []_C_int{6, 4}}, + {"hw.cpuspeed", []_C_int{6, 12}}, + {"hw.diskcount", []_C_int{6, 10}}, + {"hw.disknames", []_C_int{6, 8}}, + {"hw.diskstats", []_C_int{6, 9}}, + {"hw.machine", []_C_int{6, 1}}, + {"hw.model", []_C_int{6, 2}}, + {"hw.ncpu", []_C_int{6, 3}}, + {"hw.ncpufound", []_C_int{6, 21}}, + {"hw.pagesize", []_C_int{6, 7}}, + {"hw.physmem", []_C_int{6, 19}}, + {"hw.product", []_C_int{6, 15}}, + {"hw.serialno", []_C_int{6, 17}}, + {"hw.setperf", []_C_int{6, 13}}, + {"hw.usermem", []_C_int{6, 20}}, + {"hw.uuid", []_C_int{6, 18}}, + {"hw.vendor", []_C_int{6, 14}}, + {"hw.version", []_C_int{6, 16}}, + {"kern.arandom", []_C_int{1, 37}}, + {"kern.argmax", []_C_int{1, 8}}, + {"kern.boottime", []_C_int{1, 21}}, + {"kern.bufcachepercent", []_C_int{1, 72}}, + {"kern.ccpu", []_C_int{1, 45}}, + {"kern.clockrate", []_C_int{1, 12}}, + {"kern.consdev", []_C_int{1, 75}}, + {"kern.cp_time", []_C_int{1, 40}}, + {"kern.cp_time2", []_C_int{1, 71}}, + {"kern.cryptodevallowsoft", []_C_int{1, 53}}, + {"kern.domainname", []_C_int{1, 22}}, + {"kern.file", []_C_int{1, 73}}, + {"kern.forkstat", []_C_int{1, 42}}, + {"kern.fscale", []_C_int{1, 46}}, + {"kern.fsync", []_C_int{1, 33}}, + {"kern.hostid", []_C_int{1, 11}}, + 
{"kern.hostname", []_C_int{1, 10}}, + {"kern.intrcnt.nintrcnt", []_C_int{1, 63, 1}}, + {"kern.job_control", []_C_int{1, 19}}, + {"kern.malloc.buckets", []_C_int{1, 39, 1}}, + {"kern.malloc.kmemnames", []_C_int{1, 39, 3}}, + {"kern.maxclusters", []_C_int{1, 67}}, + {"kern.maxfiles", []_C_int{1, 7}}, + {"kern.maxlocksperuid", []_C_int{1, 70}}, + {"kern.maxpartitions", []_C_int{1, 23}}, + {"kern.maxproc", []_C_int{1, 6}}, + {"kern.maxthread", []_C_int{1, 25}}, + {"kern.maxvnodes", []_C_int{1, 5}}, + {"kern.mbstat", []_C_int{1, 59}}, + {"kern.msgbuf", []_C_int{1, 48}}, + {"kern.msgbufsize", []_C_int{1, 38}}, + {"kern.nchstats", []_C_int{1, 41}}, + {"kern.netlivelocks", []_C_int{1, 76}}, + {"kern.nfiles", []_C_int{1, 56}}, + {"kern.ngroups", []_C_int{1, 18}}, + {"kern.nosuidcoredump", []_C_int{1, 32}}, + {"kern.nprocs", []_C_int{1, 47}}, + {"kern.nselcoll", []_C_int{1, 43}}, + {"kern.nthreads", []_C_int{1, 26}}, + {"kern.numvnodes", []_C_int{1, 58}}, + {"kern.osrelease", []_C_int{1, 2}}, + {"kern.osrevision", []_C_int{1, 3}}, + {"kern.ostype", []_C_int{1, 1}}, + {"kern.osversion", []_C_int{1, 27}}, + {"kern.pool_debug", []_C_int{1, 77}}, + {"kern.posix1version", []_C_int{1, 17}}, + {"kern.proc", []_C_int{1, 66}}, + {"kern.random", []_C_int{1, 31}}, + {"kern.rawpartition", []_C_int{1, 24}}, + {"kern.saved_ids", []_C_int{1, 20}}, + {"kern.securelevel", []_C_int{1, 9}}, + {"kern.seminfo", []_C_int{1, 61}}, + {"kern.shminfo", []_C_int{1, 62}}, + {"kern.somaxconn", []_C_int{1, 28}}, + {"kern.sominconn", []_C_int{1, 29}}, + {"kern.splassert", []_C_int{1, 54}}, + {"kern.stackgap_random", []_C_int{1, 50}}, + {"kern.sysvipc_info", []_C_int{1, 51}}, + {"kern.sysvmsg", []_C_int{1, 34}}, + {"kern.sysvsem", []_C_int{1, 35}}, + {"kern.sysvshm", []_C_int{1, 36}}, + {"kern.timecounter.choice", []_C_int{1, 69, 4}}, + {"kern.timecounter.hardware", []_C_int{1, 69, 3}}, + {"kern.timecounter.tick", []_C_int{1, 69, 1}}, + {"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}}, + {"kern.tty.maxptys", []_C_int{1, 44, 6}}, + {"kern.tty.nptys", []_C_int{1, 44, 7}}, + {"kern.tty.tk_cancc", []_C_int{1, 44, 4}}, + {"kern.tty.tk_nin", []_C_int{1, 44, 1}}, + {"kern.tty.tk_nout", []_C_int{1, 44, 2}}, + {"kern.tty.tk_rawcc", []_C_int{1, 44, 3}}, + {"kern.tty.ttyinfo", []_C_int{1, 44, 5}}, + {"kern.ttycount", []_C_int{1, 57}}, + {"kern.userasymcrypto", []_C_int{1, 60}}, + {"kern.usercrypto", []_C_int{1, 52}}, + {"kern.usermount", []_C_int{1, 30}}, + {"kern.version", []_C_int{1, 4}}, + {"kern.vnode", []_C_int{1, 13}}, + {"kern.watchdog.auto", []_C_int{1, 64, 2}}, + {"kern.watchdog.period", []_C_int{1, 64, 1}}, + {"net.bpf.bufsize", []_C_int{4, 31, 1}}, + {"net.bpf.maxbufsize", []_C_int{4, 31, 2}}, + {"net.inet.ah.enable", []_C_int{4, 2, 51, 1}}, + {"net.inet.ah.stats", []_C_int{4, 2, 51, 2}}, + {"net.inet.carp.allow", []_C_int{4, 2, 112, 1}}, + {"net.inet.carp.log", []_C_int{4, 2, 112, 3}}, + {"net.inet.carp.preempt", []_C_int{4, 2, 112, 2}}, + {"net.inet.carp.stats", []_C_int{4, 2, 112, 4}}, + {"net.inet.divert.recvspace", []_C_int{4, 2, 258, 1}}, + {"net.inet.divert.sendspace", []_C_int{4, 2, 258, 2}}, + {"net.inet.divert.stats", []_C_int{4, 2, 258, 3}}, + {"net.inet.esp.enable", []_C_int{4, 2, 50, 1}}, + {"net.inet.esp.stats", []_C_int{4, 2, 50, 4}}, + {"net.inet.esp.udpencap", []_C_int{4, 2, 50, 2}}, + {"net.inet.esp.udpencap_port", []_C_int{4, 2, 50, 3}}, + {"net.inet.etherip.allow", []_C_int{4, 2, 97, 1}}, + {"net.inet.etherip.stats", []_C_int{4, 2, 97, 2}}, + {"net.inet.gre.allow", []_C_int{4, 2, 47, 1}}, + 
{"net.inet.gre.wccp", []_C_int{4, 2, 47, 2}}, + {"net.inet.icmp.bmcastecho", []_C_int{4, 2, 1, 2}}, + {"net.inet.icmp.errppslimit", []_C_int{4, 2, 1, 3}}, + {"net.inet.icmp.maskrepl", []_C_int{4, 2, 1, 1}}, + {"net.inet.icmp.rediraccept", []_C_int{4, 2, 1, 4}}, + {"net.inet.icmp.redirtimeout", []_C_int{4, 2, 1, 5}}, + {"net.inet.icmp.stats", []_C_int{4, 2, 1, 7}}, + {"net.inet.icmp.tstamprepl", []_C_int{4, 2, 1, 6}}, + {"net.inet.igmp.stats", []_C_int{4, 2, 2, 1}}, + {"net.inet.ip.arpqueued", []_C_int{4, 2, 0, 36}}, + {"net.inet.ip.encdebug", []_C_int{4, 2, 0, 12}}, + {"net.inet.ip.forwarding", []_C_int{4, 2, 0, 1}}, + {"net.inet.ip.ifq.congestion", []_C_int{4, 2, 0, 30, 4}}, + {"net.inet.ip.ifq.drops", []_C_int{4, 2, 0, 30, 3}}, + {"net.inet.ip.ifq.len", []_C_int{4, 2, 0, 30, 1}}, + {"net.inet.ip.ifq.maxlen", []_C_int{4, 2, 0, 30, 2}}, + {"net.inet.ip.maxqueue", []_C_int{4, 2, 0, 11}}, + {"net.inet.ip.mforwarding", []_C_int{4, 2, 0, 31}}, + {"net.inet.ip.mrtproto", []_C_int{4, 2, 0, 34}}, + {"net.inet.ip.mrtstats", []_C_int{4, 2, 0, 35}}, + {"net.inet.ip.mtu", []_C_int{4, 2, 0, 4}}, + {"net.inet.ip.mtudisc", []_C_int{4, 2, 0, 27}}, + {"net.inet.ip.mtudisctimeout", []_C_int{4, 2, 0, 28}}, + {"net.inet.ip.multipath", []_C_int{4, 2, 0, 32}}, + {"net.inet.ip.portfirst", []_C_int{4, 2, 0, 7}}, + {"net.inet.ip.porthifirst", []_C_int{4, 2, 0, 9}}, + {"net.inet.ip.porthilast", []_C_int{4, 2, 0, 10}}, + {"net.inet.ip.portlast", []_C_int{4, 2, 0, 8}}, + {"net.inet.ip.redirect", []_C_int{4, 2, 0, 2}}, + {"net.inet.ip.sourceroute", []_C_int{4, 2, 0, 5}}, + {"net.inet.ip.stats", []_C_int{4, 2, 0, 33}}, + {"net.inet.ip.ttl", []_C_int{4, 2, 0, 3}}, + {"net.inet.ipcomp.enable", []_C_int{4, 2, 108, 1}}, + {"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}}, + {"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}}, + {"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}}, + {"net.inet.mobileip.allow", []_C_int{4, 2, 55, 1}}, + {"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}}, + {"net.inet.pim.stats", []_C_int{4, 2, 103, 1}}, + {"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}}, + {"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}}, + {"net.inet.tcp.baddynamic", []_C_int{4, 2, 6, 6}}, + {"net.inet.tcp.drop", []_C_int{4, 2, 6, 19}}, + {"net.inet.tcp.ecn", []_C_int{4, 2, 6, 14}}, + {"net.inet.tcp.ident", []_C_int{4, 2, 6, 9}}, + {"net.inet.tcp.keepidle", []_C_int{4, 2, 6, 3}}, + {"net.inet.tcp.keepinittime", []_C_int{4, 2, 6, 2}}, + {"net.inet.tcp.keepintvl", []_C_int{4, 2, 6, 4}}, + {"net.inet.tcp.mssdflt", []_C_int{4, 2, 6, 11}}, + {"net.inet.tcp.reasslimit", []_C_int{4, 2, 6, 18}}, + {"net.inet.tcp.rfc1323", []_C_int{4, 2, 6, 1}}, + {"net.inet.tcp.rfc3390", []_C_int{4, 2, 6, 17}}, + {"net.inet.tcp.rstppslimit", []_C_int{4, 2, 6, 12}}, + {"net.inet.tcp.sack", []_C_int{4, 2, 6, 10}}, + {"net.inet.tcp.sackholelimit", []_C_int{4, 2, 6, 20}}, + {"net.inet.tcp.slowhz", []_C_int{4, 2, 6, 5}}, + {"net.inet.tcp.stats", []_C_int{4, 2, 6, 21}}, + {"net.inet.tcp.synbucketlimit", []_C_int{4, 2, 6, 16}}, + {"net.inet.tcp.syncachelimit", []_C_int{4, 2, 6, 15}}, + {"net.inet.udp.baddynamic", []_C_int{4, 2, 17, 2}}, + {"net.inet.udp.checksum", []_C_int{4, 2, 17, 1}}, + {"net.inet.udp.recvspace", []_C_int{4, 2, 17, 3}}, + {"net.inet.udp.sendspace", []_C_int{4, 2, 17, 4}}, + {"net.inet.udp.stats", []_C_int{4, 2, 17, 5}}, + {"net.inet6.divert.recvspace", []_C_int{4, 24, 86, 1}}, + {"net.inet6.divert.sendspace", []_C_int{4, 24, 86, 2}}, + {"net.inet6.divert.stats", []_C_int{4, 24, 86, 3}}, + {"net.inet6.icmp6.errppslimit", []_C_int{4, 24, 
30, 14}}, + {"net.inet6.icmp6.mtudisc_hiwat", []_C_int{4, 24, 30, 16}}, + {"net.inet6.icmp6.mtudisc_lowat", []_C_int{4, 24, 30, 17}}, + {"net.inet6.icmp6.nd6_debug", []_C_int{4, 24, 30, 18}}, + {"net.inet6.icmp6.nd6_delay", []_C_int{4, 24, 30, 8}}, + {"net.inet6.icmp6.nd6_maxnudhint", []_C_int{4, 24, 30, 15}}, + {"net.inet6.icmp6.nd6_mmaxtries", []_C_int{4, 24, 30, 10}}, + {"net.inet6.icmp6.nd6_prune", []_C_int{4, 24, 30, 6}}, + {"net.inet6.icmp6.nd6_umaxtries", []_C_int{4, 24, 30, 9}}, + {"net.inet6.icmp6.nd6_useloopback", []_C_int{4, 24, 30, 11}}, + {"net.inet6.icmp6.nodeinfo", []_C_int{4, 24, 30, 13}}, + {"net.inet6.icmp6.rediraccept", []_C_int{4, 24, 30, 2}}, + {"net.inet6.icmp6.redirtimeout", []_C_int{4, 24, 30, 3}}, + {"net.inet6.ip6.accept_rtadv", []_C_int{4, 24, 17, 12}}, + {"net.inet6.ip6.auto_flowlabel", []_C_int{4, 24, 17, 17}}, + {"net.inet6.ip6.dad_count", []_C_int{4, 24, 17, 16}}, + {"net.inet6.ip6.dad_pending", []_C_int{4, 24, 17, 49}}, + {"net.inet6.ip6.defmcasthlim", []_C_int{4, 24, 17, 18}}, + {"net.inet6.ip6.forwarding", []_C_int{4, 24, 17, 1}}, + {"net.inet6.ip6.forwsrcrt", []_C_int{4, 24, 17, 5}}, + {"net.inet6.ip6.hdrnestlimit", []_C_int{4, 24, 17, 15}}, + {"net.inet6.ip6.hlim", []_C_int{4, 24, 17, 3}}, + {"net.inet6.ip6.log_interval", []_C_int{4, 24, 17, 14}}, + {"net.inet6.ip6.maxdynroutes", []_C_int{4, 24, 17, 48}}, + {"net.inet6.ip6.maxfragpackets", []_C_int{4, 24, 17, 9}}, + {"net.inet6.ip6.maxfrags", []_C_int{4, 24, 17, 41}}, + {"net.inet6.ip6.maxifdefrouters", []_C_int{4, 24, 17, 47}}, + {"net.inet6.ip6.maxifprefixes", []_C_int{4, 24, 17, 46}}, + {"net.inet6.ip6.mforwarding", []_C_int{4, 24, 17, 42}}, + {"net.inet6.ip6.mrtproto", []_C_int{4, 24, 17, 8}}, + {"net.inet6.ip6.mtudisctimeout", []_C_int{4, 24, 17, 50}}, + {"net.inet6.ip6.multicast_mtudisc", []_C_int{4, 24, 17, 44}}, + {"net.inet6.ip6.multipath", []_C_int{4, 24, 17, 43}}, + {"net.inet6.ip6.neighborgcthresh", []_C_int{4, 24, 17, 45}}, + {"net.inet6.ip6.redirect", []_C_int{4, 24, 17, 2}}, + {"net.inet6.ip6.rr_prune", []_C_int{4, 24, 17, 22}}, + {"net.inet6.ip6.sourcecheck", []_C_int{4, 24, 17, 10}}, + {"net.inet6.ip6.sourcecheck_logint", []_C_int{4, 24, 17, 11}}, + {"net.inet6.ip6.use_deprecated", []_C_int{4, 24, 17, 21}}, + {"net.inet6.ip6.v6only", []_C_int{4, 24, 17, 24}}, + {"net.key.sadb_dump", []_C_int{4, 30, 1}}, + {"net.key.spd_dump", []_C_int{4, 30, 2}}, + {"net.mpls.ifq.congestion", []_C_int{4, 33, 3, 4}}, + {"net.mpls.ifq.drops", []_C_int{4, 33, 3, 3}}, + {"net.mpls.ifq.len", []_C_int{4, 33, 3, 1}}, + {"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}}, + {"net.mpls.mapttl_ip", []_C_int{4, 33, 5}}, + {"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}}, + {"net.mpls.maxloop_inkernel", []_C_int{4, 33, 4}}, + {"net.mpls.ttl", []_C_int{4, 33, 2}}, + {"net.pflow.stats", []_C_int{4, 34, 1}}, + {"net.pipex.enable", []_C_int{4, 35, 1}}, + {"vm.anonmin", []_C_int{2, 7}}, + {"vm.loadavg", []_C_int{2, 2}}, + {"vm.maxslp", []_C_int{2, 10}}, + {"vm.nkmempages", []_C_int{2, 6}}, + {"vm.psstrings", []_C_int{2, 3}}, + {"vm.swapencrypt.enable", []_C_int{2, 5, 0}}, + {"vm.swapencrypt.keyscreated", []_C_int{2, 5, 1}}, + {"vm.swapencrypt.keysdeleted", []_C_int{2, 5, 2}}, + {"vm.uspace", []_C_int{2, 11}}, + {"vm.uvmexp", []_C_int{2, 4}}, + {"vm.vmmeter", []_C_int{2, 1}}, + {"vm.vnodemin", []_C_int{2, 9}}, + {"vm.vtextmin", []_C_int{2, 8}}, +} diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go new file mode 100644 index 0000000000..83bb935b91 --- /dev/null +++ 
b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go @@ -0,0 +1,270 @@ +// mksysctl_openbsd.pl +// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT + +package unix + +type mibentry struct { + ctlname string + ctloid []_C_int +} + +var sysctlMib = []mibentry{ + {"ddb.console", []_C_int{9, 6}}, + {"ddb.log", []_C_int{9, 7}}, + {"ddb.max_line", []_C_int{9, 3}}, + {"ddb.max_width", []_C_int{9, 2}}, + {"ddb.panic", []_C_int{9, 5}}, + {"ddb.radix", []_C_int{9, 1}}, + {"ddb.tab_stop_width", []_C_int{9, 4}}, + {"ddb.trigger", []_C_int{9, 8}}, + {"fs.posix.setuid", []_C_int{3, 1, 1}}, + {"hw.allowpowerdown", []_C_int{6, 22}}, + {"hw.byteorder", []_C_int{6, 4}}, + {"hw.cpuspeed", []_C_int{6, 12}}, + {"hw.diskcount", []_C_int{6, 10}}, + {"hw.disknames", []_C_int{6, 8}}, + {"hw.diskstats", []_C_int{6, 9}}, + {"hw.machine", []_C_int{6, 1}}, + {"hw.model", []_C_int{6, 2}}, + {"hw.ncpu", []_C_int{6, 3}}, + {"hw.ncpufound", []_C_int{6, 21}}, + {"hw.pagesize", []_C_int{6, 7}}, + {"hw.physmem", []_C_int{6, 19}}, + {"hw.product", []_C_int{6, 15}}, + {"hw.serialno", []_C_int{6, 17}}, + {"hw.setperf", []_C_int{6, 13}}, + {"hw.usermem", []_C_int{6, 20}}, + {"hw.uuid", []_C_int{6, 18}}, + {"hw.vendor", []_C_int{6, 14}}, + {"hw.version", []_C_int{6, 16}}, + {"kern.arandom", []_C_int{1, 37}}, + {"kern.argmax", []_C_int{1, 8}}, + {"kern.boottime", []_C_int{1, 21}}, + {"kern.bufcachepercent", []_C_int{1, 72}}, + {"kern.ccpu", []_C_int{1, 45}}, + {"kern.clockrate", []_C_int{1, 12}}, + {"kern.consdev", []_C_int{1, 75}}, + {"kern.cp_time", []_C_int{1, 40}}, + {"kern.cp_time2", []_C_int{1, 71}}, + {"kern.cryptodevallowsoft", []_C_int{1, 53}}, + {"kern.domainname", []_C_int{1, 22}}, + {"kern.file", []_C_int{1, 73}}, + {"kern.forkstat", []_C_int{1, 42}}, + {"kern.fscale", []_C_int{1, 46}}, + {"kern.fsync", []_C_int{1, 33}}, + {"kern.hostid", []_C_int{1, 11}}, + {"kern.hostname", []_C_int{1, 10}}, + {"kern.intrcnt.nintrcnt", []_C_int{1, 63, 1}}, + {"kern.job_control", []_C_int{1, 19}}, + {"kern.malloc.buckets", []_C_int{1, 39, 1}}, + {"kern.malloc.kmemnames", []_C_int{1, 39, 3}}, + {"kern.maxclusters", []_C_int{1, 67}}, + {"kern.maxfiles", []_C_int{1, 7}}, + {"kern.maxlocksperuid", []_C_int{1, 70}}, + {"kern.maxpartitions", []_C_int{1, 23}}, + {"kern.maxproc", []_C_int{1, 6}}, + {"kern.maxthread", []_C_int{1, 25}}, + {"kern.maxvnodes", []_C_int{1, 5}}, + {"kern.mbstat", []_C_int{1, 59}}, + {"kern.msgbuf", []_C_int{1, 48}}, + {"kern.msgbufsize", []_C_int{1, 38}}, + {"kern.nchstats", []_C_int{1, 41}}, + {"kern.netlivelocks", []_C_int{1, 76}}, + {"kern.nfiles", []_C_int{1, 56}}, + {"kern.ngroups", []_C_int{1, 18}}, + {"kern.nosuidcoredump", []_C_int{1, 32}}, + {"kern.nprocs", []_C_int{1, 47}}, + {"kern.nselcoll", []_C_int{1, 43}}, + {"kern.nthreads", []_C_int{1, 26}}, + {"kern.numvnodes", []_C_int{1, 58}}, + {"kern.osrelease", []_C_int{1, 2}}, + {"kern.osrevision", []_C_int{1, 3}}, + {"kern.ostype", []_C_int{1, 1}}, + {"kern.osversion", []_C_int{1, 27}}, + {"kern.pool_debug", []_C_int{1, 77}}, + {"kern.posix1version", []_C_int{1, 17}}, + {"kern.proc", []_C_int{1, 66}}, + {"kern.random", []_C_int{1, 31}}, + {"kern.rawpartition", []_C_int{1, 24}}, + {"kern.saved_ids", []_C_int{1, 20}}, + {"kern.securelevel", []_C_int{1, 9}}, + {"kern.seminfo", []_C_int{1, 61}}, + {"kern.shminfo", []_C_int{1, 62}}, + {"kern.somaxconn", []_C_int{1, 28}}, + {"kern.sominconn", []_C_int{1, 29}}, + {"kern.splassert", []_C_int{1, 54}}, + {"kern.stackgap_random", []_C_int{1, 50}}, + {"kern.sysvipc_info", []_C_int{1, 51}}, + {"kern.sysvmsg", 
[]_C_int{1, 34}}, + {"kern.sysvsem", []_C_int{1, 35}}, + {"kern.sysvshm", []_C_int{1, 36}}, + {"kern.timecounter.choice", []_C_int{1, 69, 4}}, + {"kern.timecounter.hardware", []_C_int{1, 69, 3}}, + {"kern.timecounter.tick", []_C_int{1, 69, 1}}, + {"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}}, + {"kern.tty.maxptys", []_C_int{1, 44, 6}}, + {"kern.tty.nptys", []_C_int{1, 44, 7}}, + {"kern.tty.tk_cancc", []_C_int{1, 44, 4}}, + {"kern.tty.tk_nin", []_C_int{1, 44, 1}}, + {"kern.tty.tk_nout", []_C_int{1, 44, 2}}, + {"kern.tty.tk_rawcc", []_C_int{1, 44, 3}}, + {"kern.tty.ttyinfo", []_C_int{1, 44, 5}}, + {"kern.ttycount", []_C_int{1, 57}}, + {"kern.userasymcrypto", []_C_int{1, 60}}, + {"kern.usercrypto", []_C_int{1, 52}}, + {"kern.usermount", []_C_int{1, 30}}, + {"kern.version", []_C_int{1, 4}}, + {"kern.vnode", []_C_int{1, 13}}, + {"kern.watchdog.auto", []_C_int{1, 64, 2}}, + {"kern.watchdog.period", []_C_int{1, 64, 1}}, + {"net.bpf.bufsize", []_C_int{4, 31, 1}}, + {"net.bpf.maxbufsize", []_C_int{4, 31, 2}}, + {"net.inet.ah.enable", []_C_int{4, 2, 51, 1}}, + {"net.inet.ah.stats", []_C_int{4, 2, 51, 2}}, + {"net.inet.carp.allow", []_C_int{4, 2, 112, 1}}, + {"net.inet.carp.log", []_C_int{4, 2, 112, 3}}, + {"net.inet.carp.preempt", []_C_int{4, 2, 112, 2}}, + {"net.inet.carp.stats", []_C_int{4, 2, 112, 4}}, + {"net.inet.divert.recvspace", []_C_int{4, 2, 258, 1}}, + {"net.inet.divert.sendspace", []_C_int{4, 2, 258, 2}}, + {"net.inet.divert.stats", []_C_int{4, 2, 258, 3}}, + {"net.inet.esp.enable", []_C_int{4, 2, 50, 1}}, + {"net.inet.esp.stats", []_C_int{4, 2, 50, 4}}, + {"net.inet.esp.udpencap", []_C_int{4, 2, 50, 2}}, + {"net.inet.esp.udpencap_port", []_C_int{4, 2, 50, 3}}, + {"net.inet.etherip.allow", []_C_int{4, 2, 97, 1}}, + {"net.inet.etherip.stats", []_C_int{4, 2, 97, 2}}, + {"net.inet.gre.allow", []_C_int{4, 2, 47, 1}}, + {"net.inet.gre.wccp", []_C_int{4, 2, 47, 2}}, + {"net.inet.icmp.bmcastecho", []_C_int{4, 2, 1, 2}}, + {"net.inet.icmp.errppslimit", []_C_int{4, 2, 1, 3}}, + {"net.inet.icmp.maskrepl", []_C_int{4, 2, 1, 1}}, + {"net.inet.icmp.rediraccept", []_C_int{4, 2, 1, 4}}, + {"net.inet.icmp.redirtimeout", []_C_int{4, 2, 1, 5}}, + {"net.inet.icmp.stats", []_C_int{4, 2, 1, 7}}, + {"net.inet.icmp.tstamprepl", []_C_int{4, 2, 1, 6}}, + {"net.inet.igmp.stats", []_C_int{4, 2, 2, 1}}, + {"net.inet.ip.arpqueued", []_C_int{4, 2, 0, 36}}, + {"net.inet.ip.encdebug", []_C_int{4, 2, 0, 12}}, + {"net.inet.ip.forwarding", []_C_int{4, 2, 0, 1}}, + {"net.inet.ip.ifq.congestion", []_C_int{4, 2, 0, 30, 4}}, + {"net.inet.ip.ifq.drops", []_C_int{4, 2, 0, 30, 3}}, + {"net.inet.ip.ifq.len", []_C_int{4, 2, 0, 30, 1}}, + {"net.inet.ip.ifq.maxlen", []_C_int{4, 2, 0, 30, 2}}, + {"net.inet.ip.maxqueue", []_C_int{4, 2, 0, 11}}, + {"net.inet.ip.mforwarding", []_C_int{4, 2, 0, 31}}, + {"net.inet.ip.mrtproto", []_C_int{4, 2, 0, 34}}, + {"net.inet.ip.mrtstats", []_C_int{4, 2, 0, 35}}, + {"net.inet.ip.mtu", []_C_int{4, 2, 0, 4}}, + {"net.inet.ip.mtudisc", []_C_int{4, 2, 0, 27}}, + {"net.inet.ip.mtudisctimeout", []_C_int{4, 2, 0, 28}}, + {"net.inet.ip.multipath", []_C_int{4, 2, 0, 32}}, + {"net.inet.ip.portfirst", []_C_int{4, 2, 0, 7}}, + {"net.inet.ip.porthifirst", []_C_int{4, 2, 0, 9}}, + {"net.inet.ip.porthilast", []_C_int{4, 2, 0, 10}}, + {"net.inet.ip.portlast", []_C_int{4, 2, 0, 8}}, + {"net.inet.ip.redirect", []_C_int{4, 2, 0, 2}}, + {"net.inet.ip.sourceroute", []_C_int{4, 2, 0, 5}}, + {"net.inet.ip.stats", []_C_int{4, 2, 0, 33}}, + {"net.inet.ip.ttl", []_C_int{4, 2, 0, 3}}, + {"net.inet.ipcomp.enable", 
[]_C_int{4, 2, 108, 1}}, + {"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}}, + {"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}}, + {"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}}, + {"net.inet.mobileip.allow", []_C_int{4, 2, 55, 1}}, + {"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}}, + {"net.inet.pim.stats", []_C_int{4, 2, 103, 1}}, + {"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}}, + {"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}}, + {"net.inet.tcp.baddynamic", []_C_int{4, 2, 6, 6}}, + {"net.inet.tcp.drop", []_C_int{4, 2, 6, 19}}, + {"net.inet.tcp.ecn", []_C_int{4, 2, 6, 14}}, + {"net.inet.tcp.ident", []_C_int{4, 2, 6, 9}}, + {"net.inet.tcp.keepidle", []_C_int{4, 2, 6, 3}}, + {"net.inet.tcp.keepinittime", []_C_int{4, 2, 6, 2}}, + {"net.inet.tcp.keepintvl", []_C_int{4, 2, 6, 4}}, + {"net.inet.tcp.mssdflt", []_C_int{4, 2, 6, 11}}, + {"net.inet.tcp.reasslimit", []_C_int{4, 2, 6, 18}}, + {"net.inet.tcp.rfc1323", []_C_int{4, 2, 6, 1}}, + {"net.inet.tcp.rfc3390", []_C_int{4, 2, 6, 17}}, + {"net.inet.tcp.rstppslimit", []_C_int{4, 2, 6, 12}}, + {"net.inet.tcp.sack", []_C_int{4, 2, 6, 10}}, + {"net.inet.tcp.sackholelimit", []_C_int{4, 2, 6, 20}}, + {"net.inet.tcp.slowhz", []_C_int{4, 2, 6, 5}}, + {"net.inet.tcp.stats", []_C_int{4, 2, 6, 21}}, + {"net.inet.tcp.synbucketlimit", []_C_int{4, 2, 6, 16}}, + {"net.inet.tcp.syncachelimit", []_C_int{4, 2, 6, 15}}, + {"net.inet.udp.baddynamic", []_C_int{4, 2, 17, 2}}, + {"net.inet.udp.checksum", []_C_int{4, 2, 17, 1}}, + {"net.inet.udp.recvspace", []_C_int{4, 2, 17, 3}}, + {"net.inet.udp.sendspace", []_C_int{4, 2, 17, 4}}, + {"net.inet.udp.stats", []_C_int{4, 2, 17, 5}}, + {"net.inet6.divert.recvspace", []_C_int{4, 24, 86, 1}}, + {"net.inet6.divert.sendspace", []_C_int{4, 24, 86, 2}}, + {"net.inet6.divert.stats", []_C_int{4, 24, 86, 3}}, + {"net.inet6.icmp6.errppslimit", []_C_int{4, 24, 30, 14}}, + {"net.inet6.icmp6.mtudisc_hiwat", []_C_int{4, 24, 30, 16}}, + {"net.inet6.icmp6.mtudisc_lowat", []_C_int{4, 24, 30, 17}}, + {"net.inet6.icmp6.nd6_debug", []_C_int{4, 24, 30, 18}}, + {"net.inet6.icmp6.nd6_delay", []_C_int{4, 24, 30, 8}}, + {"net.inet6.icmp6.nd6_maxnudhint", []_C_int{4, 24, 30, 15}}, + {"net.inet6.icmp6.nd6_mmaxtries", []_C_int{4, 24, 30, 10}}, + {"net.inet6.icmp6.nd6_prune", []_C_int{4, 24, 30, 6}}, + {"net.inet6.icmp6.nd6_umaxtries", []_C_int{4, 24, 30, 9}}, + {"net.inet6.icmp6.nd6_useloopback", []_C_int{4, 24, 30, 11}}, + {"net.inet6.icmp6.nodeinfo", []_C_int{4, 24, 30, 13}}, + {"net.inet6.icmp6.rediraccept", []_C_int{4, 24, 30, 2}}, + {"net.inet6.icmp6.redirtimeout", []_C_int{4, 24, 30, 3}}, + {"net.inet6.ip6.accept_rtadv", []_C_int{4, 24, 17, 12}}, + {"net.inet6.ip6.auto_flowlabel", []_C_int{4, 24, 17, 17}}, + {"net.inet6.ip6.dad_count", []_C_int{4, 24, 17, 16}}, + {"net.inet6.ip6.dad_pending", []_C_int{4, 24, 17, 49}}, + {"net.inet6.ip6.defmcasthlim", []_C_int{4, 24, 17, 18}}, + {"net.inet6.ip6.forwarding", []_C_int{4, 24, 17, 1}}, + {"net.inet6.ip6.forwsrcrt", []_C_int{4, 24, 17, 5}}, + {"net.inet6.ip6.hdrnestlimit", []_C_int{4, 24, 17, 15}}, + {"net.inet6.ip6.hlim", []_C_int{4, 24, 17, 3}}, + {"net.inet6.ip6.log_interval", []_C_int{4, 24, 17, 14}}, + {"net.inet6.ip6.maxdynroutes", []_C_int{4, 24, 17, 48}}, + {"net.inet6.ip6.maxfragpackets", []_C_int{4, 24, 17, 9}}, + {"net.inet6.ip6.maxfrags", []_C_int{4, 24, 17, 41}}, + {"net.inet6.ip6.maxifdefrouters", []_C_int{4, 24, 17, 47}}, + {"net.inet6.ip6.maxifprefixes", []_C_int{4, 24, 17, 46}}, + {"net.inet6.ip6.mforwarding", []_C_int{4, 24, 17, 42}}, + {"net.inet6.ip6.mrtproto", 
[]_C_int{4, 24, 17, 8}}, + {"net.inet6.ip6.mtudisctimeout", []_C_int{4, 24, 17, 50}}, + {"net.inet6.ip6.multicast_mtudisc", []_C_int{4, 24, 17, 44}}, + {"net.inet6.ip6.multipath", []_C_int{4, 24, 17, 43}}, + {"net.inet6.ip6.neighborgcthresh", []_C_int{4, 24, 17, 45}}, + {"net.inet6.ip6.redirect", []_C_int{4, 24, 17, 2}}, + {"net.inet6.ip6.rr_prune", []_C_int{4, 24, 17, 22}}, + {"net.inet6.ip6.sourcecheck", []_C_int{4, 24, 17, 10}}, + {"net.inet6.ip6.sourcecheck_logint", []_C_int{4, 24, 17, 11}}, + {"net.inet6.ip6.use_deprecated", []_C_int{4, 24, 17, 21}}, + {"net.inet6.ip6.v6only", []_C_int{4, 24, 17, 24}}, + {"net.key.sadb_dump", []_C_int{4, 30, 1}}, + {"net.key.spd_dump", []_C_int{4, 30, 2}}, + {"net.mpls.ifq.congestion", []_C_int{4, 33, 3, 4}}, + {"net.mpls.ifq.drops", []_C_int{4, 33, 3, 3}}, + {"net.mpls.ifq.len", []_C_int{4, 33, 3, 1}}, + {"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}}, + {"net.mpls.mapttl_ip", []_C_int{4, 33, 5}}, + {"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}}, + {"net.mpls.maxloop_inkernel", []_C_int{4, 33, 4}}, + {"net.mpls.ttl", []_C_int{4, 33, 2}}, + {"net.pflow.stats", []_C_int{4, 34, 1}}, + {"net.pipex.enable", []_C_int{4, 35, 1}}, + {"vm.anonmin", []_C_int{2, 7}}, + {"vm.loadavg", []_C_int{2, 2}}, + {"vm.maxslp", []_C_int{2, 10}}, + {"vm.nkmempages", []_C_int{2, 6}}, + {"vm.psstrings", []_C_int{2, 3}}, + {"vm.swapencrypt.enable", []_C_int{2, 5, 0}}, + {"vm.swapencrypt.keyscreated", []_C_int{2, 5, 1}}, + {"vm.swapencrypt.keysdeleted", []_C_int{2, 5, 2}}, + {"vm.uspace", []_C_int{2, 11}}, + {"vm.uvmexp", []_C_int{2, 4}}, + {"vm.vmmeter", []_C_int{2, 1}}, + {"vm.vnodemin", []_C_int{2, 9}}, + {"vm.vtextmin", []_C_int{2, 8}}, +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go index e61d78a54f..4667c7b277 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go @@ -460,3 +460,22 @@ const ( AT_SYMLINK_FOLLOW = 0x40 AT_SYMLINK_NOFOLLOW = 0x20 ) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 2619155ff8..3f33b18fc7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -470,3 +470,22 @@ const ( AT_SYMLINK_FOLLOW = 0x40 AT_SYMLINK_NOFOLLOW = 0x20 ) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go index 4dca0d4db2..463a28ba6f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go @@ -461,3 +461,22 @@ const ( AT_SYMLINK_FOLLOW = 0x40 AT_SYMLINK_NOFOLLOW = 0x20 ) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) diff --git 
a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index f2881fd142..1ec20a0025 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -1,6 +1,7 @@ +// cgo -godefs types_darwin.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + // +build arm64,darwin -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types_darwin.go package unix @@ -469,3 +470,22 @@ const ( AT_SYMLINK_FOLLOW = 0x40 AT_SYMLINK_NOFOLLOW = 0x20 ) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go index 67c6bf883c..ab515c3e1a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go @@ -446,3 +446,22 @@ const ( AT_FDCWD = 0xfffafdcd AT_SYMLINK_NOFOLLOW = 0x1 ) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 5b28bcbbac..18f7816009 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -516,6 +516,26 @@ const ( AT_SYMLINK_NOFOLLOW = 0x200 ) +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLINIGNEOF = 0x2000 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + type CapRights struct { Rights [2]uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index c65d89e497..dd0db2a5ea 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -519,6 +519,26 @@ const ( AT_SYMLINK_NOFOLLOW = 0x200 ) +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLINIGNEOF = 0x2000 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + type CapRights struct { Rights [2]uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index 42c0a502cf..473d3dcf08 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -519,6 +519,26 @@ const ( AT_SYMLINK_NOFOLLOW = 0x200 ) +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLINIGNEOF = 0x2000 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + type CapRights struct { Rights [2]uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go 
b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 22bdab9614..207c408e7b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -652,8 +652,6 @@ type Sigset_t struct { X__val [16]uint64 } -const _SC_PAGESIZE = 0x1e - type Termios struct { Iflag uint32 Oflag uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go index 42f99c0a30..dfe446bff7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go @@ -1,5 +1,5 @@ -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types_netbsd.go +// cgo -godefs types_netbsd.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. // +build 386,netbsd @@ -387,6 +387,25 @@ const ( AT_SYMLINK_NOFOLLOW = 0x200 ) +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + type Sysctlnode struct { Flags uint32 Num int32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go index ff290ba069..1498c23c22 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go @@ -1,5 +1,5 @@ -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types_netbsd.go +// cgo -godefs types_netbsd.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. // +build amd64,netbsd @@ -394,6 +394,25 @@ const ( AT_SYMLINK_NOFOLLOW = 0x200 ) +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + type Sysctlnode struct { Flags uint32 Num int32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index 66dbd7c050..d6711ce170 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -1,5 +1,5 @@ -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types_netbsd.go +// cgo -godefs types_netbsd.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. // +build arm,netbsd @@ -392,6 +392,25 @@ const ( AT_SYMLINK_NOFOLLOW = 0x200 ) +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + type Sysctlnode struct { Flags uint32 Num int32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go index 20fc9f450c..af295c3d0c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go @@ -1,5 +1,5 @@ -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types_openbsd.go +// cgo -godefs types_openbsd.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
// +build 386,openbsd @@ -444,3 +444,22 @@ const ( AT_FDCWD = -0x64 AT_SYMLINK_NOFOLLOW = 0x2 ) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go index 46fe9490c8..ae153e70c0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go @@ -1,5 +1,5 @@ -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types_openbsd.go +// cgo -godefs types_openbsd.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. // +build amd64,openbsd @@ -451,3 +451,22 @@ const ( AT_FDCWD = -0x64 AT_SYMLINK_NOFOLLOW = 0x2 ) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go index 62e1f7c04d..35bb6195bf 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go @@ -1,5 +1,5 @@ -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types_openbsd.go +// cgo -godefs types_openbsd.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. // +build arm,openbsd @@ -437,3 +437,22 @@ const ( AT_FDCWD = -0x64 AT_SYMLINK_NOFOLLOW = 0x2 ) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go index 92336f9f92..a979a33d51 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go @@ -413,8 +413,6 @@ type BpfHdr struct { Pad_cgo_0 [2]byte } -const _SC_PAGESIZE = 0xb - type Termios struct { Iflag uint32 Oflag uint32 diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go index e77a370550..e92c05b213 100644 --- a/vendor/golang.org/x/sys/windows/dll_windows.go +++ b/vendor/golang.org/x/sys/windows/dll_windows.go @@ -1,4 +1,4 @@ -// Copyright 2011 The Go Authors. All rights reserved. +// Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -116,7 +116,7 @@ func (p *Proc) Addr() uintptr { //go:uintptrescapes -// Call executes procedure p with arguments a. It will panic, if more then 15 arguments +// Call executes procedure p with arguments a. It will panic, if more than 15 arguments // are supplied. // // The returned error is always non-nil, constructed from the result of GetLastError. @@ -289,6 +289,7 @@ func (p *LazyProc) mustFind() { // Addr returns the address of the procedure represented by p. // The return value can be passed to Syscall to run the procedure. +// It will panic if the procedure cannot be found. 
func (p *LazyProc) Addr() uintptr { p.mustFind() return p.proc.Addr() @@ -296,8 +297,8 @@ func (p *LazyProc) Addr() uintptr { //go:uintptrescapes -// Call executes procedure p with arguments a. It will panic, if more then 15 arguments -// are supplied. +// Call executes procedure p with arguments a. It will panic, if more than 15 arguments +// are supplied. It will also panic if the procedure cannot be found. // // The returned error is always non-nil, constructed from the result of GetLastError. // Callers must inspect the primary return value to decide whether an error occurred diff --git a/vendor/golang.org/x/sys/windows/env_unset.go b/vendor/golang.org/x/sys/windows/env_unset.go index 4ed03aeefc..b712c6604a 100644 --- a/vendor/golang.org/x/sys/windows/env_unset.go +++ b/vendor/golang.org/x/sys/windows/env_unset.go @@ -1,4 +1,4 @@ -// Copyright 2014 The Go Authors. All rights reserved. +// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/golang.org/x/sys/windows/env_windows.go b/vendor/golang.org/x/sys/windows/env_windows.go index a9d8ef4b7d..e8292386c0 100644 --- a/vendor/golang.org/x/sys/windows/env_windows.go +++ b/vendor/golang.org/x/sys/windows/env_windows.go @@ -1,4 +1,4 @@ -// Copyright 2010 The Go Authors. All rights reserved. +// Copyright 2010 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/golang.org/x/sys/windows/memory_windows.go b/vendor/golang.org/x/sys/windows/memory_windows.go index f63e899acb..f80a4204f0 100644 --- a/vendor/golang.org/x/sys/windows/memory_windows.go +++ b/vendor/golang.org/x/sys/windows/memory_windows.go @@ -1,4 +1,4 @@ -// Copyright 2017 The Go Authors. All rights reserved. +// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/golang.org/x/sys/windows/mksyscall.go b/vendor/golang.org/x/sys/windows/mksyscall.go index e1c88c9c71..fb7db0ef8d 100644 --- a/vendor/golang.org/x/sys/windows/mksyscall.go +++ b/vendor/golang.org/x/sys/windows/mksyscall.go @@ -1,4 +1,4 @@ -// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/golang.org/x/sys/windows/race.go b/vendor/golang.org/x/sys/windows/race.go index 343e18ab69..a74e3e24b5 100644 --- a/vendor/golang.org/x/sys/windows/race.go +++ b/vendor/golang.org/x/sys/windows/race.go @@ -1,4 +1,4 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/golang.org/x/sys/windows/race0.go b/vendor/golang.org/x/sys/windows/race0.go index 17af843b91..e44a3cbf67 100644 --- a/vendor/golang.org/x/sys/windows/race0.go +++ b/vendor/golang.org/x/sys/windows/race0.go @@ -1,4 +1,4 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index ca09bdd701..d8e7ff2ec5 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -1,4 +1,4 @@ -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/golang.org/x/sys/windows/syscall.go b/vendor/golang.org/x/sys/windows/syscall.go index 4e2fbe86e2..b07bc2305d 100644 --- a/vendor/golang.org/x/sys/windows/syscall.go +++ b/vendor/golang.org/x/sys/windows/syscall.go @@ -5,10 +5,10 @@ // +build windows // Package windows contains an interface to the low-level operating system -// primitives. OS details vary depending on the underlying system, and +// primitives. OS details vary depending on the underlying system, and // by default, godoc will display the OS-specific documentation for the current -// system. If you want godoc to display syscall documentation for another -// system, set $GOOS and $GOARCH to the desired system. For example, if +// system. If you want godoc to display syscall documentation for another +// system, set $GOOS and $GOARCH to the desired system. For example, if // you want to view documentation for freebsd/arm on linux/amd64, set $GOOS // to freebsd and $GOARCH to arm. // The primary use of this package is inside other packages that provide a more diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 9b5ed549a5..bb778dbd2e 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -1,4 +1,4 @@ -// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -202,6 +202,21 @@ func NewCallbackCDecl(fn interface{}) uintptr { // syscall interface implementation for other packages +// GetProcAddressByOrdinal retrieves the address of the exported +// function from module by ordinal. +func GetProcAddressByOrdinal(module Handle, ordinal uintptr) (proc uintptr, err error) { + r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), ordinal, 0) + proc = uintptr(r0) + if proc == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + func Exit(code int) { ExitProcess(uint32(code)) } func makeInheritSa() *SecurityAttributes { diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 401a5f2d9a..0229f79cfc 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -1,4 +1,4 @@ -// Copyright 2011 The Go Authors. All rights reserved. +// Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/golang.org/x/sys/windows/types_windows_386.go b/vendor/golang.org/x/sys/windows/types_windows_386.go index 10f33be0b7..fe0ddd0316 100644 --- a/vendor/golang.org/x/sys/windows/types_windows_386.go +++ b/vendor/golang.org/x/sys/windows/types_windows_386.go @@ -1,4 +1,4 @@ -// Copyright 2011 The Go Authors. All rights reserved. 
+// Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/golang.org/x/sys/windows/types_windows_amd64.go b/vendor/golang.org/x/sys/windows/types_windows_amd64.go index 3f272c2499..7e154c2df2 100644 --- a/vendor/golang.org/x/sys/windows/types_windows_amd64.go +++ b/vendor/golang.org/x/sys/windows/types_windows_amd64.go @@ -1,4 +1,4 @@ -// Copyright 2011 The Go Authors. All rights reserved. +// Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/golang.org/x/text/width/gen.go b/vendor/golang.org/x/text/width/gen.go deleted file mode 100644 index 03d9f99ad6..0000000000 --- a/vendor/golang.org/x/text/width/gen.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// This program generates the trie for width operations. The generated table -// includes width category information as well as the normalization mappings. -package main - -import ( - "bytes" - "fmt" - "io" - "log" - "math" - "unicode/utf8" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/triegen" -) - -// See gen_common.go for flags. - -func main() { - gen.Init() - genTables() - genTests() - gen.Repackage("gen_trieval.go", "trieval.go", "width") - gen.Repackage("gen_common.go", "common_test.go", "width") -} - -func genTables() { - t := triegen.NewTrie("width") - // fold and inverse mappings. See mapComment for a description of the format - // of each entry. Add dummy value to make an index of 0 mean no mapping. - inverse := [][4]byte{{}} - mapping := map[[4]byte]int{[4]byte{}: 0} - - getWidthData(func(r rune, tag elem, alt rune) { - idx := 0 - if alt != 0 { - var buf [4]byte - buf[0] = byte(utf8.EncodeRune(buf[1:], alt)) - s := string(r) - buf[buf[0]] ^= s[len(s)-1] - var ok bool - if idx, ok = mapping[buf]; !ok { - idx = len(mapping) - if idx > math.MaxUint8 { - log.Fatalf("Index %d does not fit in a byte.", idx) - } - mapping[buf] = idx - inverse = append(inverse, buf) - } - } - t.Insert(r, uint64(tag|elem(idx))) - }) - - w := &bytes.Buffer{} - gen.WriteUnicodeVersion(w) - - sz, err := t.Gen(w) - if err != nil { - log.Fatal(err) - } - - sz += writeMappings(w, inverse) - - fmt.Fprintf(w, "// Total table size %d bytes (%dKiB)\n", sz, sz/1024) - - gen.WriteGoFile(*outputFile, "width", w.Bytes()) -} - -const inverseDataComment = ` -// inverseData contains 4-byte entries of the following format: -// <0 padding> -// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the -// UTF-8 encoding of the original rune. Mappings often have the following -// pattern: -// A -> A (U+FF21 -> U+0041) -// B -> B (U+FF22 -> U+0042) -// ... -// By xor-ing the last byte the same entry can be shared by many mappings. This -// reduces the total number of distinct entries by about two thirds. -// The resulting entry for the aforementioned mappings is -// { 0x01, 0xE0, 0x00, 0x00 } -// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get -// E0 ^ A1 = 41. -// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get -// E0 ^ A2 = 42. 
-// Note that because of the xor-ing, the byte sequence stored in the entry is -// not valid UTF-8.` - -func writeMappings(w io.Writer, data [][4]byte) int { - fmt.Fprintln(w, inverseDataComment) - fmt.Fprintf(w, "var inverseData = [%d][4]byte{\n", len(data)) - for _, x := range data { - fmt.Fprintf(w, "{ 0x%02x, 0x%02x, 0x%02x, 0x%02x },\n", x[0], x[1], x[2], x[3]) - } - fmt.Fprintln(w, "}") - return len(data) * 4 -} - -func genTests() { - w := &bytes.Buffer{} - fmt.Fprintf(w, "\nvar mapRunes = map[rune]struct{r rune; e elem}{\n") - getWidthData(func(r rune, tag elem, alt rune) { - if alt != 0 { - fmt.Fprintf(w, "\t0x%X: {0x%X, 0x%X},\n", r, alt, tag) - } - }) - fmt.Fprintln(w, "}") - gen.WriteGoFile("runes_test.go", "width", w.Bytes()) -} diff --git a/vendor/golang.org/x/text/width/gen_common.go b/vendor/golang.org/x/text/width/gen_common.go deleted file mode 100644 index 601e752684..0000000000 --- a/vendor/golang.org/x/text/width/gen_common.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// This code is shared between the main code generator and the test code. - -import ( - "flag" - "log" - "strconv" - "strings" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/ucd" -) - -var ( - outputFile = flag.String("out", "tables.go", "output file") -) - -var typeMap = map[string]elem{ - "A": tagAmbiguous, - "N": tagNeutral, - "Na": tagNarrow, - "W": tagWide, - "F": tagFullwidth, - "H": tagHalfwidth, -} - -// getWidthData calls f for every entry for which it is defined. -// -// f may be called multiple times for the same rune. The last call to f is the -// correct value. f is not called for all runes. The default tag type is -// Neutral. -func getWidthData(f func(r rune, tag elem, alt rune)) { - // Set the default values for Unified Ideographs. In line with Annex 11, - // we encode full ranges instead of the defined runes in Unified_Ideograph. - for _, b := range []struct{ lo, hi rune }{ - {0x4E00, 0x9FFF}, // the CJK Unified Ideographs block, - {0x3400, 0x4DBF}, // the CJK Unified Ideographs Externsion A block, - {0xF900, 0xFAFF}, // the CJK Compatibility Ideographs block, - {0x20000, 0x2FFFF}, // the Supplementary Ideographic Plane, - {0x30000, 0x3FFFF}, // the Tertiary Ideographic Plane, - } { - for r := b.lo; r <= b.hi; r++ { - f(r, tagWide, 0) - } - } - - inverse := map[rune]rune{} - maps := map[string]bool{ - "": true, - "": true, - } - - // We cannot reuse package norm's decomposition, as we need an unexpanded - // decomposition. We make use of the opportunity to verify that the - // decomposition type is as expected. 
- ucd.Parse(gen.OpenUCDFile("UnicodeData.txt"), func(p *ucd.Parser) { - r := p.Rune(0) - s := strings.SplitN(p.String(ucd.DecompMapping), " ", 2) - if !maps[s[0]] { - return - } - x, err := strconv.ParseUint(s[1], 16, 32) - if err != nil { - log.Fatalf("Error parsing rune %q", s[1]) - } - if inverse[r] != 0 || inverse[rune(x)] != 0 { - log.Fatalf("Circular dependency in mapping between %U and %U", r, x) - } - inverse[r] = rune(x) - inverse[rune(x)] = r - }) - - // ; - ucd.Parse(gen.OpenUCDFile("EastAsianWidth.txt"), func(p *ucd.Parser) { - tag, ok := typeMap[p.String(1)] - if !ok { - log.Fatalf("Unknown width type %q", p.String(1)) - } - r := p.Rune(0) - alt, ok := inverse[r] - if tag == tagFullwidth || tag == tagHalfwidth && r != wonSign { - tag |= tagNeedsFold - if !ok { - log.Fatalf("Narrow or wide rune %U has no decomposition", r) - } - } - f(r, tag, alt) - }) -} diff --git a/vendor/golang.org/x/text/width/gen_trieval.go b/vendor/golang.org/x/text/width/gen_trieval.go deleted file mode 100644 index c17334aa61..0000000000 --- a/vendor/golang.org/x/text/width/gen_trieval.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -// elem is an entry of the width trie. The high byte is used to encode the type -// of the rune. The low byte is used to store the index to a mapping entry in -// the inverseData array. -type elem uint16 - -const ( - tagNeutral elem = iota << typeShift - tagAmbiguous - tagWide - tagNarrow - tagFullwidth - tagHalfwidth -) - -const ( - numTypeBits = 3 - typeShift = 16 - numTypeBits - - // tagNeedsFold is true for all fullwidth and halfwidth runes except for - // the Won sign U+20A9. - tagNeedsFold = 0x1000 - - // The Korean Won sign is halfwidth, but SHOULD NOT be mapped to a wide - // variant. - wonSign rune = 0x20A9 -) diff --git a/vendor/golang.org/x/text/width/tables.go b/vendor/golang.org/x/text/width/tables.go index e21f0b8385..710fd75254 100644 --- a/vendor/golang.org/x/text/width/tables.go +++ b/vendor/golang.org/x/text/width/tables.go @@ -3,7 +3,7 @@ package width // UnicodeVersion is the Unicode version from which the tables in this package are derived. -const UnicodeVersion = "9.0.0" +const UnicodeVersion = "10.0.0" // lookup returns the trie value for the first UTF-8 encoding in s and // the width in bytes of this encoding. The size will be 0 if s does not @@ -175,7 +175,7 @@ func (t *widthTrie) lookupStringUnsafe(s string) uint16 { return 0 } -// widthTrie. Total size: 14080 bytes (13.75 KiB). Checksum: 3b8aeb3dc03667a3. +// widthTrie. Total size: 14336 bytes (14.00 KiB). Checksum: c59df54630d3dc4a. type widthTrie struct{} func newWidthTrie(i int) *widthTrie { @@ -190,9 +190,9 @@ func (t *widthTrie) lookupValue(n uint32, b byte) uint16 { } } -// widthValues: 99 blocks, 6336 entries, 12672 bytes +// widthValues: 101 blocks, 6464 entries, 12928 bytes // The third block is the zero block. 
-var widthValues = [6336]uint16{ +var widthValues = [6464]uint16{ // Block 0x0, offset 0x0 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, @@ -606,7 +606,7 @@ var widthValues = [6336]uint16{ 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, - 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, + 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, 0xdae: 0x4000, 0xdb1: 0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, @@ -773,7 +773,7 @@ var widthValues = [6336]uint16{ 0x11aa: 0xb092, 0x11ab: 0xb091, 0x11ac: 0xb093, 0x11ad: 0xb094, 0x11ae: 0xb095, 0x11bd: 0x2000, // Block 0x47, offset 0x11c0 - 0x11e0: 0x4000, + 0x11e0: 0x4000, 0x11e1: 0x4000, // Block 0x48, offset 0x1200 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, @@ -794,109 +794,108 @@ var widthValues = [6336]uint16{ 0x126a: 0x4000, 0x126b: 0x4000, 0x126c: 0x4000, 0x126d: 0x4000, 0x126e: 0x4000, 0x126f: 0x4000, 0x1270: 0x4000, 0x1271: 0x4000, 0x1272: 0x4000, // Block 0x4a, offset 0x1280 - 0x1280: 0x4000, 0x1281: 0x4000, + 0x1280: 0x4000, 0x1281: 0x4000, 0x1282: 0x4000, 0x1283: 0x4000, 0x1284: 0x4000, 0x1285: 0x4000, + 0x1286: 0x4000, 0x1287: 0x4000, 0x1288: 0x4000, 0x1289: 0x4000, 0x128a: 0x4000, 0x128b: 0x4000, + 0x128c: 0x4000, 0x128d: 0x4000, 0x128e: 0x4000, 0x128f: 0x4000, 0x1290: 0x4000, 0x1291: 0x4000, + 0x1292: 0x4000, 0x1293: 0x4000, 0x1294: 0x4000, 0x1295: 0x4000, 0x1296: 0x4000, 0x1297: 0x4000, + 0x1298: 0x4000, 0x1299: 0x4000, 0x129a: 0x4000, 0x129b: 0x4000, 0x129c: 0x4000, 0x129d: 0x4000, + 0x129e: 0x4000, // Block 0x4b, offset 0x12c0 - 0x12c4: 0x4000, + 0x12f0: 0x4000, 0x12f1: 0x4000, 0x12f2: 0x4000, 0x12f3: 0x4000, 0x12f4: 0x4000, 0x12f5: 0x4000, + 0x12f6: 0x4000, 0x12f7: 0x4000, 0x12f8: 0x4000, 0x12f9: 0x4000, 0x12fa: 0x4000, 0x12fb: 0x4000, + 0x12fc: 0x4000, 0x12fd: 0x4000, 0x12fe: 0x4000, 0x12ff: 0x4000, // Block 0x4c, offset 0x1300 - 0x130f: 0x4000, + 0x1300: 0x4000, 0x1301: 0x4000, 0x1302: 0x4000, 0x1303: 0x4000, 0x1304: 0x4000, 0x1305: 0x4000, + 0x1306: 0x4000, 0x1307: 0x4000, 0x1308: 0x4000, 0x1309: 0x4000, 0x130a: 0x4000, 0x130b: 0x4000, + 0x130c: 0x4000, 0x130d: 0x4000, 0x130e: 0x4000, 0x130f: 0x4000, 0x1310: 0x4000, 0x1311: 0x4000, + 0x1312: 0x4000, 0x1313: 0x4000, 0x1314: 0x4000, 0x1315: 0x4000, 0x1316: 0x4000, 0x1317: 0x4000, + 0x1318: 0x4000, 0x1319: 0x4000, 0x131a: 0x4000, 0x131b: 0x4000, 0x131c: 0x4000, 0x131d: 0x4000, + 0x131e: 0x4000, 0x131f: 0x4000, 0x1320: 0x4000, 0x1321: 0x4000, 0x1322: 0x4000, 0x1323: 0x4000, + 0x1324: 0x4000, 0x1325: 0x4000, 0x1326: 0x4000, 0x1327: 0x4000, 0x1328: 0x4000, 0x1329: 0x4000, + 0x132a: 0x4000, 0x132b: 0x4000, 0x132c: 0x4000, 0x132d: 0x4000, 0x132e: 0x4000, 0x132f: 0x4000, + 0x1330: 0x4000, 0x1331: 0x4000, 0x1332: 0x4000, 0x1333: 0x4000, 0x1334: 0x4000, 0x1335: 0x4000, + 0x1336: 0x4000, 0x1337: 0x4000, 0x1338: 0x4000, 0x1339: 0x4000, 0x133a: 0x4000, 0x133b: 0x4000, // Block 0x4d, offset 0x1340 - 0x1340: 0x2000, 0x1341: 0x2000, 0x1342: 0x2000, 
0x1343: 0x2000, 0x1344: 0x2000, 0x1345: 0x2000, - 0x1346: 0x2000, 0x1347: 0x2000, 0x1348: 0x2000, 0x1349: 0x2000, 0x134a: 0x2000, - 0x1350: 0x2000, 0x1351: 0x2000, - 0x1352: 0x2000, 0x1353: 0x2000, 0x1354: 0x2000, 0x1355: 0x2000, 0x1356: 0x2000, 0x1357: 0x2000, - 0x1358: 0x2000, 0x1359: 0x2000, 0x135a: 0x2000, 0x135b: 0x2000, 0x135c: 0x2000, 0x135d: 0x2000, - 0x135e: 0x2000, 0x135f: 0x2000, 0x1360: 0x2000, 0x1361: 0x2000, 0x1362: 0x2000, 0x1363: 0x2000, - 0x1364: 0x2000, 0x1365: 0x2000, 0x1366: 0x2000, 0x1367: 0x2000, 0x1368: 0x2000, 0x1369: 0x2000, - 0x136a: 0x2000, 0x136b: 0x2000, 0x136c: 0x2000, 0x136d: 0x2000, - 0x1370: 0x2000, 0x1371: 0x2000, 0x1372: 0x2000, 0x1373: 0x2000, 0x1374: 0x2000, 0x1375: 0x2000, - 0x1376: 0x2000, 0x1377: 0x2000, 0x1378: 0x2000, 0x1379: 0x2000, 0x137a: 0x2000, 0x137b: 0x2000, - 0x137c: 0x2000, 0x137d: 0x2000, 0x137e: 0x2000, 0x137f: 0x2000, + 0x1344: 0x4000, // Block 0x4e, offset 0x1380 - 0x1380: 0x2000, 0x1381: 0x2000, 0x1382: 0x2000, 0x1383: 0x2000, 0x1384: 0x2000, 0x1385: 0x2000, - 0x1386: 0x2000, 0x1387: 0x2000, 0x1388: 0x2000, 0x1389: 0x2000, 0x138a: 0x2000, 0x138b: 0x2000, - 0x138c: 0x2000, 0x138d: 0x2000, 0x138e: 0x2000, 0x138f: 0x2000, 0x1390: 0x2000, 0x1391: 0x2000, - 0x1392: 0x2000, 0x1393: 0x2000, 0x1394: 0x2000, 0x1395: 0x2000, 0x1396: 0x2000, 0x1397: 0x2000, - 0x1398: 0x2000, 0x1399: 0x2000, 0x139a: 0x2000, 0x139b: 0x2000, 0x139c: 0x2000, 0x139d: 0x2000, - 0x139e: 0x2000, 0x139f: 0x2000, 0x13a0: 0x2000, 0x13a1: 0x2000, 0x13a2: 0x2000, 0x13a3: 0x2000, - 0x13a4: 0x2000, 0x13a5: 0x2000, 0x13a6: 0x2000, 0x13a7: 0x2000, 0x13a8: 0x2000, 0x13a9: 0x2000, - 0x13b0: 0x2000, 0x13b1: 0x2000, 0x13b2: 0x2000, 0x13b3: 0x2000, 0x13b4: 0x2000, 0x13b5: 0x2000, - 0x13b6: 0x2000, 0x13b7: 0x2000, 0x13b8: 0x2000, 0x13b9: 0x2000, 0x13ba: 0x2000, 0x13bb: 0x2000, - 0x13bc: 0x2000, 0x13bd: 0x2000, 0x13be: 0x2000, 0x13bf: 0x2000, + 0x138f: 0x4000, // Block 0x4f, offset 0x13c0 0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000, - 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, 0x13cb: 0x2000, - 0x13cc: 0x2000, 0x13cd: 0x2000, 0x13ce: 0x4000, 0x13cf: 0x2000, 0x13d0: 0x2000, 0x13d1: 0x4000, - 0x13d2: 0x4000, 0x13d3: 0x4000, 0x13d4: 0x4000, 0x13d5: 0x4000, 0x13d6: 0x4000, 0x13d7: 0x4000, - 0x13d8: 0x4000, 0x13d9: 0x4000, 0x13da: 0x4000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000, + 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, + 0x13d0: 0x2000, 0x13d1: 0x2000, + 0x13d2: 0x2000, 0x13d3: 0x2000, 0x13d4: 0x2000, 0x13d5: 0x2000, 0x13d6: 0x2000, 0x13d7: 0x2000, + 0x13d8: 0x2000, 0x13d9: 0x2000, 0x13da: 0x2000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000, 0x13de: 0x2000, 0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000, 0x13e4: 0x2000, 0x13e5: 0x2000, 0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000, - 0x13ea: 0x2000, 0x13eb: 0x2000, 0x13ec: 0x2000, + 0x13ea: 0x2000, 0x13eb: 0x2000, 0x13ec: 0x2000, 0x13ed: 0x2000, + 0x13f0: 0x2000, 0x13f1: 0x2000, 0x13f2: 0x2000, 0x13f3: 0x2000, 0x13f4: 0x2000, 0x13f5: 0x2000, + 0x13f6: 0x2000, 0x13f7: 0x2000, 0x13f8: 0x2000, 0x13f9: 0x2000, 0x13fa: 0x2000, 0x13fb: 0x2000, + 0x13fc: 0x2000, 0x13fd: 0x2000, 0x13fe: 0x2000, 0x13ff: 0x2000, // Block 0x50, offset 0x1400 - 0x1400: 0x4000, 0x1401: 0x4000, 0x1402: 0x4000, - 0x1410: 0x4000, 0x1411: 0x4000, - 0x1412: 0x4000, 0x1413: 0x4000, 0x1414: 0x4000, 0x1415: 0x4000, 0x1416: 0x4000, 0x1417: 0x4000, - 0x1418: 0x4000, 0x1419: 0x4000, 0x141a: 
0x4000, 0x141b: 0x4000, 0x141c: 0x4000, 0x141d: 0x4000, - 0x141e: 0x4000, 0x141f: 0x4000, 0x1420: 0x4000, 0x1421: 0x4000, 0x1422: 0x4000, 0x1423: 0x4000, - 0x1424: 0x4000, 0x1425: 0x4000, 0x1426: 0x4000, 0x1427: 0x4000, 0x1428: 0x4000, 0x1429: 0x4000, - 0x142a: 0x4000, 0x142b: 0x4000, 0x142c: 0x4000, 0x142d: 0x4000, 0x142e: 0x4000, 0x142f: 0x4000, - 0x1430: 0x4000, 0x1431: 0x4000, 0x1432: 0x4000, 0x1433: 0x4000, 0x1434: 0x4000, 0x1435: 0x4000, - 0x1436: 0x4000, 0x1437: 0x4000, 0x1438: 0x4000, 0x1439: 0x4000, 0x143a: 0x4000, 0x143b: 0x4000, + 0x1400: 0x2000, 0x1401: 0x2000, 0x1402: 0x2000, 0x1403: 0x2000, 0x1404: 0x2000, 0x1405: 0x2000, + 0x1406: 0x2000, 0x1407: 0x2000, 0x1408: 0x2000, 0x1409: 0x2000, 0x140a: 0x2000, 0x140b: 0x2000, + 0x140c: 0x2000, 0x140d: 0x2000, 0x140e: 0x2000, 0x140f: 0x2000, 0x1410: 0x2000, 0x1411: 0x2000, + 0x1412: 0x2000, 0x1413: 0x2000, 0x1414: 0x2000, 0x1415: 0x2000, 0x1416: 0x2000, 0x1417: 0x2000, + 0x1418: 0x2000, 0x1419: 0x2000, 0x141a: 0x2000, 0x141b: 0x2000, 0x141c: 0x2000, 0x141d: 0x2000, + 0x141e: 0x2000, 0x141f: 0x2000, 0x1420: 0x2000, 0x1421: 0x2000, 0x1422: 0x2000, 0x1423: 0x2000, + 0x1424: 0x2000, 0x1425: 0x2000, 0x1426: 0x2000, 0x1427: 0x2000, 0x1428: 0x2000, 0x1429: 0x2000, + 0x1430: 0x2000, 0x1431: 0x2000, 0x1432: 0x2000, 0x1433: 0x2000, 0x1434: 0x2000, 0x1435: 0x2000, + 0x1436: 0x2000, 0x1437: 0x2000, 0x1438: 0x2000, 0x1439: 0x2000, 0x143a: 0x2000, 0x143b: 0x2000, + 0x143c: 0x2000, 0x143d: 0x2000, 0x143e: 0x2000, 0x143f: 0x2000, // Block 0x51, offset 0x1440 - 0x1440: 0x4000, 0x1441: 0x4000, 0x1442: 0x4000, 0x1443: 0x4000, 0x1444: 0x4000, 0x1445: 0x4000, - 0x1446: 0x4000, 0x1447: 0x4000, 0x1448: 0x4000, - 0x1450: 0x4000, 0x1451: 0x4000, + 0x1440: 0x2000, 0x1441: 0x2000, 0x1442: 0x2000, 0x1443: 0x2000, 0x1444: 0x2000, 0x1445: 0x2000, + 0x1446: 0x2000, 0x1447: 0x2000, 0x1448: 0x2000, 0x1449: 0x2000, 0x144a: 0x2000, 0x144b: 0x2000, + 0x144c: 0x2000, 0x144d: 0x2000, 0x144e: 0x4000, 0x144f: 0x2000, 0x1450: 0x2000, 0x1451: 0x4000, + 0x1452: 0x4000, 0x1453: 0x4000, 0x1454: 0x4000, 0x1455: 0x4000, 0x1456: 0x4000, 0x1457: 0x4000, + 0x1458: 0x4000, 0x1459: 0x4000, 0x145a: 0x4000, 0x145b: 0x2000, 0x145c: 0x2000, 0x145d: 0x2000, + 0x145e: 0x2000, 0x145f: 0x2000, 0x1460: 0x2000, 0x1461: 0x2000, 0x1462: 0x2000, 0x1463: 0x2000, + 0x1464: 0x2000, 0x1465: 0x2000, 0x1466: 0x2000, 0x1467: 0x2000, 0x1468: 0x2000, 0x1469: 0x2000, + 0x146a: 0x2000, 0x146b: 0x2000, 0x146c: 0x2000, // Block 0x52, offset 0x1480 - 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, 0x1483: 0x4000, 0x1484: 0x4000, 0x1485: 0x4000, - 0x1486: 0x4000, 0x1487: 0x4000, 0x1488: 0x4000, 0x1489: 0x4000, 0x148a: 0x4000, 0x148b: 0x4000, - 0x148c: 0x4000, 0x148d: 0x4000, 0x148e: 0x4000, 0x148f: 0x4000, 0x1490: 0x4000, 0x1491: 0x4000, + 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, + 0x1490: 0x4000, 0x1491: 0x4000, 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, 0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000, - 0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, - 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000, + 0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, 0x14a1: 0x4000, 0x14a2: 0x4000, 0x14a3: 0x4000, + 0x14a4: 0x4000, 0x14a5: 0x4000, 0x14a6: 0x4000, 0x14a7: 0x4000, 0x14a8: 0x4000, 0x14a9: 0x4000, + 0x14aa: 0x4000, 0x14ab: 0x4000, 0x14ac: 0x4000, 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000, 0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000, - 0x14b7: 0x4000, 
0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000, - 0x14bc: 0x4000, 0x14bd: 0x4000, 0x14be: 0x4000, 0x14bf: 0x4000, + 0x14b6: 0x4000, 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000, // Block 0x53, offset 0x14c0 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000, - 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, 0x14c9: 0x4000, 0x14ca: 0x4000, 0x14cb: 0x4000, - 0x14cc: 0x4000, 0x14cd: 0x4000, 0x14ce: 0x4000, 0x14cf: 0x4000, 0x14d0: 0x4000, 0x14d1: 0x4000, - 0x14d2: 0x4000, 0x14d3: 0x4000, 0x14d4: 0x4000, 0x14d5: 0x4000, 0x14d6: 0x4000, 0x14d7: 0x4000, - 0x14d8: 0x4000, 0x14d9: 0x4000, 0x14da: 0x4000, 0x14db: 0x4000, 0x14dc: 0x4000, 0x14dd: 0x4000, - 0x14de: 0x4000, 0x14df: 0x4000, 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, - 0x14e4: 0x4000, 0x14e5: 0x4000, 0x14e6: 0x4000, 0x14e7: 0x4000, 0x14e8: 0x4000, 0x14e9: 0x4000, - 0x14ea: 0x4000, 0x14eb: 0x4000, 0x14ec: 0x4000, 0x14ed: 0x4000, 0x14ee: 0x4000, 0x14ef: 0x4000, - 0x14f0: 0x4000, 0x14f1: 0x4000, 0x14f2: 0x4000, 0x14f3: 0x4000, 0x14f4: 0x4000, 0x14f5: 0x4000, - 0x14f6: 0x4000, 0x14f7: 0x4000, 0x14f8: 0x4000, 0x14f9: 0x4000, 0x14fa: 0x4000, 0x14fb: 0x4000, - 0x14fc: 0x4000, 0x14fe: 0x4000, 0x14ff: 0x4000, + 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, + 0x14d0: 0x4000, 0x14d1: 0x4000, + 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, + 0x14e4: 0x4000, 0x14e5: 0x4000, // Block 0x54, offset 0x1500 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000, 0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 0x4000, 0x1511: 0x4000, - 0x1512: 0x4000, 0x1513: 0x4000, - 0x1520: 0x4000, 0x1521: 0x4000, 0x1522: 0x4000, 0x1523: 0x4000, - 0x1524: 0x4000, 0x1525: 0x4000, 0x1526: 0x4000, 0x1527: 0x4000, 0x1528: 0x4000, 0x1529: 0x4000, - 0x152a: 0x4000, 0x152b: 0x4000, 0x152c: 0x4000, 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000, + 0x1512: 0x4000, 0x1513: 0x4000, 0x1514: 0x4000, 0x1515: 0x4000, 0x1516: 0x4000, 0x1517: 0x4000, + 0x1518: 0x4000, 0x1519: 0x4000, 0x151a: 0x4000, 0x151b: 0x4000, 0x151c: 0x4000, 0x151d: 0x4000, + 0x151e: 0x4000, 0x151f: 0x4000, 0x1520: 0x4000, + 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000, 0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000, - 0x1536: 0x4000, 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000, + 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000, 0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000, // Block 0x55, offset 0x1540 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, - 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, - 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, - 0x1552: 0x4000, 0x1553: 0x4000, - 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000, + 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, 0x154b: 0x4000, + 0x154c: 0x4000, 0x154d: 0x4000, 0x154e: 0x4000, 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, + 0x1552: 0x4000, 0x1553: 0x4000, 0x1554: 0x4000, 0x1555: 0x4000, 0x1556: 0x4000, 0x1557: 0x4000, + 0x1558: 0x4000, 0x1559: 0x4000, 0x155a: 0x4000, 0x155b: 0x4000, 0x155c: 0x4000, 0x155d: 0x4000, + 0x155e: 0x4000, 0x155f: 0x4000, 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 
0x1563: 0x4000, 0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000, 0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, - 0x1570: 0x4000, 0x1574: 0x4000, - 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, - 0x157c: 0x4000, 0x157d: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, + 0x1570: 0x4000, 0x1571: 0x4000, 0x1572: 0x4000, 0x1573: 0x4000, 0x1574: 0x4000, 0x1575: 0x4000, + 0x1576: 0x4000, 0x1577: 0x4000, 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, + 0x157c: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, // Block 0x56, offset 0x1580 - 0x1580: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, + 0x1580: 0x4000, 0x1581: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, - 0x1592: 0x4000, 0x1593: 0x4000, 0x1594: 0x4000, 0x1595: 0x4000, 0x1596: 0x4000, 0x1597: 0x4000, - 0x1598: 0x4000, 0x1599: 0x4000, 0x159a: 0x4000, 0x159b: 0x4000, 0x159c: 0x4000, 0x159d: 0x4000, - 0x159e: 0x4000, 0x159f: 0x4000, 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, + 0x1592: 0x4000, 0x1593: 0x4000, + 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, @@ -904,18 +903,17 @@ var widthValues = [6336]uint16{ 0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, // Block 0x57, offset 0x15c0 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, - 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, 0x15cb: 0x4000, - 0x15cc: 0x4000, 0x15cd: 0x4000, 0x15ce: 0x4000, 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, - 0x15d2: 0x4000, 0x15d3: 0x4000, 0x15d4: 0x4000, 0x15d5: 0x4000, 0x15d6: 0x4000, 0x15d7: 0x4000, - 0x15d8: 0x4000, 0x15d9: 0x4000, 0x15da: 0x4000, 0x15db: 0x4000, 0x15dc: 0x4000, 0x15dd: 0x4000, - 0x15de: 0x4000, 0x15df: 0x4000, 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, + 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, + 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, + 0x15d2: 0x4000, 0x15d3: 0x4000, + 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, - 0x15f0: 0x4000, 0x15f1: 0x4000, 0x15f2: 0x4000, 0x15f3: 0x4000, 0x15f4: 0x4000, 0x15f5: 0x4000, - 0x15f6: 0x4000, 0x15f7: 0x4000, 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, - 0x15fc: 0x4000, 0x15ff: 0x4000, + 0x15f0: 0x4000, 0x15f4: 0x4000, + 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, + 0x15fc: 0x4000, 0x15fd: 0x4000, 0x15fe: 0x4000, 0x15ff: 0x4000, // Block 0x58, offset 0x1600 - 0x1600: 0x4000, 0x1601: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000, + 0x1600: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000, 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 
0x4000, 0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, 0x1612: 0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000, @@ -925,66 +923,99 @@ var widthValues = [6336]uint16{ 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, 0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000, 0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, - 0x163c: 0x4000, 0x163d: 0x4000, + 0x163c: 0x4000, 0x163d: 0x4000, 0x163e: 0x4000, 0x163f: 0x4000, // Block 0x59, offset 0x1640 - 0x164b: 0x4000, - 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, + 0x1640: 0x4000, 0x1641: 0x4000, 0x1642: 0x4000, 0x1643: 0x4000, 0x1644: 0x4000, 0x1645: 0x4000, + 0x1646: 0x4000, 0x1647: 0x4000, 0x1648: 0x4000, 0x1649: 0x4000, 0x164a: 0x4000, 0x164b: 0x4000, + 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x164f: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, - 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, - 0x167a: 0x4000, + 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, 0x1668: 0x4000, 0x1669: 0x4000, + 0x166a: 0x4000, 0x166b: 0x4000, 0x166c: 0x4000, 0x166d: 0x4000, 0x166e: 0x4000, 0x166f: 0x4000, + 0x1670: 0x4000, 0x1671: 0x4000, 0x1672: 0x4000, 0x1673: 0x4000, 0x1674: 0x4000, 0x1675: 0x4000, + 0x1676: 0x4000, 0x1677: 0x4000, 0x1678: 0x4000, 0x1679: 0x4000, 0x167a: 0x4000, 0x167b: 0x4000, + 0x167c: 0x4000, 0x167f: 0x4000, // Block 0x5a, offset 0x1680 - 0x1695: 0x4000, 0x1696: 0x4000, - 0x16a4: 0x4000, + 0x1680: 0x4000, 0x1681: 0x4000, 0x1682: 0x4000, 0x1683: 0x4000, 0x1684: 0x4000, 0x1685: 0x4000, + 0x1686: 0x4000, 0x1687: 0x4000, 0x1688: 0x4000, 0x1689: 0x4000, 0x168a: 0x4000, 0x168b: 0x4000, + 0x168c: 0x4000, 0x168d: 0x4000, 0x168e: 0x4000, 0x168f: 0x4000, 0x1690: 0x4000, 0x1691: 0x4000, + 0x1692: 0x4000, 0x1693: 0x4000, 0x1694: 0x4000, 0x1695: 0x4000, 0x1696: 0x4000, 0x1697: 0x4000, + 0x1698: 0x4000, 0x1699: 0x4000, 0x169a: 0x4000, 0x169b: 0x4000, 0x169c: 0x4000, 0x169d: 0x4000, + 0x169e: 0x4000, 0x169f: 0x4000, 0x16a0: 0x4000, 0x16a1: 0x4000, 0x16a2: 0x4000, 0x16a3: 0x4000, + 0x16a4: 0x4000, 0x16a5: 0x4000, 0x16a6: 0x4000, 0x16a7: 0x4000, 0x16a8: 0x4000, 0x16a9: 0x4000, + 0x16aa: 0x4000, 0x16ab: 0x4000, 0x16ac: 0x4000, 0x16ad: 0x4000, 0x16ae: 0x4000, 0x16af: 0x4000, + 0x16b0: 0x4000, 0x16b1: 0x4000, 0x16b2: 0x4000, 0x16b3: 0x4000, 0x16b4: 0x4000, 0x16b5: 0x4000, + 0x16b6: 0x4000, 0x16b7: 0x4000, 0x16b8: 0x4000, 0x16b9: 0x4000, 0x16ba: 0x4000, 0x16bb: 0x4000, + 0x16bc: 0x4000, 0x16bd: 0x4000, // Block 0x5b, offset 0x16c0 - 0x16fb: 0x4000, - 0x16fc: 0x4000, 0x16fd: 0x4000, 0x16fe: 0x4000, 0x16ff: 0x4000, + 0x16cb: 0x4000, + 0x16cc: 0x4000, 0x16cd: 0x4000, 0x16ce: 0x4000, 0x16d0: 0x4000, 0x16d1: 0x4000, + 0x16d2: 0x4000, 0x16d3: 0x4000, 0x16d4: 0x4000, 0x16d5: 0x4000, 0x16d6: 0x4000, 0x16d7: 0x4000, + 0x16d8: 0x4000, 0x16d9: 0x4000, 0x16da: 0x4000, 0x16db: 0x4000, 0x16dc: 0x4000, 0x16dd: 0x4000, + 0x16de: 0x4000, 0x16df: 0x4000, 0x16e0: 0x4000, 0x16e1: 0x4000, 0x16e2: 0x4000, 0x16e3: 0x4000, + 0x16e4: 0x4000, 0x16e5: 0x4000, 0x16e6: 0x4000, 0x16e7: 0x4000, + 0x16fa: 0x4000, // 
Block 0x5c, offset 0x1700 - 0x1700: 0x4000, 0x1701: 0x4000, 0x1702: 0x4000, 0x1703: 0x4000, 0x1704: 0x4000, 0x1705: 0x4000, - 0x1706: 0x4000, 0x1707: 0x4000, 0x1708: 0x4000, 0x1709: 0x4000, 0x170a: 0x4000, 0x170b: 0x4000, - 0x170c: 0x4000, 0x170d: 0x4000, 0x170e: 0x4000, 0x170f: 0x4000, + 0x1715: 0x4000, 0x1716: 0x4000, + 0x1724: 0x4000, // Block 0x5d, offset 0x1740 - 0x1740: 0x4000, 0x1741: 0x4000, 0x1742: 0x4000, 0x1743: 0x4000, 0x1744: 0x4000, 0x1745: 0x4000, - 0x174c: 0x4000, 0x1750: 0x4000, 0x1751: 0x4000, - 0x1752: 0x4000, - 0x176b: 0x4000, 0x176c: 0x4000, - 0x1774: 0x4000, 0x1775: 0x4000, - 0x1776: 0x4000, + 0x177b: 0x4000, + 0x177c: 0x4000, 0x177d: 0x4000, 0x177e: 0x4000, 0x177f: 0x4000, // Block 0x5e, offset 0x1780 - 0x1790: 0x4000, 0x1791: 0x4000, - 0x1792: 0x4000, 0x1793: 0x4000, 0x1794: 0x4000, 0x1795: 0x4000, 0x1796: 0x4000, 0x1797: 0x4000, - 0x1798: 0x4000, 0x1799: 0x4000, 0x179a: 0x4000, 0x179b: 0x4000, 0x179c: 0x4000, 0x179d: 0x4000, - 0x179e: 0x4000, 0x17a0: 0x4000, 0x17a1: 0x4000, 0x17a2: 0x4000, 0x17a3: 0x4000, - 0x17a4: 0x4000, 0x17a5: 0x4000, 0x17a6: 0x4000, 0x17a7: 0x4000, - 0x17b0: 0x4000, 0x17b3: 0x4000, 0x17b4: 0x4000, 0x17b5: 0x4000, - 0x17b6: 0x4000, 0x17b7: 0x4000, 0x17b8: 0x4000, 0x17b9: 0x4000, 0x17ba: 0x4000, 0x17bb: 0x4000, - 0x17bc: 0x4000, 0x17bd: 0x4000, 0x17be: 0x4000, + 0x1780: 0x4000, 0x1781: 0x4000, 0x1782: 0x4000, 0x1783: 0x4000, 0x1784: 0x4000, 0x1785: 0x4000, + 0x1786: 0x4000, 0x1787: 0x4000, 0x1788: 0x4000, 0x1789: 0x4000, 0x178a: 0x4000, 0x178b: 0x4000, + 0x178c: 0x4000, 0x178d: 0x4000, 0x178e: 0x4000, 0x178f: 0x4000, // Block 0x5f, offset 0x17c0 0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000, - 0x17c6: 0x4000, 0x17c7: 0x4000, 0x17c8: 0x4000, 0x17c9: 0x4000, 0x17ca: 0x4000, 0x17cb: 0x4000, - 0x17d0: 0x4000, 0x17d1: 0x4000, - 0x17d2: 0x4000, 0x17d3: 0x4000, 0x17d4: 0x4000, 0x17d5: 0x4000, 0x17d6: 0x4000, 0x17d7: 0x4000, - 0x17d8: 0x4000, 0x17d9: 0x4000, 0x17da: 0x4000, 0x17db: 0x4000, 0x17dc: 0x4000, 0x17dd: 0x4000, - 0x17de: 0x4000, + 0x17cc: 0x4000, 0x17d0: 0x4000, 0x17d1: 0x4000, + 0x17d2: 0x4000, + 0x17eb: 0x4000, 0x17ec: 0x4000, + 0x17f4: 0x4000, 0x17f5: 0x4000, + 0x17f6: 0x4000, 0x17f7: 0x4000, 0x17f8: 0x4000, // Block 0x60, offset 0x1800 - 0x1800: 0x4000, 0x1801: 0x4000, 0x1802: 0x4000, 0x1803: 0x4000, 0x1804: 0x4000, 0x1805: 0x4000, - 0x1806: 0x4000, 0x1807: 0x4000, 0x1808: 0x4000, 0x1809: 0x4000, 0x180a: 0x4000, 0x180b: 0x4000, - 0x180c: 0x4000, 0x180d: 0x4000, 0x180e: 0x4000, 0x180f: 0x4000, 0x1810: 0x4000, 0x1811: 0x4000, + 0x1810: 0x4000, 0x1811: 0x4000, + 0x1812: 0x4000, 0x1813: 0x4000, 0x1814: 0x4000, 0x1815: 0x4000, 0x1816: 0x4000, 0x1817: 0x4000, + 0x1818: 0x4000, 0x1819: 0x4000, 0x181a: 0x4000, 0x181b: 0x4000, 0x181c: 0x4000, 0x181d: 0x4000, + 0x181e: 0x4000, 0x181f: 0x4000, 0x1820: 0x4000, 0x1821: 0x4000, 0x1822: 0x4000, 0x1823: 0x4000, + 0x1824: 0x4000, 0x1825: 0x4000, 0x1826: 0x4000, 0x1827: 0x4000, 0x1828: 0x4000, 0x1829: 0x4000, + 0x182a: 0x4000, 0x182b: 0x4000, 0x182c: 0x4000, 0x182d: 0x4000, 0x182e: 0x4000, 0x182f: 0x4000, + 0x1830: 0x4000, 0x1831: 0x4000, 0x1832: 0x4000, 0x1833: 0x4000, 0x1834: 0x4000, 0x1835: 0x4000, + 0x1836: 0x4000, 0x1837: 0x4000, 0x1838: 0x4000, 0x1839: 0x4000, 0x183a: 0x4000, 0x183b: 0x4000, + 0x183c: 0x4000, 0x183d: 0x4000, 0x183e: 0x4000, // Block 0x61, offset 0x1840 - 0x1840: 0x4000, + 0x1840: 0x4000, 0x1841: 0x4000, 0x1842: 0x4000, 0x1843: 0x4000, 0x1844: 0x4000, 0x1845: 0x4000, + 0x1846: 0x4000, 0x1847: 0x4000, 0x1848: 0x4000, 0x1849: 0x4000, 
0x184a: 0x4000, 0x184b: 0x4000, + 0x184c: 0x4000, 0x1850: 0x4000, 0x1851: 0x4000, + 0x1852: 0x4000, 0x1853: 0x4000, 0x1854: 0x4000, 0x1855: 0x4000, 0x1856: 0x4000, 0x1857: 0x4000, + 0x1858: 0x4000, 0x1859: 0x4000, 0x185a: 0x4000, 0x185b: 0x4000, 0x185c: 0x4000, 0x185d: 0x4000, + 0x185e: 0x4000, 0x185f: 0x4000, 0x1860: 0x4000, 0x1861: 0x4000, 0x1862: 0x4000, 0x1863: 0x4000, + 0x1864: 0x4000, 0x1865: 0x4000, 0x1866: 0x4000, 0x1867: 0x4000, 0x1868: 0x4000, 0x1869: 0x4000, + 0x186a: 0x4000, 0x186b: 0x4000, // Block 0x62, offset 0x1880 - 0x1880: 0x2000, 0x1881: 0x2000, 0x1882: 0x2000, 0x1883: 0x2000, 0x1884: 0x2000, 0x1885: 0x2000, - 0x1886: 0x2000, 0x1887: 0x2000, 0x1888: 0x2000, 0x1889: 0x2000, 0x188a: 0x2000, 0x188b: 0x2000, - 0x188c: 0x2000, 0x188d: 0x2000, 0x188e: 0x2000, 0x188f: 0x2000, 0x1890: 0x2000, 0x1891: 0x2000, - 0x1892: 0x2000, 0x1893: 0x2000, 0x1894: 0x2000, 0x1895: 0x2000, 0x1896: 0x2000, 0x1897: 0x2000, - 0x1898: 0x2000, 0x1899: 0x2000, 0x189a: 0x2000, 0x189b: 0x2000, 0x189c: 0x2000, 0x189d: 0x2000, - 0x189e: 0x2000, 0x189f: 0x2000, 0x18a0: 0x2000, 0x18a1: 0x2000, 0x18a2: 0x2000, 0x18a3: 0x2000, - 0x18a4: 0x2000, 0x18a5: 0x2000, 0x18a6: 0x2000, 0x18a7: 0x2000, 0x18a8: 0x2000, 0x18a9: 0x2000, - 0x18aa: 0x2000, 0x18ab: 0x2000, 0x18ac: 0x2000, 0x18ad: 0x2000, 0x18ae: 0x2000, 0x18af: 0x2000, - 0x18b0: 0x2000, 0x18b1: 0x2000, 0x18b2: 0x2000, 0x18b3: 0x2000, 0x18b4: 0x2000, 0x18b5: 0x2000, - 0x18b6: 0x2000, 0x18b7: 0x2000, 0x18b8: 0x2000, 0x18b9: 0x2000, 0x18ba: 0x2000, 0x18bb: 0x2000, - 0x18bc: 0x2000, 0x18bd: 0x2000, + 0x1880: 0x4000, 0x1881: 0x4000, 0x1882: 0x4000, 0x1883: 0x4000, 0x1884: 0x4000, 0x1885: 0x4000, + 0x1886: 0x4000, 0x1887: 0x4000, 0x1888: 0x4000, 0x1889: 0x4000, 0x188a: 0x4000, 0x188b: 0x4000, + 0x188c: 0x4000, 0x188d: 0x4000, 0x188e: 0x4000, 0x188f: 0x4000, 0x1890: 0x4000, 0x1891: 0x4000, + 0x1892: 0x4000, 0x1893: 0x4000, 0x1894: 0x4000, 0x1895: 0x4000, 0x1896: 0x4000, 0x1897: 0x4000, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x4000, + 0x18d0: 0x4000, 0x18d1: 0x4000, + 0x18d2: 0x4000, 0x18d3: 0x4000, 0x18d4: 0x4000, 0x18d5: 0x4000, 0x18d6: 0x4000, 0x18d7: 0x4000, + 0x18d8: 0x4000, 0x18d9: 0x4000, 0x18da: 0x4000, 0x18db: 0x4000, 0x18dc: 0x4000, 0x18dd: 0x4000, + 0x18de: 0x4000, 0x18df: 0x4000, 0x18e0: 0x4000, 0x18e1: 0x4000, 0x18e2: 0x4000, 0x18e3: 0x4000, + 0x18e4: 0x4000, 0x18e5: 0x4000, 0x18e6: 0x4000, + // Block 0x64, offset 0x1900 + 0x1900: 0x2000, 0x1901: 0x2000, 0x1902: 0x2000, 0x1903: 0x2000, 0x1904: 0x2000, 0x1905: 0x2000, + 0x1906: 0x2000, 0x1907: 0x2000, 0x1908: 0x2000, 0x1909: 0x2000, 0x190a: 0x2000, 0x190b: 0x2000, + 0x190c: 0x2000, 0x190d: 0x2000, 0x190e: 0x2000, 0x190f: 0x2000, 0x1910: 0x2000, 0x1911: 0x2000, + 0x1912: 0x2000, 0x1913: 0x2000, 0x1914: 0x2000, 0x1915: 0x2000, 0x1916: 0x2000, 0x1917: 0x2000, + 0x1918: 0x2000, 0x1919: 0x2000, 0x191a: 0x2000, 0x191b: 0x2000, 0x191c: 0x2000, 0x191d: 0x2000, + 0x191e: 0x2000, 0x191f: 0x2000, 0x1920: 0x2000, 0x1921: 0x2000, 0x1922: 0x2000, 0x1923: 0x2000, + 0x1924: 0x2000, 0x1925: 0x2000, 0x1926: 0x2000, 0x1927: 0x2000, 0x1928: 0x2000, 0x1929: 0x2000, + 0x192a: 0x2000, 0x192b: 0x2000, 0x192c: 0x2000, 0x192d: 0x2000, 0x192e: 0x2000, 0x192f: 0x2000, + 0x1930: 0x2000, 0x1931: 0x2000, 0x1932: 0x2000, 0x1933: 0x2000, 0x1934: 0x2000, 0x1935: 0x2000, + 0x1936: 0x2000, 0x1937: 0x2000, 0x1938: 0x2000, 0x1939: 0x2000, 0x193a: 0x2000, 0x193b: 0x2000, + 0x193c: 0x2000, 0x193d: 0x2000, } // widthIndex: 22 blocks, 1408 entries, 1408 bytes @@ -1076,13 +1107,14 @@ var widthIndex = [1408]uint8{ 0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 
0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x47, // Block 0xf, offset 0x3c0 - 0x3c0: 0x48, + 0x3c0: 0x0e, 0x3c1: 0x0e, 0x3c2: 0x0e, 0x3c3: 0x0e, 0x3c4: 0x48, 0x3c5: 0x49, 0x3c6: 0x0e, 0x3c7: 0x0e, + 0x3c8: 0x0e, 0x3c9: 0x0e, 0x3ca: 0x0e, 0x3cb: 0x4a, // Block 0x10, offset 0x400 - 0x400: 0x49, 0x403: 0x4a, 0x404: 0x4b, 0x405: 0x4c, 0x406: 0x4d, - 0x408: 0x4e, 0x409: 0x4f, 0x40c: 0x50, 0x40d: 0x51, 0x40e: 0x52, 0x40f: 0x53, - 0x410: 0x3a, 0x411: 0x54, 0x412: 0x0e, 0x413: 0x55, 0x414: 0x56, 0x415: 0x57, 0x416: 0x58, 0x417: 0x59, - 0x418: 0x0e, 0x419: 0x5a, 0x41a: 0x0e, 0x41b: 0x5b, - 0x424: 0x5c, 0x425: 0x5d, 0x426: 0x5e, 0x427: 0x5f, + 0x400: 0x4b, 0x403: 0x4c, 0x404: 0x4d, 0x405: 0x4e, 0x406: 0x4f, + 0x408: 0x50, 0x409: 0x51, 0x40c: 0x52, 0x40d: 0x53, 0x40e: 0x54, 0x40f: 0x55, + 0x410: 0x3a, 0x411: 0x56, 0x412: 0x0e, 0x413: 0x57, 0x414: 0x58, 0x415: 0x59, 0x416: 0x5a, 0x417: 0x5b, + 0x418: 0x0e, 0x419: 0x5c, 0x41a: 0x0e, 0x41b: 0x5d, + 0x424: 0x5e, 0x425: 0x5f, 0x426: 0x60, 0x427: 0x61, // Block 0x11, offset 0x440 0x456: 0x0b, 0x457: 0x06, 0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e, @@ -1100,7 +1132,7 @@ var widthIndex = [1408]uint8{ 0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08, 0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08, 0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08, - 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x60, + 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x62, // Block 0x14, offset 0x500 0x520: 0x10, 0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09, @@ -1281,4 +1313,4 @@ var inverseData = [150][4]byte{ {0x03, 0xe2, 0x97, 0x25}, } -// Total table size 14680 bytes (14KiB) +// Total table size 14936 bytes (14KiB) diff --git a/vendor/google.golang.org/api/compute/v1/compute-api.json b/vendor/google.golang.org/api/compute/v1/compute-api.json index 95ca7af201..40b9508d74 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-api.json +++ b/vendor/google.golang.org/api/compute/v1/compute-api.json @@ -1,11 +1,11 @@ { "kind": "discovery#restDescription", - "etag": "\"YWOzh2SDasdU84ArJnpYek-OMdg/bvUbOBPnfuX4gDZ5aBr7PZU4ZJM\"", + "etag": "\"YWOzh2SDasdU84ArJnpYek-OMdg/ccMQJOTqWSBs7vqsZTir487_SNI\"", "discoveryVersion": "v1", "id": "compute:v1", "name": "compute", "version": "v1", - "revision": "20170905", + "revision": "20171010", "title": "Compute Engine API", "description": "Creates and runs virtual machines on Google Cloud Platform.", "ownerDomain": "google.com", @@ -20,7 +20,7 @@ "basePath": "/compute/v1/projects/", "rootUrl": "https://www.googleapis.com/", "servicePath": "compute/v1/projects/", - "batchPath": "batch", + "batchPath": "batch/compute/v1", "parameters": { "alt": { "type": "string", @@ -182,6 +182,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -213,6 +294,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -237,9 +399,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -250,7 +416,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -270,6 +438,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -336,7 +510,21 @@ "properties": { "address": { "type": "string", - "description": "The static external IP address represented by this resource." + "description": "The static IP address represented by this resource." + }, + "addressType": { + "type": "string", + "description": "The type of address to reserve. If unspecified, defaults to EXTERNAL.", + "enum": [ + "EXTERNAL", + "INTERNAL", + "UNSPECIFIED_TYPE" + ], + "enumDescriptions": [ + "", + "", + "" + ] }, "creationTimestamp": { "type": "string", @@ -390,7 +578,7 @@ }, "status": { "type": "string", - "description": "[Output Only] The status of the address, which can be either IN_USE or RESERVED. An address that is RESERVED is currently reserved and available to use. An IN_USE address is currently being used by another resource and is not available.", + "description": "[Output Only] The status of the address, which can be one of RESERVING, RESERVED, or IN_USE. An address that is RESERVING is currently in the process of being reserved. A RESERVED address is currently reserved and available to use. An IN_USE address is currently being used by another resource and is not available.", "enum": [ "IN_USE", "RESERVED" @@ -400,6 +588,10 @@ "" ] }, + "subnetwork": { + "type": "string", + "description": "For external addresses, this field should not be used.\n\nThe URL of the subnetwork in which to reserve the address. If an IP address is specified, it must be within the subnetwork's IP range." + }, "users": { "type": "array", "description": "[Output Only] The URLs of the resources that are using this address.", @@ -437,6 +629,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -468,6 +741,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -492,9 +846,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -505,7 +863,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -525,6 +885,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -636,7 +1002,7 @@ }, "source": { "type": "string", - "description": "Specifies a valid partial or full URL to an existing Persistent Disk resource. When creating a new instance, one of initializeParams.sourceImage or disks.source is required.\n\nIf desired, you can also attach existing non-root persistent disks using this property. This field is only applicable for persistent disks.\n\nNote that for InstanceTemplate, specify the disk name, not the URL for the disk." + "description": "Specifies a valid partial or full URL to an existing Persistent Disk resource. When creating a new instance, one of initializeParams.sourceImage or disks.source is required except for local SSD.\n\nIf desired, you can also attach existing non-root persistent disks using this property. This field is only applicable for persistent disks.\n\nNote that for InstanceTemplate, specify the disk name, not the URL for the disk." }, "type": { "type": "string", @@ -672,7 +1038,7 @@ }, "sourceImage": { "type": "string", - "description": "The source image to create this disk. When creating a new instance, one of initializeParams.sourceImage or disks.source is required.\n\nTo create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-8 to use the latest Debian 8 image:\n\nprojects/debian-cloud/global/images/family/debian-8 \n\nAlternatively, use a specific version of a public operating system image:\n\nprojects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD \n\nTo create a disk with a private image that you created, specify the image name in the following format:\n\nglobal/images/my-private-image \n\nYou can also specify a private image by its image family, which returns the latest version of the image in that family. 
Replace the image name with family/family-name:\n\nglobal/images/family/my-private-family \n\nIf the source image is deleted later, this field will not be set." + "description": "The source image to create this disk. When creating a new instance, one of initializeParams.sourceImage or disks.source is required except for local SSD.\n\nTo create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-8 to use the latest Debian 8 image:\n\nprojects/debian-cloud/global/images/family/debian-8 \n\nAlternatively, use a specific version of a public operating system image:\n\nprojects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD \n\nTo create a disk with a private image that you created, specify the image name in the following format:\n\nglobal/images/my-private-image \n\nYou can also specify a private image by its image family, which returns the latest version of the image in that family. Replace the image name with family/family-name:\n\nglobal/images/family/my-private-family \n\nIf the source image is deleted later, this field will not be set." }, "sourceImageEncryptionKey": { "$ref": "CustomerEncryptionKey", @@ -786,6 +1152,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } } } }, @@ -817,6 +1264,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -889,9 +1417,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -902,7 +1434,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -922,6 +1456,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -1006,11 +1546,11 @@ "properties": { "metric": { "type": "string", - "description": "The identifier (type) of the Stackdriver Monitoring metric. The metric cannot have negative values and should be a utilization metric, which means that the number of virtual machines handling requests should increase or decrease proportionally to the metric.\n\nThe metric must have a value type of INT64 or DOUBLE." + "description": "The identifier (type) of the Stackdriver Monitoring metric. 
The metric cannot have negative values.\n\nThe metric must have a value type of INT64 or DOUBLE." }, "utilizationTarget": { "type": "number", - "description": "The target value of the metric that autoscaler should maintain. This must be a positive value.\n\nFor example, a good metric to use as a utilization_target is compute.googleapis.com/instance/network/received_bytes_count. The autoscaler will work to keep this value constant for each of the instances.", + "description": "The target value of the metric that autoscaler should maintain. This must be a positive value. A utilization metric scales number of virtual machines handling requests to increase or decrease proportionally to the metric.\n\nFor example, a good metric to use as a utilization_target is compute.googleapis.com/instance/network/received_bytes_count. The autoscaler will work to keep this value constant for each of the instances.", "format": "double" }, "utilizationTargetType": { @@ -1170,6 +1710,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -1342,6 +1963,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." 
+ }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -1421,6 +2123,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -1445,9 +2228,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -1458,7 +2245,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -1478,6 +2267,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -1667,6 +2462,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -1698,6 +2574,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -1722,9 +2679,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -1735,7 +2696,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -1755,6 +2718,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -2023,6 +2992,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -2054,6 +3104,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -2150,6 +3281,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -2181,6 +3393,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -2205,9 +3498,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -2218,7 +3515,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -2238,6 +3537,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -2298,9 +3603,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -2311,7 +3620,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -2331,6 +3642,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -2472,6 +3789,13 @@ "type": "string" } }, + "sourceServiceAccounts": { + "type": "array", + "description": "If source service accounts are specified, the firewall will apply only to traffic originating from an instance with a service account in this list. Source service accounts cannot be used to control traffic to an instance's external IP address because service accounts are associated with an instance, not an IP address. sourceRanges can be set at the same time as sourceServiceAccounts. If both are set, the firewall will apply to traffic that has source IP address within sourceRanges OR the source IP belongs to an instance with service account listed in sourceServiceAccount. The connection does not need to match both properties for the firewall to apply. sourceServiceAccounts cannot be used at the same time as sourceTags or targetTags.", + "items": { + "type": "string" + } + }, "sourceTags": { "type": "array", "description": "If source tags are specified, the firewall rule applies only to traffic with source IPs that match the primary network interfaces of VM instances that have the tag and are in the same VPC network. 
Source tags cannot be used to control traffic to an instance's external IP address, it only applies to traffic between instances in the same virtual network. Because tags are associated with instances, not IP addresses. One or both of sourceRanges and sourceTags may be set. If both properties are set, the firewall will apply to traffic that has source IP address within sourceRanges OR the source IP that belongs to a tag listed in the sourceTags property. The connection does not need to match both properties for the firewall to apply.", @@ -2479,9 +3803,16 @@ "type": "string" } }, + "targetServiceAccounts": { + "type": "array", + "description": "A list of service accounts indicating sets of instances located in the network that may make network connections as specified in allowed[]. targetServiceAccounts cannot be used at the same time as targetTags or sourceTags. If neither targetServiceAccounts nor targetTags are specified, the firewall rule applies to all instances on the specified network.", + "items": { + "type": "string" + } + }, "targetTags": { "type": "array", - "description": "A list of instance tags indicating sets of instances located in the network that may make network connections as specified in allowed[]. If no targetTags are specified, the firewall rule applies to all instances on the specified network.", + "description": "A list of tags that controls which instances the firewall rule applies to. If targetTags are specified, then the firewall rule applies only to instances in the VPC network that have one of those tags. If no targetTags are specified, the firewall rule applies to all instances on the specified network.", "items": { "type": "string" } @@ -2516,6 +3847,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -2526,7 +3938,7 @@ "properties": { "IPAddress": { "type": "string", - "description": "The IP address that this forwarding rule is serving on behalf of.\n\nFor global forwarding rules, the address must be a global IP. For regional forwarding rules, the address must live in the same region as the forwarding rule. By default, this field is empty and an ephemeral IPv4 address from the same scope (global or regional) will be assigned. A regional forwarding rule supports IPv4 only. A global forwarding rule supports either IPv4 or IPv6.\n\nWhen the load balancing scheme is INTERNAL, this can only be an RFC 1918 IP address belonging to the network/subnetwork configured for the forwarding rule. A reserved address cannot be used. If the field is empty, the IP address will be automatically allocated from the internal IP range of the subnetwork or network configured for this forwarding rule." + "description": "The IP address that this forwarding rule is serving on behalf of.\n\nAddresses are restricted based on the forwarding rule's load balancing scheme (EXTERNAL or INTERNAL) and scope (global or regional).\n\nWhen the load balancing scheme is EXTERNAL, for global forwarding rules, the address must be a global IP, and for regional forwarding rules, the address must live in the same region as the forwarding rule. If this field is empty, an ephemeral IPv4 address from the same scope (global or regional) will be assigned. A regional forwarding rule supports IPv4 only. A global forwarding rule supports either IPv4 or IPv6.\n\nWhen the load balancing scheme is INTERNAL, this can only be an RFC 1918 IP address belonging to the network/subnet configured for the forwarding rule. By default, if this field is empty, an ephemeral internal IP address will be automatically allocated from the IP range of the subnet or network configured for this forwarding rule.\n\nAn address can be specified either by a literal IP address or a URL reference to an existing Address resource. The following examples are all valid: \n- 100.1.2.3 \n- https://www.googleapis.com/compute/v1/projects/project/regions/region/addresses/address \n- projects/project/regions/region/addresses/address \n- regions/region/addresses/address \n- global/addresses/address \n- address" }, "IPProtocol": { "type": "string", @@ -2664,6 +4076,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -2695,6 +4188,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -2719,9 +4293,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -2732,7 +4310,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -2752,6 +4332,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -2805,7 +4391,7 @@ "properties": { "type": { "type": "string", - "description": "The type of supported feature. Currently only VIRTIO_SCSI_MULTIQUEUE is supported. For newer Windows images, the server might also populate this property with the value WINDOWS to indicate that this is a Windows image. This value is purely informational and does not enable or disable any features.", + "description": "The type of supported feature. Currently only VIRTIO_SCSI_MULTIQUEUE is supported. For newer Windows images, the server might also populate this property with the value WINDOWS to indicate that this is a Windows image.", "enum": [ "FEATURE_TYPE_UNSPECIFIED", "VIRTIO_SCSI_MULTIQUEUE", @@ -3001,6 +4587,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -3162,6 +4829,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -3260,6 +5008,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -3296,7 +5125,7 @@ }, "guestOsFeatures": { "type": "array", - "description": "A list of features to enable on the guest OS. Applicable for bootable images only. 
Currently, only one feature can be enabled, VIRTIO_SCSI_MULTIQUEUE, which allows each virtual CPU to have its own queue. For Windows images, you can only enable VIRTIO_SCSI_MULTIQUEUE on images with driver version 1.2.0.1621 or higher. Linux images with kernel versions 3.17 and higher will support VIRTIO_SCSI_MULTIQUEUE.\n\nFor new Windows images, the server might also populate this field with the value WINDOWS, to indicate that this is a Windows image. This value is purely informational and does not enable or disable any features.", + "description": "A list of features to enable on the guest OS. Applicable for bootable images only. Currently, only one feature can be enabled, VIRTIO_SCSI_MULTIQUEUE, which allows each virtual CPU to have its own queue. For Windows images, you can only enable VIRTIO_SCSI_MULTIQUEUE on images with driver version 1.2.0.1621 or higher. Linux images with kernel versions 3.17 and higher will support VIRTIO_SCSI_MULTIQUEUE.\n\nFor newer Windows images, the server might also populate this property with the value WINDOWS to indicate that this is a Windows image.", "items": { "$ref": "GuestOsFeature" } @@ -3390,6 +5219,18 @@ "type": "string", "description": "The ID value of the disk used to create this image. This value may be used to determine whether the image was taken from the current or a previous instance of a given disk name." }, + "sourceImage": { + "type": "string", + "description": "URL of the source image used to create this image. This can be a full or valid partial URL. You must provide exactly one of: \n- this property, or \n- the rawDisk.source property, or \n- the sourceDisk property in order to create an image." + }, + "sourceImageEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "The customer-supplied encryption key of the source image. Required if the source image is protected by a customer-supplied encryption key." + }, + "sourceImageId": { + "type": "string", + "description": "[Output Only] The ID value of the image used to create this image. This value may be used to determine whether the image was taken from the current or a previous instance of a given image name." + }, "sourceType": { "type": "string", "description": "The type of the image used to create this disk. The default and only value is RAW", @@ -3445,6 +5286,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -3465,6 +5387,10 @@ "type": "string", "description": "[Output Only] Creation timestamp in RFC3339 text format." }, + "deletionProtection": { + "type": "boolean", + "description": "Whether the resource should be protected against deletion." + }, "description": { "type": "string", "description": "An optional description of this resource. Provide this property when you create the resource." @@ -3518,6 +5444,10 @@ "$ref": "Metadata", "description": "The metadata key/value pairs assigned to this instance. This includes custom metadata and predefined keys." }, + "minCpuPlatform": { + "type": "string", + "description": "Specifies a minimum CPU platform for the VM instance. Applicable values are the friendly names of CPU platforms, such as minCpuPlatform: \"Intel Haswell\" or minCpuPlatform: \"Intel Sandy Bridge\"." + }, "name": { "type": "string", "description": "The name of the resource, provided by the client when initially creating the resource. The resource name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", @@ -3620,6 +5550,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." 
+ }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -3722,6 +5733,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -3753,6 +5845,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -3930,6 +6103,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -3961,6 +6215,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." 
+ }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -4037,9 +6372,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -4050,7 +6389,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -4070,6 +6411,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -4166,6 +6513,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -4221,9 +6649,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -4234,7 +6666,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -4254,6 +6688,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -4328,6 +6768,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -4392,6 +6913,10 @@ "$ref": "Metadata", "description": "The metadata key/value pairs to assign to instances that are created from this template. These pairs can consist of custom metadata or predefined keys. See Project and instance metadata for more information." }, + "minCpuPlatform": { + "type": "string", + "description": "Minimum cpu/platform to be used by this instance. The instance may be scheduled on the specified or newer cpu/platform. Applicable values are the friendly names of CPU platforms, such as minCpuPlatform: \"Intel Haswell\" or minCpuPlatform: \"Intel Sandy Bridge\". For more information, read Specifying a Minimum CPU Platform." + }, "networkInterfaces": { "type": "array", "description": "An array of network access configurations for this interface.", @@ -4497,6 +7022,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -4562,9 +7168,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -4575,7 +7185,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -4595,6 +7207,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -4663,6 +7281,16 @@ } } }, + "InstancesSetMinCpuPlatformRequest": { + "id": "InstancesSetMinCpuPlatformRequest", + "type": "object", + "properties": { + "minCpuPlatform": { + "type": "string", + "description": "Minimum cpu/platform this instance should be started at." + } + } + }, "InstancesSetServiceAccountRequest": { "id": "InstancesSetServiceAccountRequest", "type": "object", @@ -4693,6 +7321,987 @@ } } }, + "Interconnect": { + "id": "Interconnect", + "type": "object", + "description": "Protocol definitions for Mixer API to support Interconnect. Next available tag: 25", + "properties": { + "adminEnabled": { + "type": "boolean", + "description": "Administrative status of the interconnect. 
When this is set to true, the Interconnect is functional and may carry traffic (assuming there are functional InterconnectAttachments and other requirements are satisfied). When set to false, no packets will be carried over this Interconnect and no BGP routes will be exchanged over it. By default, it is set to true." + }, + "circuitInfos": { + "type": "array", + "description": "[Output Only] List of CircuitInfo objects, that describe the individual circuits in this LAG.", + "items": { + "$ref": "InterconnectCircuitInfo" + } + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "customerName": { + "type": "string", + "description": "Customer name, to put in the Letter of Authorization as the party authorized to request a crossconnect." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "expectedOutages": { + "type": "array", + "description": "[Output Only] List of outages expected for this Interconnect.", + "items": { + "$ref": "InterconnectOutageNotification" + } + }, + "googleIpAddress": { + "type": "string", + "description": "[Output Only] IP address configured on the Google side of the Interconnect link. This can be used only for ping tests." + }, + "googleReferenceId": { + "type": "string", + "description": "[Output Only] Google reference ID; to be used when raising support tickets with Google or otherwise to debug backend connectivity issues." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "interconnectAttachments": { + "type": "array", + "description": "[Output Only] A list of the URLs of all InterconnectAttachments configured to use this Interconnect.", + "items": { + "type": "string" + } + }, + "interconnectType": { + "type": "string", + "enum": [ + "DEDICATED", + "IT_PRIVATE" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#interconnect for interconnects.", + "default": "compute#interconnect" + }, + "linkType": { + "type": "string", + "enum": [ + "LINK_TYPE_ETHERNET_10G_LR" + ], + "enumDescriptions": [ + "" + ] + }, + "location": { + "type": "string", + "description": "URL of the InterconnectLocation object that represents where this connection is to be provisioned." + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.interconnects.insert" + ] + } + }, + "nocContactEmail": { + "type": "string", + "description": "Email address to contact the customer NOC for operations and maintenance notifications regarding this Interconnect. If specified, this will be used for notifications in addition to all other forms described, such as Stackdriver logs alerting and Cloud Notifications."
+ }, + "operationalStatus": { + "type": "string", + "description": "[Output Only] The current status of whether or not this Interconnect is functional.", + "enum": [ + "ACTIVE", + "OS_ACTIVE", + "OS_UNPROVISIONED", + "UNPROVISIONED" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ] + }, + "peerIpAddress": { + "type": "string", + "description": "[Output Only] IP address configured on the customer side of the Interconnect link. The customer should configure this IP address during turnup when prompted by Google NOC. This can be used only for ping tests." + }, + "provisionedLinkCount": { + "type": "integer", + "description": "[Output Only] Number of links actually provisioned in this interconnect.", + "format": "int32" + }, + "requestedLinkCount": { + "type": "integer", + "description": "Target number of physical links in the link bundle, as requested by the customer.", + "format": "int32" + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + } + } + }, + "InterconnectAttachment": { + "id": "InterconnectAttachment", + "type": "object", + "description": "Protocol definitions for Mixer API to support InterconnectAttachment. Next available tag: 23", + "properties": { + "cloudRouterIpAddress": { + "type": "string", + "description": "[Output Only] IPv4 address + prefix length to be configured on Cloud Router Interface for this interconnect attachment." + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "customerRouterIpAddress": { + "type": "string", + "description": "[Output Only] IPv4 address + prefix length to be configured on the customer router subinterface for this interconnect attachment." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "googleReferenceId": { + "type": "string", + "description": "[Output Only] Google reference ID, to be used when raising support tickets with Google or otherwise to debug backend connectivity issues." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "interconnect": { + "type": "string", + "description": "URL of the underlying Interconnect object that this attachment's traffic will traverse through." + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#interconnectAttachment for interconnect attachments.", + "default": "compute#interconnectAttachment" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" 
+ }, + "operationalStatus": { + "type": "string", + "description": "[Output Only] The current status of whether or not this interconnect attachment is functional.", + "enum": [ + "ACTIVE", + "OS_ACTIVE", + "OS_UNPROVISIONED", + "UNPROVISIONED" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ] + }, + "privateInterconnectInfo": { + "$ref": "InterconnectAttachmentPrivateInfo", + "description": "[Output Only] Information specific to a Private InterconnectAttachment. Only populated if the interconnect that this is attached is of type IT_PRIVATE." + }, + "region": { + "type": "string", + "description": "[Output Only] URL of the region where the regional interconnect attachment resides." + }, + "router": { + "type": "string", + "description": "URL of the cloud router to be used for dynamic routing. This router must be in the same region as this InterconnectAttachment. The InterconnectAttachment will automatically connect the Interconnect to the network & region within which the Cloud Router is configured." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + } + } + }, + "InterconnectAttachmentAggregatedList": { + "id": "InterconnectAttachmentAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "object", + "description": "A list of InterconnectAttachmentsScopedList resources.", + "additionalProperties": { + "$ref": "InterconnectAttachmentsScopedList", + "description": "Name of the scope containing this set of interconnect attachments." + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#interconnectAttachmentAggregatedList for aggregated lists of interconnect attachments.", + "default": "compute#interconnectAttachmentAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "InterconnectAttachmentList": { + "id": "InterconnectAttachmentList", + "type": "object", + "description": "Response to the list request, and contains a list of interconnect attachments.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "array", + "description": "A list of InterconnectAttachment resources.", + "items": { + "$ref": "InterconnectAttachment" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#interconnectAttachmentList for lists of interconnect attachments.", + "default": "compute#interconnectAttachmentList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "InterconnectAttachmentPrivateInfo": { + "id": "InterconnectAttachmentPrivateInfo", + "type": "object", + "description": "Private information for an interconnect attachment when this belongs to an interconnect of type IT_PRIVATE.", + "properties": { + "tag8021q": { + "type": "integer", + "description": "[Output Only] 802.1q encapsulation tag to be used for traffic between Google and the customer, going to and from this network and region.", + "format": "uint32" + } + } + }, + "InterconnectAttachmentsScopedList": { + "id": "InterconnectAttachmentsScopedList", + "type": "object", + "properties": { + "interconnectAttachments": { + "type": "array", + "description": "List of interconnect attachments contained in this scope.", + "items": { + "$ref": "InterconnectAttachment" + } + }, + "warning": { + "type": "object", + "description": "Informational warning which replaces the list of addresses when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "InterconnectCircuitInfo": { + "id": "InterconnectCircuitInfo", + "type": "object", + "description": "Describes a single physical circuit between the Customer and Google. CircuitInfo objects are created by Google, so all fields are output only. Next id: 4", + "properties": { + "customerDemarcId": { + "type": "string", + "description": "Customer-side demarc ID for this circuit. This will only be set if it was provided by the Customer to Google during circuit turn-up." + }, + "googleCircuitId": { + "type": "string", + "description": "Google-assigned unique ID for this circuit. Assigned at circuit turn-up." + }, + "googleDemarcId": { + "type": "string", + "description": "Google-side demarc ID for this circuit. Assigned at circuit turn-up and provided by Google to the customer in the LOA." + } + } + }, + "InterconnectList": { + "id": "InterconnectList", + "type": "object", + "description": "Response to the list request, and contains a list of interconnects.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "array", + "description": "A list of Interconnect resources.", + "items": { + "$ref": "Interconnect" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. 
Always compute#interconnectList for lists of interconnects.", + "default": "compute#interconnectList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "InterconnectLocation": { + "id": "InterconnectLocation", + "type": "object", + "description": "Protocol definitions for Mixer API to support InterconnectLocation.", + "properties": { + "address": { + "type": "string", + "description": "[Output Only] The postal address of the Point of Presence, each line in the address is separated by a newline character." + }, + "availabilityZone": { + "type": "string", + "description": "Availability zone for this location. Within a city, maintenance will not be simultaneously scheduled in more than one availability zone. Example: \"zone1\" or \"zone2\"." 
+ }, + "city": { + "type": "string", + "description": "City designator used by the Interconnect UI to locate this InterconnectLocation within the Continent. For example: \"Chicago, IL\", \"Amsterdam, Netherlands\"." + }, + "continent": { + "type": "string", + "description": "Continent for this location. Used by the location picker in the Interconnect UI.", + "enum": [ + "AFRICA", + "ASIA_PAC", + "C_AFRICA", + "C_ASIA_PAC", + "C_EUROPE", + "C_NORTH_AMERICA", + "C_SOUTH_AMERICA", + "EUROPE", + "NORTH_AMERICA", + "SOUTH_AMERICA" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "[Output Only] An optional description of the resource." + }, + "facilityProvider": { + "type": "string", + "description": "[Output Only] The name of the provider for this facility (e.g., EQUINIX)." + }, + "facilityProviderFacilityId": { + "type": "string", + "description": "[Output Only] A provider-assigned Identifier for this facility (e.g., Ashburn-DC1)." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#interconnectLocation for interconnect locations.", + "default": "compute#interconnectLocation" + }, + "name": { + "type": "string", + "description": "[Output Only] Name of the resource." + }, + "peeringdbFacilityId": { + "type": "string", + "description": "[Output Only] The peeringdb identifier for this facility (corresponding with a netfac type in peeringdb)." + }, + "regionInfos": { + "type": "array", + "description": "[Output Only] A list of InterconnectLocation.RegionInfo objects, that describe parameters pertaining to the relation between this InterconnectLocation and various Google Cloud regions.", + "items": { + "$ref": "InterconnectLocationRegionInfo" + } + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + } + } + }, + "InterconnectLocationList": { + "id": "InterconnectLocationList", + "type": "object", + "description": "Response to the list request, and contains a list of interconnect locations.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "array", + "description": "A list of InterconnectLocation resources.", + "items": { + "$ref": "InterconnectLocation" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#interconnectLocationList for lists of interconnect locations.", + "default": "compute#interconnectLocationList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." 
+ }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "InterconnectLocationRegionInfo": { + "id": "InterconnectLocationRegionInfo", + "type": "object", + "description": "Information about any potential InterconnectAttachments between an Interconnect at a specific InterconnectLocation, and a specific Cloud Region.", + "properties": { + "expectedRttMs": { + "type": "string", + "description": "Expected round-trip time in milliseconds, from this InterconnectLocation to a VM in this region.", + "format": "int64" + }, + "locationPresence": { + "type": "string", + "description": "Identifies the network presence of this location.", + "enum": [ + "GLOBAL", + "LOCAL_REGION", + "LP_GLOBAL", + "LP_LOCAL_REGION" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ] + }, + "region": { + "type": "string", + "description": "URL for the region of this location." + } + } + }, + "InterconnectOutageNotification": { + "id": "InterconnectOutageNotification", + "type": "object", + "description": "Description of a planned outage on this Interconnect. 
Next id: 9", + "properties": { + "affectedCircuits": { + "type": "array", + "description": "Iff issue_type is IT_PARTIAL_OUTAGE, a list of the Google-side circuit IDs that will be affected.", + "items": { + "type": "string" + } + }, + "description": { + "type": "string", + "description": "Short user-visible description of the purpose of the outage." + }, + "endTime": { + "type": "string", + "format": "int64" + }, + "issueType": { + "type": "string", + "enum": [ + "IT_OUTAGE", + "IT_PARTIAL_OUTAGE", + "OUTAGE", + "PARTIAL_OUTAGE" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ] + }, + "name": { + "type": "string", + "description": "Unique identifier for this outage notification." + }, + "source": { + "type": "string", + "enum": [ + "GOOGLE", + "NSRC_GOOGLE" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "startTime": { + "type": "string", + "description": "Scheduled start and end times for the outage (milliseconds since Unix epoch).", + "format": "int64" + }, + "state": { + "type": "string", + "enum": [ + "ACTIVE", + "CANCELLED", + "NS_ACTIVE", + "NS_CANCELED" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ] + } + } + }, "License": { "id": "License", "type": "object", @@ -4836,6 +8445,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } } } }, @@ -4867,6 +8557,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -4891,9 +8662,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -4904,7 +8679,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -4924,6 +8701,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -5169,6 +8952,10 @@ "$ref": "NetworkPeering" } }, + "routingConfig": { + "$ref": "NetworkRoutingConfig", + "description": "The network-level routing configuration for this network. Used by Cloud Router to determine what type of network-wide routing behavior to enforce." + }, "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for the resource." 
@@ -5252,6 +9039,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -5290,6 +9158,25 @@ } } }, + "NetworkRoutingConfig": { + "id": "NetworkRoutingConfig", + "type": "object", + "description": "A routing configuration attached to a network resource. The message includes the list of routers associated with the network, and a flag indicating the type of routing behavior to enforce network-wide.", + "properties": { + "routingMode": { + "type": "string", + "description": "The network-wide routing mode to use. If set to REGIONAL, this network's cloud routers will only advertise routes with subnetworks of this network in the same region as the router. 
If set to GLOBAL, this network's cloud routers will advertise routes with all subnetworks of this network, across regions.", + "enum": [ + "GLOBAL", + "REGIONAL" + ], + "enumDescriptions": [ + "", + "" + ] + } + } + }, "NetworksAddPeeringRequest": { "id": "NetworksAddPeeringRequest", "type": "object", @@ -5457,9 +9344,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -5470,7 +9361,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -5490,6 +9383,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -5551,6 +9450,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -5582,6 +9562,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." 
+ }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -5606,9 +9667,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -5619,7 +9684,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -5639,6 +9706,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -6034,6 +10107,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -6065,6 +10219,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -6096,6 +10331,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -6206,6 +10522,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." 
+ }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -6278,6 +10675,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -6430,9 +10908,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -6443,7 +10925,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -6463,6 +10947,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -6520,6 +11010,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -6622,6 +11193,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -6677,6 +11329,10 @@ "type": "string", "description": "IP address and range of the interface. The IP range must be in the RFC3927 link-local IP space. The value must be a CIDR-formatted string, for example: 169.254.0.1/30. NOTE: Do not truncate the address as it represents the IP address of the interface." }, + "linkedInterconnectAttachment": { + "type": "string", + "description": "URI of the linked interconnect attachment. It must be in the same region as the router. Each interface can have at most one linked resource and it could either be a VPN Tunnel or an interconnect attachment." + }, "linkedVpnTunnel": { "type": "string", "description": "URI of the linked VPN tunnel. It must be in the same region as the router. Each interface can have at most one linked resource and it could either be a VPN Tunnel or an interconnect attachment." @@ -6716,6 +11372,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -6854,9 +11591,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -6867,7 +11608,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -6887,6 +11630,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -7117,7 +11866,7 @@ }, "storageBytes": { "type": "string", - "description": "[Output Only] A size of the the storage used by the snapshot. As snapshots share storage, this number is expected to change with snapshot creation/deletion.", + "description": "[Output Only] A size of the storage used by the snapshot. As snapshots share storage, this number is expected to change with snapshot creation/deletion.", "format": "int64" }, "storageBytesStatus": { @@ -7162,6 +11911,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -7235,6 +12065,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -7327,6 +12238,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -7358,6 +12350,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -7407,9 +12480,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -7420,7 +12497,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -7440,6 +12519,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -7597,6 +12682,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -7686,6 +12852,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -7769,6 +13016,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -7800,6 +13128,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -7824,9 +13233,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -7837,7 +13250,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -7857,6 +13272,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -7992,6 +13413,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -8040,6 +13542,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } } } }, @@ -8116,9 +13699,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -8129,7 +13716,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -8149,6 +13738,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -8312,6 +13907,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -8421,6 +14097,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -8534,6 +14291,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -8565,6 +14403,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } } } }, @@ -8589,9 +14508,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -8602,7 +14525,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -8622,6 +14547,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -8760,6 +14691,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -9006,6 +15018,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -9037,6 +15130,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. 
For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -9061,9 +15235,13 @@ "enum": [ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", "NEXT_HOP_INSTANCE_NOT_FOUND", @@ -9074,7 +15252,9 @@ "REQUIRED_TOS_AGREEMENT", "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", "UNREACHABLE" ], "enumDescriptions": [ @@ -9094,6 +15274,12 @@ "", "", "", + "", + "", + "", + "", + "", + "", "" ] }, @@ -9149,6 +15335,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. 
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } } } }, @@ -9180,6 +15447,13 @@ "type": "object", "description": "A Zone resource.", "properties": { + "availableCpuPlatforms": { + "type": "array", + "description": "[Output Only] Available cpu/platform selections for the zone.", + "items": { + "type": "string" + } + }, "creationTimestamp": { "type": "string", "description": "[Output Only] Creation timestamp in RFC3339 text format." @@ -9256,6 +15530,87 @@ "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for this resource." + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." 
+ } + } } } }, @@ -14897,6 +21252,58 @@ "https://www.googleapis.com/auth/compute" ] }, + "setDeletionProtection": { + "id": "compute.instances.setDeletionProtection", + "path": "{project}/zones/{zone}/instances/{resource}/setDeletionProtection", + "httpMethod": "POST", + "description": "Sets deletion protection on the instance.", + "parameters": { + "deletionProtection": { + "type": "boolean", + "description": "Whether the resource should be protected against deletion.", + "default": "true", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "resource": { + "type": "string", + "description": "Name of the resource for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "resource" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "setDiskAutoDelete": { "id": "compute.instances.setDiskAutoDelete", "path": "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete", @@ -15154,6 +21561,55 @@ "https://www.googleapis.com/auth/compute" ] }, + "setMinCpuPlatform": { + "id": "compute.instances.setMinCpuPlatform", + "path": "{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform", + "httpMethod": "POST", + "description": "Changes the minimum CPU platform that this instance should use. This method can only be called on a stopped instance. For more information, read Specifying a Minimum CPU Platform.", + "parameters": { + "instance": { + "type": "string", + "description": "Name of the instance scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "request": { + "$ref": "InstancesSetMinCpuPlatformRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "setScheduling": { "id": "compute.instances.setScheduling", "path": "{project}/zones/{zone}/instances/{instance}/setScheduling", @@ -15400,7 +21856,7 @@ "id": "compute.instances.stop", "path": "{project}/zones/{zone}/instances/{instance}/stop", "httpMethod": "POST", - "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur per-minute, virtual machine usage charges while they are stopped, but any resources that the virtual machine is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance.", + "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur VM usage charges while they are stopped. However, resources that the VM is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance.", "parameters": { "instance": { "type": "string", @@ -15444,6 +21900,531 @@ } } }, + "interconnectAttachments": { + "methods": { + "aggregatedList": { + "id": "compute.interconnectAttachments.aggregatedList", + "path": "{project}/aggregated/interconnectAttachments", + "httpMethod": "GET", + "description": "Retrieves an aggregated list of interconnect attachments.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "InterconnectAttachmentAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "id": "compute.interconnectAttachments.delete", + "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + "httpMethod": "DELETE", + "description": "Deletes the specified interconnect attachment.", + "parameters": { + "interconnectAttachment": { + "type": "string", + "description": "Name of the interconnect attachment to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + } + }, + "parameterOrder": [ + "project", + "region", + "interconnectAttachment" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.interconnectAttachments.get", + "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + "httpMethod": "GET", + "description": "Returns the specified interconnect attachment.", + "parameters": { + "interconnectAttachment": { + "type": "string", + "description": "Name of the interconnect attachment to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "interconnectAttachment" + ], + "response": { + "$ref": "InterconnectAttachment" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.interconnectAttachments.insert", + "path": "{project}/regions/{region}/interconnectAttachments", + "httpMethod": "POST", + "description": "Creates an InterconnectAttachment in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "request": { + "$ref": "InterconnectAttachment" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.interconnectAttachments.list", + "path": "{project}/regions/{region}/interconnectAttachments", + "httpMethod": "GET", + "description": "Retrieves the list of interconnect attachments contained within the specified region.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "response": { + "$ref": "InterconnectAttachmentList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "interconnectLocations": { + "methods": { + "get": { + "id": "compute.interconnectLocations.get", + "path": "{project}/global/interconnectLocations/{interconnectLocation}", + "httpMethod": "GET", + "description": "Returns the details for the specified interconnect location. Get a list of available interconnect locations by making a list() request.", + "parameters": { + "interconnectLocation": { + "type": "string", + "description": "Name of the interconnect location to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "interconnectLocation" + ], + "response": { + "$ref": "InterconnectLocation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "id": "compute.interconnectLocations.list", + "path": "{project}/global/interconnectLocations", + "httpMethod": "GET", + "description": "Retrieves the list of interconnect locations available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "InterconnectLocationList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "interconnects": { + "methods": { + "delete": { + "id": "compute.interconnects.delete", + "path": "{project}/global/interconnects/{interconnect}", + "httpMethod": "DELETE", + "description": "Deletes the specified interconnect.", + "parameters": { + "interconnect": { + "type": "string", + "description": "Name of the interconnect to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + } + }, + "parameterOrder": [ + "project", + "interconnect" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.interconnects.get", + "path": "{project}/global/interconnects/{interconnect}", + "httpMethod": "GET", + "description": "Returns the specified interconnect. Get a list of available interconnects by making a list() request.", + "parameters": { + "interconnect": { + "type": "string", + "description": "Name of the interconnect to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "interconnect" + ], + "response": { + "$ref": "Interconnect" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.interconnects.insert", + "path": "{project}/global/interconnects", + "httpMethod": "POST", + "description": "Creates a Interconnect in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "Interconnect" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.interconnects.list", + "path": "{project}/global/interconnects", + "httpMethod": "GET", + "description": "Retrieves the list of interconnect available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + "default": "500", + "format": "uint32", + "minimum": "0", + "location": "query" + }, + "orderBy": { + "type": "string", + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "InterconnectList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "id": "compute.interconnects.patch", + "path": "{project}/global/interconnects/{interconnect}", + "httpMethod": "PATCH", + "description": "Updates the specified interconnect with the data included in the request. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "parameters": { + "interconnect": { + "type": "string", + "description": "Name of the interconnect to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + } + }, + "parameterOrder": [ + "project", + "interconnect" + ], + "request": { + "$ref": "Interconnect" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, "licenses": { "methods": { "get": { @@ -15831,6 +22812,47 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "patch": { + "id": "compute.networks.patch", + "path": "{project}/global/networks/{network}", + "httpMethod": "PATCH", + "description": "Patches the specified network with the data included in the request.", + "parameters": { + "network": { + "type": "string", + "description": "Name of the network to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "requestId": { + "type": "string", + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + "location": "query" + } + }, + "parameterOrder": [ + "project", + "network" + ], + "request": { + "$ref": "Network" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "removePeering": { "id": "compute.networks.removePeering", "path": "{project}/global/networks/{network}/removePeering", diff --git a/vendor/google.golang.org/api/compute/v1/compute-gen.go b/vendor/google.golang.org/api/compute/v1/compute-gen.go index 422d803fa0..b77b8ba066 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v1/compute-gen.go @@ -91,6 +91,9 @@ func New(client *http.Client) (*Service, error) { s.InstanceGroups = NewInstanceGroupsService(s) s.InstanceTemplates = NewInstanceTemplatesService(s) s.Instances = NewInstancesService(s) + s.InterconnectAttachments = NewInterconnectAttachmentsService(s) + s.InterconnectLocations = NewInterconnectLocationsService(s) + s.Interconnects = NewInterconnectsService(s) s.Licenses = NewLicensesService(s) s.MachineTypes = NewMachineTypesService(s) s.Networks = NewNetworksService(s) @@ -166,6 +169,12 @@ type Service struct { Instances *InstancesService + InterconnectAttachments *InterconnectAttachmentsService + + InterconnectLocations *InterconnectLocationsService + + Interconnects *InterconnectsService + Licenses *LicensesService MachineTypes *MachineTypesService @@ -408,6 +417,33 @@ type InstancesService struct { s *Service } +func NewInterconnectAttachmentsService(s *Service) *InterconnectAttachmentsService { + rs := &InterconnectAttachmentsService{s: s} + return rs +} + +type InterconnectAttachmentsService struct { + s *Service +} + +func NewInterconnectLocationsService(s *Service) *InterconnectLocationsService { + rs := &InterconnectLocationsService{s: s} + return rs +} + +type InterconnectLocationsService struct { + s *Service +} + +func NewInterconnectsService(s *Service) *InterconnectsService { + rs := &InterconnectsService{s: s} + return rs +} + +type InterconnectsService struct { + s *Service +} + func NewLicensesService(s *Service) *LicensesService { rs := &LicensesService{s: s} return rs @@ -775,6 +811,9 @@ type AcceleratorTypeAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *AcceleratorTypeAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -802,6 +841,108 @@ func (s *AcceleratorTypeAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// AcceleratorTypeAggregatedListWarning: [Output Only] Informational +// warning message. +type AcceleratorTypeAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*AcceleratorTypeAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AcceleratorTypeAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod AcceleratorTypeAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type AcceleratorTypeAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AcceleratorTypeAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod AcceleratorTypeAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // AcceleratorTypeList: Contains a list of accelerator types. type AcceleratorTypeList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -826,6 +967,9 @@ type AcceleratorTypeList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *AcceleratorTypeListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -853,6 +997,108 @@ func (s *AcceleratorTypeList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// AcceleratorTypeListWarning: [Output Only] Informational warning +// message. +type AcceleratorTypeListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*AcceleratorTypeListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AcceleratorTypeListWarning) MarshalJSON() ([]byte, error) { + type noMethod AcceleratorTypeListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type AcceleratorTypeListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AcceleratorTypeListWarningData) MarshalJSON() ([]byte, error) { + type noMethod AcceleratorTypeListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type AcceleratorTypesScopedList struct { // AcceleratorTypes: [Output Only] List of accelerator types contained // in this scope. @@ -896,9 +1142,13 @@ type AcceleratorTypesScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -909,7 +1159,9 @@ type AcceleratorTypesScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -1033,9 +1285,18 @@ func (s *AccessConfig) MarshalJSON() ([]byte, error) { // Address: A reserved address resource. type Address struct { - // Address: The static external IP address represented by this resource. + // Address: The static IP address represented by this resource. Address string `json:"address,omitempty"` + // AddressType: The type of address to reserve. If unspecified, defaults + // to EXTERNAL. 
+ // + // Possible values: + // "EXTERNAL" + // "INTERNAL" + // "UNSPECIFIED_TYPE" + AddressType string `json:"addressType,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -1078,16 +1339,24 @@ type Address struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // Status: [Output Only] The status of the address, which can be either - // IN_USE or RESERVED. An address that is RESERVED is currently reserved - // and available to use. An IN_USE address is currently being used by - // another resource and is not available. + // Status: [Output Only] The status of the address, which can be one of + // RESERVING, RESERVED, or IN_USE. An address that is RESERVING is + // currently in the process of being reserved. A RESERVED address is + // currently reserved and available to use. An IN_USE address is + // currently being used by another resource and is not available. // // Possible values: // "IN_USE" // "RESERVED" Status string `json:"status,omitempty"` + // Subnetwork: For external addresses, this field should not be + // used. + // + // The URL of the subnetwork in which to reserve the address. If an IP + // address is specified, it must be within the subnetwork's IP range. + Subnetwork string `json:"subnetwork,omitempty"` + // Users: [Output Only] The URLs of the resources that are using this // address. Users []string `json:"users,omitempty"` @@ -1142,6 +1411,9 @@ type AddressAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *AddressAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -1169,6 +1441,108 @@ func (s *AddressAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// AddressAggregatedListWarning: [Output Only] Informational warning +// message. +type AddressAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*AddressAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. 
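// ---------------------------------------------------------------------------
// Editorial note, not part of the generated file: the Address type in this hunk
// gains AddressType and Subnetwork. A sketch of reserving an internal address,
// assuming svc is a *Service built elsewhere and the project, region, and
// subnetwork URL are placeholders:
//
//	addr := &Address{
//		Name:        "example-internal-ip",
//		AddressType: "INTERNAL",
//		Subnetwork:  "projects/my-project/regions/us-central1/subnetworks/default",
//	}
//	op, err := svc.Addresses.Insert(project, region, addr).Do()
//	_ = op // poll the returned Operation before relying on the reservation
// ---------------------------------------------------------------------------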
+ Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AddressAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod AddressAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type AddressAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AddressAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod AddressAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // AddressList: Contains a list of addresses. type AddressList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -1193,6 +1567,9 @@ type AddressList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *AddressListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` @@ -1220,6 +1597,107 @@ func (s *AddressList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// AddressListWarning: [Output Only] Informational warning message. +type AddressListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*AddressListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AddressListWarning) MarshalJSON() ([]byte, error) { + type noMethod AddressListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type AddressListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. 
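// ---------------------------------------------------------------------------
// Editorial note, not part of the generated file: the list responses in this
// diff gain an optional Warning field. A sketch of surfacing it on an address
// listing, assuming svc is a *Service and project/region are set:
//
//	list, err := svc.Addresses.List(project, region).Do()
//	if err != nil {
//		return err
//	}
//	if list.Warning != nil {
//		log.Printf("compute warning %s: %s", list.Warning.Code, list.Warning.Message)
//		for _, d := range list.Warning.Data {
//			log.Printf("  %s = %s", d.Key, d.Value)
//		}
//	}
// ---------------------------------------------------------------------------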
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AddressListWarningData) MarshalJSON() ([]byte, error) { + type noMethod AddressListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type AddressesScopedList struct { // Addresses: [Output Only] List of addresses contained in this scope. Addresses []*Address `json:"addresses,omitempty"` @@ -1261,9 +1739,13 @@ type AddressesScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -1274,7 +1756,9 @@ type AddressesScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -1476,7 +1960,8 @@ type AttachedDisk struct { // Source: Specifies a valid partial or full URL to an existing // Persistent Disk resource. When creating a new instance, one of - // initializeParams.sourceImage or disks.source is required. + // initializeParams.sourceImage or disks.source is required except for + // local SSD. // // If desired, you can also attach existing non-root persistent disks // using this property. This field is only applicable for persistent @@ -1552,7 +2037,7 @@ type AttachedDiskInitializeParams struct { // SourceImage: The source image to create this disk. When creating a // new instance, one of initializeParams.sourceImage or disks.source is - // required. + // required except for local SSD. // // To create a disk with one of the public operating system images, // specify the image by its family name. For example, specify @@ -1732,6 +2217,9 @@ type AutoscalerAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *AutoscalerAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -1759,6 +2247,108 @@ func (s *AutoscalerAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// AutoscalerAggregatedListWarning: [Output Only] Informational warning +// message. +type AutoscalerAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. 
For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*AutoscalerAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AutoscalerAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod AutoscalerAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type AutoscalerAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AutoscalerAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod AutoscalerAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // AutoscalerList: Contains a list of Autoscaler resources. type AutoscalerList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -1783,6 +2373,9 @@ type AutoscalerList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *AutoscalerListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -1810,6 +2403,107 @@ func (s *AutoscalerList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// AutoscalerListWarning: [Output Only] Informational warning message. +type AutoscalerListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*AutoscalerListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AutoscalerListWarning) MarshalJSON() ([]byte, error) { + type noMethod AutoscalerListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type AutoscalerListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AutoscalerListWarningData) MarshalJSON() ([]byte, error) { + type noMethod AutoscalerListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type AutoscalerStatusDetails struct { // Message: The status message. Message string `json:"message,omitempty"` @@ -1899,9 +2593,13 @@ type AutoscalersScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -1912,7 +2610,9 @@ type AutoscalersScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -2108,16 +2808,15 @@ func (s *AutoscalingPolicyCpuUtilization) UnmarshalJSON(data []byte) error { // policy. type AutoscalingPolicyCustomMetricUtilization struct { // Metric: The identifier (type) of the Stackdriver Monitoring metric. 
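// ---------------------------------------------------------------------------
// Editorial note, not part of the generated file: a sketch of the custom metric
// autoscaling shape whose documentation changes in the next hunk. The metric
// name is the one the comments themselves cite; the target value is
// illustrative, and the surrounding AutoscalingPolicy field is assumed:
//
//	policy := &AutoscalingPolicy{
//		CustomMetricUtilizations: []*AutoscalingPolicyCustomMetricUtilization{{
//			Metric:            "compute.googleapis.com/instance/network/received_bytes_count",
//			UtilizationTarget: 200 * 1024, // per-instance level the autoscaler maintains
//		}},
//	}
// ---------------------------------------------------------------------------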
- // The metric cannot have negative values and should be a utilization - // metric, which means that the number of virtual machines handling - // requests should increase or decrease proportionally to the - // metric. + // The metric cannot have negative values. // // The metric must have a value type of INT64 or DOUBLE. Metric string `json:"metric,omitempty"` // UtilizationTarget: The target value of the metric that autoscaler - // should maintain. This must be a positive value. + // should maintain. This must be a positive value. A utilization metric + // scales number of virtual machines handling requests to increase or + // decrease proportionally to the metric. // // For example, a good metric to use as a utilization_target is // compute.googleapis.com/instance/network/received_bytes_count. The @@ -2431,6 +3130,9 @@ type BackendBucketList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *BackendBucketListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -2458,6 +3160,108 @@ func (s *BackendBucketList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// BackendBucketListWarning: [Output Only] Informational warning +// message. +type BackendBucketListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*BackendBucketListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendBucketListWarning) MarshalJSON() ([]byte, error) { + type noMethod BackendBucketListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type BackendBucketListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendBucketListWarningData) MarshalJSON() ([]byte, error) { + type noMethod BackendBucketListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // BackendService: A BackendService resource. This resource defines a // group of backend virtual machines and their serving capacity. type BackendService struct { @@ -2651,6 +3455,9 @@ type BackendServiceAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *BackendServiceAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -2678,6 +3485,108 @@ func (s *BackendServiceAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// BackendServiceAggregatedListWarning: [Output Only] Informational +// warning message. +type BackendServiceAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*BackendServiceAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendServiceAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod BackendServiceAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type BackendServiceAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendServiceAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod BackendServiceAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // BackendServiceCdnPolicy: Message containing Cloud CDN configuration // for a backend service. type BackendServiceCdnPolicy struct { @@ -2801,6 +3710,9 @@ type BackendServiceList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *BackendServiceListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -2828,6 +3740,108 @@ func (s *BackendServiceList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// BackendServiceListWarning: [Output Only] Informational warning +// message. +type BackendServiceListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*BackendServiceListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendServiceListWarning) MarshalJSON() ([]byte, error) { + type noMethod BackendServiceListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type BackendServiceListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendServiceListWarningData) MarshalJSON() ([]byte, error) { + type noMethod BackendServiceListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type BackendServicesScopedList struct { // BackendServices: List of BackendServices contained in this scope. BackendServices []*BackendService `json:"backendServices,omitempty"` @@ -2870,9 +3884,13 @@ type BackendServicesScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -2883,7 +3901,9 @@ type BackendServicesScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -3170,6 +4190,9 @@ type CommitmentAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *CommitmentAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` @@ -3197,6 +4220,108 @@ func (s *CommitmentAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// CommitmentAggregatedListWarning: [Output Only] Informational warning +// message. +type CommitmentAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*CommitmentAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *CommitmentAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod CommitmentAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type CommitmentAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *CommitmentAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod CommitmentAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // CommitmentList: Contains a list of Commitment resources. type CommitmentList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -3221,6 +4346,9 @@ type CommitmentList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *CommitmentListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -3248,6 +4376,107 @@ func (s *CommitmentList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// CommitmentListWarning: [Output Only] Informational warning message. +type CommitmentListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*CommitmentListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *CommitmentListWarning) MarshalJSON() ([]byte, error) { + type noMethod CommitmentListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type CommitmentListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *CommitmentListWarningData) MarshalJSON() ([]byte, error) { + type noMethod CommitmentListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type CommitmentsScopedList struct { // Commitments: [Output Only] List of commitments contained in this // scope. 
@@ -3290,9 +4519,13 @@ type CommitmentsScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -3303,7 +4536,9 @@ type CommitmentsScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -3756,6 +4991,9 @@ type DiskAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *DiskAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -3783,6 +5021,108 @@ func (s *DiskAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// DiskAggregatedListWarning: [Output Only] Informational warning +// message. +type DiskAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*DiskAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DiskAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod DiskAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type DiskAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DiskAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod DiskAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // DiskList: A list of Disk resources. type DiskList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -3807,6 +5147,9 @@ type DiskList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *DiskListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -3834,6 +5177,107 @@ func (s *DiskList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// DiskListWarning: [Output Only] Informational warning message. +type DiskListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*DiskListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DiskListWarning) MarshalJSON() ([]byte, error) { + type noMethod DiskListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type DiskListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DiskListWarningData) MarshalJSON() ([]byte, error) { + type noMethod DiskListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type DiskMoveRequest struct { // DestinationZone: The URL of the destination zone to move the disk. // This can be a full or partial URL. For example, the following are all @@ -3966,6 +5410,9 @@ type DiskTypeAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *DiskTypeAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -3993,6 +5440,108 @@ func (s *DiskTypeAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// DiskTypeAggregatedListWarning: [Output Only] Informational warning +// message. +type DiskTypeAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*DiskTypeAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DiskTypeAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod DiskTypeAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type DiskTypeAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DiskTypeAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod DiskTypeAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // DiskTypeList: Contains a list of disk types. type DiskTypeList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -4017,6 +5566,9 @@ type DiskTypeList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *DiskTypeListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -4044,6 +5596,107 @@ func (s *DiskTypeList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// DiskTypeListWarning: [Output Only] Informational warning message. +type DiskTypeListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*DiskTypeListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DiskTypeListWarning) MarshalJSON() ([]byte, error) { + type noMethod DiskTypeListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type DiskTypeListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DiskTypeListWarningData) MarshalJSON() ([]byte, error) { + type noMethod DiskTypeListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type DiskTypesScopedList struct { // DiskTypes: [Output Only] List of disk types contained in this scope. DiskTypes []*DiskType `json:"diskTypes,omitempty"` @@ -4085,9 +5738,13 @@ type DiskTypesScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -4098,7 +5755,9 @@ type DiskTypesScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -4240,9 +5899,13 @@ type DisksScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -4253,7 +5916,9 @@ type DisksScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -4413,6 +6078,20 @@ type Firewall struct { // the firewall to apply. Only IPv4 is supported. SourceRanges []string `json:"sourceRanges,omitempty"` + // SourceServiceAccounts: If source service accounts are specified, the + // firewall will apply only to traffic originating from an instance with + // a service account in this list. Source service accounts cannot be + // used to control traffic to an instance's external IP address because + // service accounts are associated with an instance, not an IP address. + // sourceRanges can be set at the same time as sourceServiceAccounts. If + // both are set, the firewall will apply to traffic that has source IP + // address within sourceRanges OR the source IP belongs to an instance + // with service account listed in sourceServiceAccount. The connection + // does not need to match both properties for the firewall to apply. + // sourceServiceAccounts cannot be used at the same time as sourceTags + // or targetTags. 
+ SourceServiceAccounts []string `json:"sourceServiceAccounts,omitempty"` + // SourceTags: If source tags are specified, the firewall rule applies // only to traffic with source IPs that match the primary network // interfaces of VM instances that have the tag and are in the same VPC @@ -4427,10 +6106,19 @@ type Firewall struct { // the firewall to apply. SourceTags []string `json:"sourceTags,omitempty"` - // TargetTags: A list of instance tags indicating sets of instances - // located in the network that may make network connections as specified - // in allowed[]. If no targetTags are specified, the firewall rule + // TargetServiceAccounts: A list of service accounts indicating sets of + // instances located in the network that may make network connections as + // specified in allowed[]. targetServiceAccounts cannot be used at the + // same time as targetTags or sourceTags. If neither + // targetServiceAccounts nor targetTags are specified, the firewall rule // applies to all instances on the specified network. + TargetServiceAccounts []string `json:"targetServiceAccounts,omitempty"` + + // TargetTags: A list of tags that controls which instances the firewall + // rule applies to. If targetTags are specified, then the firewall rule + // applies only to instances in the VPC network that have one of those + // tags. If no targetTags are specified, the firewall rule applies to + // all instances on the specified network. TargetTags []string `json:"targetTags,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -4560,6 +6248,9 @@ type FirewallList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *FirewallListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -4587,6 +6278,107 @@ func (s *FirewallList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// FirewallListWarning: [Output Only] Informational warning message. +type FirewallListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*FirewallListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. 
+ Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *FirewallListWarning) MarshalJSON() ([]byte, error) { + type noMethod FirewallListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type FirewallListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *FirewallListWarningData) MarshalJSON() ([]byte, error) { + type noMethod FirewallListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ForwardingRule: A ForwardingRule resource. A ForwardingRule resource // specifies which pool of target virtual machines to forward a packet // to if it matches the given [IPAddress, IPProtocol, ports] tuple. @@ -4594,19 +6386,34 @@ type ForwardingRule struct { // IPAddress: The IP address that this forwarding rule is serving on // behalf of. // - // For global forwarding rules, the address must be a global IP. For - // regional forwarding rules, the address must live in the same region - // as the forwarding rule. 
By default, this field is empty and an - // ephemeral IPv4 address from the same scope (global or regional) will - // be assigned. A regional forwarding rule supports IPv4 only. A global - // forwarding rule supports either IPv4 or IPv6. + // Addresses are restricted based on the forwarding rule's load + // balancing scheme (EXTERNAL or INTERNAL) and scope (global or + // regional). + // + // When the load balancing scheme is EXTERNAL, for global forwarding + // rules, the address must be a global IP, and for regional forwarding + // rules, the address must live in the same region as the forwarding + // rule. If this field is empty, an ephemeral IPv4 address from the same + // scope (global or regional) will be assigned. A regional forwarding + // rule supports IPv4 only. A global forwarding rule supports either + // IPv4 or IPv6. // // When the load balancing scheme is INTERNAL, this can only be an RFC - // 1918 IP address belonging to the network/subnetwork configured for - // the forwarding rule. A reserved address cannot be used. If the field - // is empty, the IP address will be automatically allocated from the - // internal IP range of the subnetwork or network configured for this - // forwarding rule. + // 1918 IP address belonging to the network/subnet configured for the + // forwarding rule. By default, if this field is empty, an ephemeral + // internal IP address will be automatically allocated from the IP range + // of the subnet or network configured for this forwarding rule. + // + // An address can be specified either by a literal IP address or a URL + // reference to an existing Address resource. The following examples are + // all valid: + // - 100.1.2.3 + // - + // https://www.googleapis.com/compute/v1/projects/project/regions/region/addresses/address + // - projects/project/regions/region/addresses/address + // - regions/region/addresses/address + // - global/addresses/address + // - address IPAddress string `json:"IPAddress,omitempty"` // IPProtocol: The IP protocol to which this rule applies. Valid options @@ -4796,6 +6603,9 @@ type ForwardingRuleAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *ForwardingRuleAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -4823,6 +6633,108 @@ func (s *ForwardingRuleAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ForwardingRuleAggregatedListWarning: [Output Only] Informational +// warning message. +type ForwardingRuleAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*ForwardingRuleAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ForwardingRuleAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod ForwardingRuleAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ForwardingRuleAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ForwardingRuleAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod ForwardingRuleAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ForwardingRuleList: Contains a list of ForwardingRule resources. type ForwardingRuleList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -4846,6 +6758,9 @@ type ForwardingRuleList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *ForwardingRuleListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -4873,6 +6788,108 @@ func (s *ForwardingRuleList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ForwardingRuleListWarning: [Output Only] Informational warning +// message. +type ForwardingRuleListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*ForwardingRuleListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ForwardingRuleListWarning) MarshalJSON() ([]byte, error) { + type noMethod ForwardingRuleListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ForwardingRuleListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ForwardingRuleListWarningData) MarshalJSON() ([]byte, error) { + type noMethod ForwardingRuleListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type ForwardingRulesScopedList struct { // ForwardingRules: List of forwarding rules contained in this scope. ForwardingRules []*ForwardingRule `json:"forwardingRules,omitempty"` @@ -4915,9 +6932,13 @@ type ForwardingRulesScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -4928,7 +6949,9 @@ type ForwardingRulesScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -5049,8 +7072,7 @@ type GuestOsFeature struct { // Type: The type of supported feature. Currently only // VIRTIO_SCSI_MULTIQUEUE is supported. 
For newer Windows images, the // server might also populate this property with the value WINDOWS to - // indicate that this is a Windows image. This value is purely - // informational and does not enable or disable any features. + // indicate that this is a Windows image. // // Possible values: // "FEATURE_TYPE_UNSPECIFIED" @@ -5302,6 +7324,9 @@ type HealthCheckList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *HealthCheckListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -5329,6 +7354,107 @@ func (s *HealthCheckList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// HealthCheckListWarning: [Output Only] Informational warning message. +type HealthCheckListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*HealthCheckListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HealthCheckListWarning) MarshalJSON() ([]byte, error) { + type noMethod HealthCheckListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type HealthCheckListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. 
For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HealthCheckListWarningData) MarshalJSON() ([]byte, error) { + type noMethod HealthCheckListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // HealthCheckReference: A full or valid partial URL to a health check. // For example, the following are valid URLs: // - @@ -5555,6 +7681,9 @@ type HttpHealthCheckList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *HttpHealthCheckListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -5582,6 +7711,108 @@ func (s *HttpHealthCheckList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// HttpHealthCheckListWarning: [Output Only] Informational warning +// message. +type HttpHealthCheckListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. 
+ // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*HttpHealthCheckListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HttpHealthCheckListWarning) MarshalJSON() ([]byte, error) { + type noMethod HttpHealthCheckListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type HttpHealthCheckListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HttpHealthCheckListWarningData) MarshalJSON() ([]byte, error) { + type noMethod HttpHealthCheckListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // HttpsHealthCheck: An HttpsHealthCheck resource. This resource defines // a template for how individual instances should be checked for health, // via HTTPS. @@ -5695,6 +7926,9 @@ type HttpsHealthCheckList struct { // SelfLink: [Output Only] Server-defined URL for this resource. 
SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *HttpsHealthCheckListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -5722,6 +7956,108 @@ func (s *HttpsHealthCheckList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// HttpsHealthCheckListWarning: [Output Only] Informational warning +// message. +type HttpsHealthCheckListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*HttpsHealthCheckListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HttpsHealthCheckListWarning) MarshalJSON() ([]byte, error) { + type noMethod HttpsHealthCheckListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type HttpsHealthCheckListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. 
Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HttpsHealthCheckListWarningData) MarshalJSON() ([]byte, error) { + type noMethod HttpsHealthCheckListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Image: An Image resource. type Image struct { // ArchiveSizeBytes: Size of the image tar.gz archive stored in Google @@ -5758,10 +8094,9 @@ type Image struct { // higher. Linux images with kernel versions 3.17 and higher will // support VIRTIO_SCSI_MULTIQUEUE. // - // For new Windows images, the server might also populate this field - // with the value WINDOWS, to indicate that this is a Windows image. - // This value is purely informational and does not enable or disable any - // features. + // For newer Windows images, the server might also populate this + // property with the value WINDOWS to indicate that this is a Windows + // image. GuestOsFeatures []*GuestOsFeature `json:"guestOsFeatures,omitempty"` // Id: [Output Only] The unique identifier for the resource. This @@ -5840,6 +8175,24 @@ type Image struct { // the current or a previous instance of a given disk name. SourceDiskId string `json:"sourceDiskId,omitempty"` + // SourceImage: URL of the source image used to create this image. This + // can be a full or valid partial URL. You must provide exactly one of: + // + // - this property, or + // - the rawDisk.source property, or + // - the sourceDisk property in order to create an image. + SourceImage string `json:"sourceImage,omitempty"` + + // SourceImageEncryptionKey: The customer-supplied encryption key of the + // source image. Required if the source image is protected by a + // customer-supplied encryption key. + SourceImageEncryptionKey *CustomerEncryptionKey `json:"sourceImageEncryptionKey,omitempty"` + + // SourceImageId: [Output Only] The ID value of the image used to create + // this image. This value may be used to determine whether the image was + // taken from the current or a previous instance of a given image name. + SourceImageId string `json:"sourceImageId,omitempty"` + // SourceType: The type of the image used to create this disk. 
The // default and only value is RAW // @@ -5952,6 +8305,9 @@ type ImageList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *ImageListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -5979,6 +8335,107 @@ func (s *ImageList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ImageListWarning: [Output Only] Informational warning message. +type ImageListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*ImageListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ImageListWarning) MarshalJSON() ([]byte, error) { + type noMethod ImageListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ImageListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. 
Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ImageListWarningData) MarshalJSON() ([]byte, error) { + type noMethod ImageListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Instance: An Instance resource. type Instance struct { // CanIpForward: Allows this instance to send and receive packets with @@ -5994,6 +8451,10 @@ type Instance struct { // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` + // DeletionProtection: Whether the resource should be protected against + // deletion. + DeletionProtection bool `json:"deletionProtection,omitempty"` + // Description: An optional description of this resource. Provide this // property when you create the resource. Description string `json:"description,omitempty"` @@ -6056,6 +8517,12 @@ type Instance struct { // This includes custom metadata and predefined keys. Metadata *Metadata `json:"metadata,omitempty"` + // MinCpuPlatform: Specifies a minimum CPU platform for the VM instance. + // Applicable values are the friendly names of CPU platforms, such as + // minCpuPlatform: "Intel Haswell" or minCpuPlatform: "Intel Sandy + // Bridge". + MinCpuPlatform string `json:"minCpuPlatform,omitempty"` + // Name: The name of the resource, provided by the client when initially // creating the resource. The resource name must be 1-63 characters // long, and comply with RFC1035. Specifically, the name must be 1-63 @@ -6171,6 +8638,9 @@ type InstanceAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InstanceAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -6198,6 +8668,108 @@ func (s *InstanceAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstanceAggregatedListWarning: [Output Only] Informational warning +// message. +type InstanceAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. 
For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstanceAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod InstanceAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
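// Illustrative sketch (not part of the generated bindings): the Warning and
// WarningData pairs added throughout this file carry a machine-readable Code
// plus key/value Data entries such as {"key": "scope", "value": "zones/us-east1-d"}.
// Assuming the types defined in this package and the standard library "log"
// package, a caller could surface such a warning from an aggregated list
// response like this:

func logInstanceAggregatedListWarning(list *InstanceAggregatedList) {
	if list == nil || list.Warning == nil {
		return // no informational warning attached to this response
	}
	// Code and Message describe the warning itself.
	log.Printf("compute warning %s: %s", list.Warning.Code, list.Warning.Message)
	// Data carries additional key/value context, for example the affected scope.
	for _, d := range list.Warning.Data {
		log.Printf("  %s = %s", d.Key, d.Value)
	}
}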
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InstanceAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstanceGroup struct { // CreationTimestamp: [Output Only] The creation timestamp for this // instance group in RFC3339 text format. @@ -6311,6 +8883,9 @@ type InstanceGroupAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InstanceGroupAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -6338,6 +8913,108 @@ func (s *InstanceGroupAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstanceGroupAggregatedListWarning: [Output Only] Informational +// warning message. +type InstanceGroupAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstanceGroupAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceGroupAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // InstanceGroupList: A list of InstanceGroup resources. type InstanceGroupList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -6362,6 +9039,9 @@ type InstanceGroupList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InstanceGroupListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -6389,6 +9069,108 @@ func (s *InstanceGroupList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstanceGroupListWarning: [Output Only] Informational warning +// message. +type InstanceGroupListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstanceGroupListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupListWarning) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceGroupListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // InstanceGroupManager: An Instance Group Manager resource. type InstanceGroupManager struct { // BaseInstanceName: The base instance name to use for instances in this @@ -6587,6 +9369,9 @@ type InstanceGroupManagerAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InstanceGroupManagerAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -6614,6 +9399,108 @@ func (s *InstanceGroupManagerAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstanceGroupManagerAggregatedListWarning: [Output Only] +// Informational warning message. +type InstanceGroupManagerAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstanceGroupManagerAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupManagerAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManagerAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceGroupManagerAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupManagerAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManagerAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // InstanceGroupManagerList: [Output Only] A list of managed instance // groups. type InstanceGroupManagerList struct { @@ -6640,6 +9527,9 @@ type InstanceGroupManagerList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InstanceGroupManagerListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -6667,6 +9557,108 @@ func (s *InstanceGroupManagerList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstanceGroupManagerListWarning: [Output Only] Informational warning +// message. +type InstanceGroupManagerListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstanceGroupManagerListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupManagerListWarning) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManagerListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceGroupManagerListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupManagerListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupManagerListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstanceGroupManagersAbandonInstancesRequest struct { // Instances: The URLs of one or more instances to abandon. This can be // a full URL or a partial URL, such as @@ -6832,9 +9824,13 @@ type InstanceGroupManagersScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -6845,7 +9841,9 @@ type InstanceGroupManagersScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -7038,6 +10036,9 @@ type InstanceGroupsListInstances struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InstanceGroupsListInstancesWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -7065,6 +10066,108 @@ func (s *InstanceGroupsListInstances) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstanceGroupsListInstancesWarning: [Output Only] Informational +// warning message. +type InstanceGroupsListInstancesWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. 
+ // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstanceGroupsListInstancesWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupsListInstancesWarning) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupsListInstancesWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceGroupsListInstancesWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupsListInstancesWarningData) MarshalJSON() ([]byte, error) { + type noMethod InstanceGroupsListInstancesWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstanceGroupsListInstancesRequest struct { // InstanceState: A filter for the state of the instances in the // instance group. Valid options are ALL or RUNNING. 
If you do not @@ -7170,9 +10273,13 @@ type InstanceGroupsScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -7183,7 +10290,9 @@ type InstanceGroupsScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -7315,6 +10424,9 @@ type InstanceList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InstanceListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -7342,6 +10454,107 @@ func (s *InstanceList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstanceListWarning: [Output Only] Informational warning message. +type InstanceListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstanceListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceListWarning) MarshalJSON() ([]byte, error) { + type noMethod InstanceListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InstanceListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstanceMoveRequest struct { // DestinationZone: The URL of the destination zone to move the // instance. This can be a full or partial URL. For example, the @@ -7421,6 +10634,14 @@ type InstanceProperties struct { // more information. Metadata *Metadata `json:"metadata,omitempty"` + // MinCpuPlatform: Minimum cpu/platform to be used by this instance. The + // instance may be scheduled on the specified or newer cpu/platform. + // Applicable values are the friendly names of CPU platforms, such as + // minCpuPlatform: "Intel Haswell" or minCpuPlatform: "Intel Sandy + // Bridge". For more information, read Specifying a Minimum CPU + // Platform. + MinCpuPlatform string `json:"minCpuPlatform,omitempty"` + // NetworkInterfaces: An array of network access configurations for this // interface. NetworkInterfaces []*NetworkInterface `json:"networkInterfaces,omitempty"` @@ -7577,6 +10798,9 @@ type InstanceTemplateList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *InstanceTemplateListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` @@ -7604,6 +10828,108 @@ func (s *InstanceTemplateList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstanceTemplateListWarning: [Output Only] Informational warning +// message. +type InstanceTemplateListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InstanceTemplateListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceTemplateListWarning) MarshalJSON() ([]byte, error) { + type noMethod InstanceTemplateListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceTemplateListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceTemplateListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InstanceTemplateListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstanceWithNamedPorts struct { // Instance: [Output Only] The URL of the instance. Instance string `json:"instance,omitempty"` @@ -7689,9 +11015,13 @@ type InstancesScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -7702,7 +11032,9 @@ type InstancesScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -7865,6 +11197,35 @@ func (s *InstancesSetMachineTypeRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type InstancesSetMinCpuPlatformRequest struct { + // MinCpuPlatform: Minimum cpu/platform this instance should be started + // at. + MinCpuPlatform string `json:"minCpuPlatform,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MinCpuPlatform") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MinCpuPlatform") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *InstancesSetMinCpuPlatformRequest) MarshalJSON() ([]byte, error) { + type noMethod InstancesSetMinCpuPlatformRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstancesSetServiceAccountRequest struct { // Email: Email address of the service account. Email string `json:"email,omitempty"` @@ -7930,6 +11291,1285 @@ func (s *InstancesStartWithEncryptionKeyRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// Interconnect: Protocol definitions for Mixer API to support +// Interconnect. Next available tag: 25 +type Interconnect struct { + // AdminEnabled: Administrative status of the interconnect. When this is + // set to ?true?, the Interconnect is functional and may carry traffic + // (assuming there are functional InterconnectAttachments and other + // requirements are satisfied). When set to ?false?, no packets will be + // carried over this Interconnect and no BGP routes will be exchanged + // over it. By default, it is set to ?true?. + AdminEnabled bool `json:"adminEnabled,omitempty"` + + // CircuitInfos: [Output Only] List of CircuitInfo objects, that + // describe the individual circuits in this LAG. + CircuitInfos []*InterconnectCircuitInfo `json:"circuitInfos,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // CustomerName: Customer name, to put in the Letter of Authorization as + // the party authorized to request a crossconnect. + CustomerName string `json:"customerName,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // ExpectedOutages: [Output Only] List of outages expected for this + // Interconnect. + ExpectedOutages []*InterconnectOutageNotification `json:"expectedOutages,omitempty"` + + // GoogleIpAddress: [Output Only] IP address configured on the Google + // side of the Interconnect link. This can be used only for ping tests. + GoogleIpAddress string `json:"googleIpAddress,omitempty"` + + // GoogleReferenceId: [Output Only] Google reference ID; to be used when + // raising support tickets with Google or otherwise to debug backend + // connectivity issues. + GoogleReferenceId string `json:"googleReferenceId,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // InterconnectAttachments: [Output Only] A list of the URLs of all + // InterconnectAttachments configured to use this Interconnect. + InterconnectAttachments []string `json:"interconnectAttachments,omitempty"` + + // Possible values: + // "DEDICATED" + // "IT_PRIVATE" + InterconnectType string `json:"interconnectType,omitempty"` + + // Kind: [Output Only] Type of the resource. Always compute#interconnect + // for interconnects. + Kind string `json:"kind,omitempty"` + + // Possible values: + // "LINK_TYPE_ETHERNET_10G_LR" + LinkType string `json:"linkType,omitempty"` + + // Location: URL of the InterconnectLocation object that represents + // where this connection is to be provisioned. + Location string `json:"location,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. 
Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // NocContactEmail: Email address to contact the customer NOC for + // operations and maintenance notifications regarding this Interconnect. + // If specified, this will be used for notifications in addition to all + // other forms described, such as Stackdriver logs alerting and Cloud + // Notifications. + NocContactEmail string `json:"nocContactEmail,omitempty"` + + // OperationalStatus: [Output Only] The current status of whether or not + // this Interconnect is functional. + // + // Possible values: + // "ACTIVE" + // "OS_ACTIVE" + // "OS_UNPROVISIONED" + // "UNPROVISIONED" + OperationalStatus string `json:"operationalStatus,omitempty"` + + // PeerIpAddress: [Output Only] IP address configured on the customer + // side of the Interconnect link. The customer should configure this IP + // address during turnup when prompted by Google NOC. This can be used + // only for ping tests. + PeerIpAddress string `json:"peerIpAddress,omitempty"` + + // ProvisionedLinkCount: [Output Only] Number of links actually + // provisioned in this interconnect. + ProvisionedLinkCount int64 `json:"provisionedLinkCount,omitempty"` + + // RequestedLinkCount: Target number of physical links in the link + // bundle, as requested by the customer. + RequestedLinkCount int64 `json:"requestedLinkCount,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AdminEnabled") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AdminEnabled") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Interconnect) MarshalJSON() ([]byte, error) { + type noMethod Interconnect + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectAttachment: Protocol definitions for Mixer API to support +// InterconnectAttachment. Next available tag: 23 +type InterconnectAttachment struct { + // CloudRouterIpAddress: [Output Only] IPv4 address + prefix length to + // be configured on Cloud Router Interface for this interconnect + // attachment. + CloudRouterIpAddress string `json:"cloudRouterIpAddress,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. 
+ CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // CustomerRouterIpAddress: [Output Only] IPv4 address + prefix length + // to be configured on the customer router subinterface for this + // interconnect attachment. + CustomerRouterIpAddress string `json:"customerRouterIpAddress,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // GoogleReferenceId: [Output Only] Google reference ID, to be used when + // raising support tickets with Google or otherwise to debug backend + // connectivity issues. + GoogleReferenceId string `json:"googleReferenceId,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Interconnect: URL of the underlying Interconnect object that this + // attachment's traffic will traverse through. + Interconnect string `json:"interconnect,omitempty"` + + // Kind: [Output Only] Type of the resource. Always + // compute#interconnectAttachment for interconnect attachments. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // OperationalStatus: [Output Only] The current status of whether or not + // this interconnect attachment is functional. + // + // Possible values: + // "ACTIVE" + // "OS_ACTIVE" + // "OS_UNPROVISIONED" + // "UNPROVISIONED" + OperationalStatus string `json:"operationalStatus,omitempty"` + + // PrivateInterconnectInfo: [Output Only] Information specific to a + // Private InterconnectAttachment. Only populated if the interconnect + // that this is attached is of type IT_PRIVATE. + PrivateInterconnectInfo *InterconnectAttachmentPrivateInfo `json:"privateInterconnectInfo,omitempty"` + + // Region: [Output Only] URL of the region where the regional + // interconnect attachment resides. + Region string `json:"region,omitempty"` + + // Router: URL of the cloud router to be used for dynamic routing. This + // router must be in the same region as this InterconnectAttachment. The + // InterconnectAttachment will automatically connect the Interconnect to + // the network & region within which the Cloud Router is configured. + Router string `json:"router,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. + // "CloudRouterIpAddress") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. 
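// Illustrative sketch (not part of the generated bindings): per the field
// documentation above, the caller of an attachment insert supplies an
// RFC1035-compliant Name plus the URLs of the parent Interconnect and of a
// Cloud Router in the target region; most other fields are output only. The
// project, interconnect, and router URLs below are placeholders:

func exampleInterconnectAttachment() *InterconnectAttachment {
	return &InterconnectAttachment{
		Name:         "my-attachment", // 1-63 characters, RFC1035 format
		Interconnect: "https://www.googleapis.com/compute/v1/projects/example-project/global/interconnects/example-interconnect",
		Router:       "https://www.googleapis.com/compute/v1/projects/example-project/regions/us-central1/routers/example-router",
		Description:  "illustrative attachment body",
	}
}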
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CloudRouterIpAddress") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectAttachment) MarshalJSON() ([]byte, error) { + type noMethod InterconnectAttachment + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InterconnectAttachmentAggregatedList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of InterconnectAttachmentsScopedList resources. + Items map[string]InterconnectAttachmentsScopedList `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#interconnectAttachmentAggregatedList for aggregated lists of + // interconnect attachments. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *InterconnectAttachmentAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectAttachmentAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod InterconnectAttachmentAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectAttachmentAggregatedListWarning: [Output Only] +// Informational warning message. +type InterconnectAttachmentAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InterconnectAttachmentAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectAttachmentAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod InterconnectAttachmentAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InterconnectAttachmentAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectAttachmentAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InterconnectAttachmentAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectAttachmentList: Response to the list request, and +// contains a list of interconnect attachments. +type InterconnectAttachmentList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of InterconnectAttachment resources. + Items []*InterconnectAttachment `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#interconnectAttachmentList for lists of interconnect + // attachments. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *InterconnectAttachmentListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectAttachmentList) MarshalJSON() ([]byte, error) { + type noMethod InterconnectAttachmentList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectAttachmentListWarning: [Output Only] Informational +// warning message. +type InterconnectAttachmentListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InterconnectAttachmentListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectAttachmentListWarning) MarshalJSON() ([]byte, error) { + type noMethod InterconnectAttachmentListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InterconnectAttachmentListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectAttachmentListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InterconnectAttachmentListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectAttachmentPrivateInfo: Private information for an +// interconnect attachment when this belongs to an interconnect of type +// IT_PRIVATE. +type InterconnectAttachmentPrivateInfo struct { + // Tag8021q: [Output Only] 802.1q encapsulation tag to be used for + // traffic between Google and the customer, going to and from this + // network and region. + Tag8021q int64 `json:"tag8021q,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Tag8021q") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Tag8021q") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectAttachmentPrivateInfo) MarshalJSON() ([]byte, error) { + type noMethod InterconnectAttachmentPrivateInfo + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InterconnectAttachmentsScopedList struct { + // InterconnectAttachments: List of interconnect attachments contained + // in this scope. + InterconnectAttachments []*InterconnectAttachment `json:"interconnectAttachments,omitempty"` + + // Warning: Informational warning which replaces the list of addresses + // when the list is empty. + Warning *InterconnectAttachmentsScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "InterconnectAttachments") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InterconnectAttachments") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *InterconnectAttachmentsScopedList) MarshalJSON() ([]byte, error) { + type noMethod InterconnectAttachmentsScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectAttachmentsScopedListWarning: Informational warning which +// replaces the list of addresses when the list is empty. +type InterconnectAttachmentsScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InterconnectAttachmentsScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectAttachmentsScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod InterconnectAttachmentsScopedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InterconnectAttachmentsScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). 
+ Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectAttachmentsScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InterconnectAttachmentsScopedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectCircuitInfo: Describes a single physical circuit between +// the Customer and Google. CircuitInfo objects are created by Google, +// so all fields are output only. Next id: 4 +type InterconnectCircuitInfo struct { + // CustomerDemarcId: Customer-side demarc ID for this circuit. This will + // only be set if it was provided by the Customer to Google during + // circuit turn-up. + CustomerDemarcId string `json:"customerDemarcId,omitempty"` + + // GoogleCircuitId: Google-assigned unique ID for this circuit. Assigned + // at circuit turn-up. + GoogleCircuitId string `json:"googleCircuitId,omitempty"` + + // GoogleDemarcId: Google-side demarc ID for this circuit. Assigned at + // circuit turn-up and provided by Google to the customer in the LOA. + GoogleDemarcId string `json:"googleDemarcId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CustomerDemarcId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CustomerDemarcId") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectCircuitInfo) MarshalJSON() ([]byte, error) { + type noMethod InterconnectCircuitInfo + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectList: Response to the list request, and contains a list +// of interconnects. +type InterconnectList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. 
+ Id string `json:"id,omitempty"` + + // Items: A list of Interconnect resources. + Items []*Interconnect `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#interconnectList + // for lists of interconnects. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *InterconnectListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectList) MarshalJSON() ([]byte, error) { + type noMethod InterconnectList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectListWarning: [Output Only] Informational warning message. +type InterconnectListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InterconnectListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. 
+ Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectListWarning) MarshalJSON() ([]byte, error) { + type noMethod InterconnectListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InterconnectListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InterconnectListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectLocation: Protocol definitions for Mixer API to support +// InterconnectLocation. +type InterconnectLocation struct { + // Address: [Output Only] The postal address of the Point of Presence, + // each line in the address is separated by a newline character. + Address string `json:"address,omitempty"` + + // AvailabilityZone: Availability zone for this location. Within a city, + // maintenance will not be simultaneously scheduled in more than one + // availability zone. Example: "zone1" or "zone2". 
+ AvailabilityZone string `json:"availabilityZone,omitempty"` + + // City: City designator used by the Interconnect UI to locate this + // InterconnectLocation within the Continent. For example: "Chicago, + // IL", "Amsterdam, Netherlands". + City string `json:"city,omitempty"` + + // Continent: Continent for this location. Used by the location picker + // in the Interconnect UI. + // + // Possible values: + // "AFRICA" + // "ASIA_PAC" + // "C_AFRICA" + // "C_ASIA_PAC" + // "C_EUROPE" + // "C_NORTH_AMERICA" + // "C_SOUTH_AMERICA" + // "EUROPE" + // "NORTH_AMERICA" + // "SOUTH_AMERICA" + Continent string `json:"continent,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: [Output Only] An optional description of the resource. + Description string `json:"description,omitempty"` + + // FacilityProvider: [Output Only] The name of the provider for this + // facility (e.g., EQUINIX). + FacilityProvider string `json:"facilityProvider,omitempty"` + + // FacilityProviderFacilityId: [Output Only] A provider-assigned + // Identifier for this facility (e.g., Ashburn-DC1). + FacilityProviderFacilityId string `json:"facilityProviderFacilityId,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of the resource. Always + // compute#interconnectLocation for interconnect locations. + Kind string `json:"kind,omitempty"` + + // Name: [Output Only] Name of the resource. + Name string `json:"name,omitempty"` + + // PeeringdbFacilityId: [Output Only] The peeringdb identifier for this + // facility (corresponding with a netfac type in peeringdb). + PeeringdbFacilityId string `json:"peeringdbFacilityId,omitempty"` + + // RegionInfos: [Output Only] A list of InterconnectLocation.RegionInfo + // objects, that describe parameters pertaining to the relation between + // this InterconnectLocation and various Google Cloud regions. + RegionInfos []*InterconnectLocationRegionInfo `json:"regionInfos,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Address") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Address") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *InterconnectLocation) MarshalJSON() ([]byte, error) { + type noMethod InterconnectLocation + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectLocationList: Response to the list request, and contains +// a list of interconnect locations. +type InterconnectLocationList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of InterconnectLocation resources. + Items []*InterconnectLocation `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#interconnectLocationList for lists of interconnect locations. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *InterconnectLocationListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectLocationList) MarshalJSON() ([]byte, error) { + type noMethod InterconnectLocationList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectLocationListWarning: [Output Only] Informational warning +// message. +type InterconnectLocationListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*InterconnectLocationListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectLocationListWarning) MarshalJSON() ([]byte, error) { + type noMethod InterconnectLocationListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InterconnectLocationListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectLocationListWarningData) MarshalJSON() ([]byte, error) { + type noMethod InterconnectLocationListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectLocationRegionInfo: Information about any potential +// InterconnectAttachments between an Interconnect at a specific +// InterconnectLocation, and a specific Cloud Region. +type InterconnectLocationRegionInfo struct { + // ExpectedRttMs: Expected round-trip time in milliseconds, from this + // InterconnectLocation to a VM in this region. + ExpectedRttMs int64 `json:"expectedRttMs,omitempty,string"` + + // LocationPresence: Identifies the network presence of this location. + // + // Possible values: + // "GLOBAL" + // "LOCAL_REGION" + // "LP_GLOBAL" + // "LP_LOCAL_REGION" + LocationPresence string `json:"locationPresence,omitempty"` + + // Region: URL for the region of this location. + Region string `json:"region,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ExpectedRttMs") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ExpectedRttMs") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectLocationRegionInfo) MarshalJSON() ([]byte, error) { + type noMethod InterconnectLocationRegionInfo + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectOutageNotification: Description of a planned outage on +// this Interconnect. Next id: 9 +type InterconnectOutageNotification struct { + // AffectedCircuits: Iff issue_type is IT_PARTIAL_OUTAGE, a list of the + // Google-side circuit IDs that will be affected. + AffectedCircuits []string `json:"affectedCircuits,omitempty"` + + // Description: Short user-visible description of the purpose of the + // outage. + Description string `json:"description,omitempty"` + + EndTime int64 `json:"endTime,omitempty,string"` + + // Possible values: + // "IT_OUTAGE" + // "IT_PARTIAL_OUTAGE" + // "OUTAGE" + // "PARTIAL_OUTAGE" + IssueType string `json:"issueType,omitempty"` + + // Name: Unique identifier for this outage notification. + Name string `json:"name,omitempty"` + + // Possible values: + // "GOOGLE" + // "NSRC_GOOGLE" + Source string `json:"source,omitempty"` + + // StartTime: Scheduled start and end times for the outage (milliseconds + // since Unix epoch). 
+ StartTime int64 `json:"startTime,omitempty,string"` + + // Possible values: + // "ACTIVE" + // "CANCELLED" + // "NS_ACTIVE" + // "NS_CANCELED" + State string `json:"state,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AffectedCircuits") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AffectedCircuits") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectOutageNotification) MarshalJSON() ([]byte, error) { + type noMethod InterconnectOutageNotification + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // License: A license resource. type License struct { // ChargesUseFee: [Output Only] Deprecated. This field no longer @@ -8113,6 +12753,9 @@ type MachineTypeAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *MachineTypeAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -8140,6 +12783,108 @@ func (s *MachineTypeAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// MachineTypeAggregatedListWarning: [Output Only] Informational warning +// message. +type MachineTypeAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*MachineTypeAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *MachineTypeAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod MachineTypeAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type MachineTypeAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *MachineTypeAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod MachineTypeAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // MachineTypeList: Contains a list of machine types. type MachineTypeList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -8164,6 +12909,9 @@ type MachineTypeList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *MachineTypeListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` @@ -8191,6 +12939,107 @@ func (s *MachineTypeList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// MachineTypeListWarning: [Output Only] Informational warning message. +type MachineTypeListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*MachineTypeListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *MachineTypeListWarning) MarshalJSON() ([]byte, error) { + type noMethod MachineTypeListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type MachineTypeListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *MachineTypeListWarningData) MarshalJSON() ([]byte, error) { + type noMethod MachineTypeListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type MachineTypesScopedList struct { // MachineTypes: [Output Only] List of machine types contained in this // scope. @@ -8233,9 +13082,13 @@ type MachineTypesScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -8246,7 +13099,9 @@ type MachineTypesScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -8657,6 +13512,11 @@ type Network struct { // Peerings: [Output Only] List of network peerings for the resource. Peerings []*NetworkPeering `json:"peerings,omitempty"` + // RoutingConfig: The network-level routing configuration for this + // network. Used by Cloud Router to determine what type of network-wide + // routing behavior to enforce. + RoutingConfig *NetworkRoutingConfig `json:"routingConfig,omitempty"` + // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` @@ -8795,6 +13655,9 @@ type NetworkList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *NetworkListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -8822,6 +13685,107 @@ func (s *NetworkList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// NetworkListWarning: [Output Only] Informational warning message. +type NetworkListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*NetworkListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NetworkListWarning) MarshalJSON() ([]byte, error) { + type noMethod NetworkListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type NetworkListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NetworkListWarningData) MarshalJSON() ([]byte, error) { + type noMethod NetworkListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // NetworkPeering: A network peering attached to a network resource. The // message includes the peering name, peer network, peering state, and a // flag indicating whether Google Compute Engine should automatically @@ -8884,6 +13848,45 @@ func (s *NetworkPeering) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// NetworkRoutingConfig: A routing configuration attached to a network +// resource. The message includes the list of routers associated with +// the network, and a flag indicating the type of routing behavior to +// enforce network-wide. +type NetworkRoutingConfig struct { + // RoutingMode: The network-wide routing mode to use. If set to + // REGIONAL, this network's cloud routers will only advertise routes + // with subnetworks of this network in the same region as the router. If + // set to GLOBAL, this network's cloud routers will advertise routes + // with all subnetworks of this network, across regions. + // + // Possible values: + // "GLOBAL" + // "REGIONAL" + RoutingMode string `json:"routingMode,omitempty"` + + // ForceSendFields is a list of field names (e.g. "RoutingMode") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "RoutingMode") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NetworkRoutingConfig) MarshalJSON() ([]byte, error) { + type noMethod NetworkRoutingConfig + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type NetworksAddPeeringRequest struct { // AutoCreateRoutes: Whether Google Compute Engine manages the routes // automatically. 
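
A rough sketch of how the new RoutingConfig field on Network might be populated; the import path and resource name are assumptions, and the marshalled body is what a networks API call would carry:

    package main

    import (
        "encoding/json"
        "fmt"

        compute "google.golang.org/api/compute/v1"
    )

    func main() {
        n := &compute.Network{
            Name:                  "example-net",
            AutoCreateSubnetworks: true,
            // GLOBAL: cloud routers advertise routes to all subnetworks across regions.
            // REGIONAL: advertisement is limited to subnetworks in the router's region.
            RoutingConfig: &compute.NetworkRoutingConfig{RoutingMode: "GLOBAL"},
        }
        body, _ := json.Marshal(n) // goes through the generated MarshalJSON shown above
        fmt.Println(string(body))
    }
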
@@ -9150,9 +14153,13 @@ type OperationWarnings struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -9163,7 +14170,9 @@ type OperationWarnings struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -9259,6 +14268,9 @@ type OperationAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *OperationAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -9286,6 +14298,108 @@ func (s *OperationAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// OperationAggregatedListWarning: [Output Only] Informational warning +// message. +type OperationAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*OperationAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *OperationAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod OperationAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type OperationAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *OperationAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod OperationAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // OperationList: Contains a list of Operation resources. type OperationList struct { // Id: [Output Only] The unique identifier for the resource. This @@ -9310,6 +14424,9 @@ type OperationList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *OperationListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -9337,6 +14454,107 @@ func (s *OperationList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// OperationListWarning: [Output Only] Informational warning message. +type OperationListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*OperationListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *OperationListWarning) MarshalJSON() ([]byte, error) { + type noMethod OperationListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type OperationListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. 
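
Each list type in this diff gains an optional page-level Warning; a small sketch, assuming the compute/v1 import path, of how a caller might surface it (the sample values mirror the NO_RESULTS_ON_PAGE example in the comments above and are illustrative only):

    package main

    import (
        "log"

        compute "google.golang.org/api/compute/v1"
    )

    // logListWarning reports the page-level warning, if any, carried by an
    // OperationList page obtained elsewhere from an operations list call.
    func logListWarning(list *compute.OperationList) {
        if list == nil || list.Warning == nil {
            return
        }
        log.Printf("compute warning %s: %s", list.Warning.Code, list.Warning.Message)
        for _, d := range list.Warning.Data {
            log.Printf("  %s = %s", d.Key, d.Value)
        }
    }

    func main() {
        logListWarning(&compute.OperationList{
            Warning: &compute.OperationListWarning{
                Code:    "NO_RESULTS_ON_PAGE",
                Message: "There are no results for the request.",
                Data:    []*compute.OperationListWarningData{{Key: "scope", Value: "zones/us-east1-d"}},
            },
        })
    }
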
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *OperationListWarningData) MarshalJSON() ([]byte, error) { + type noMethod OperationListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type OperationsScopedList struct { // Operations: [Output Only] List of operations contained in this scope. Operations []*Operation `json:"operations,omitempty"` @@ -9378,9 +14596,13 @@ type OperationsScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -9391,7 +14613,9 @@ type OperationsScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -9946,6 +15170,9 @@ type RegionAutoscalerList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *RegionAutoscalerListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -9973,6 +15200,108 @@ func (s *RegionAutoscalerList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// RegionAutoscalerListWarning: [Output Only] Informational warning +// message. +type RegionAutoscalerListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*RegionAutoscalerListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RegionAutoscalerListWarning) MarshalJSON() ([]byte, error) { + type noMethod RegionAutoscalerListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RegionAutoscalerListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RegionAutoscalerListWarningData) MarshalJSON() ([]byte, error) { + type noMethod RegionAutoscalerListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // RegionInstanceGroupList: Contains a list of InstanceGroup resources. type RegionInstanceGroupList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -9996,6 +15325,9 @@ type RegionInstanceGroupList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *RegionInstanceGroupListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` @@ -10023,6 +15355,108 @@ func (s *RegionInstanceGroupList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// RegionInstanceGroupListWarning: [Output Only] Informational warning +// message. +type RegionInstanceGroupListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*RegionInstanceGroupListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RegionInstanceGroupListWarning) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RegionInstanceGroupListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RegionInstanceGroupListWarningData) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // RegionInstanceGroupManagerList: Contains a list of managed instance // groups. type RegionInstanceGroupManagerList struct { @@ -10049,6 +15483,9 @@ type RegionInstanceGroupManagerList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *RegionInstanceGroupManagerListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -10076,6 +15513,108 @@ func (s *RegionInstanceGroupManagerList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// RegionInstanceGroupManagerListWarning: [Output Only] Informational +// warning message. +type RegionInstanceGroupManagerListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*RegionInstanceGroupManagerListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RegionInstanceGroupManagerListWarning) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupManagerListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RegionInstanceGroupManagerListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RegionInstanceGroupManagerListWarningData) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupManagerListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type RegionInstanceGroupManagersAbandonInstancesRequest struct { // Instances: The URLs of one or more instances to abandon. This can be // a full URL or a partial URL, such as @@ -10280,6 +15819,9 @@ type RegionInstanceGroupsListInstances struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *RegionInstanceGroupsListInstancesWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` @@ -10307,6 +15849,108 @@ func (s *RegionInstanceGroupsListInstances) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// RegionInstanceGroupsListInstancesWarning: [Output Only] Informational +// warning message. +type RegionInstanceGroupsListInstancesWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*RegionInstanceGroupsListInstancesWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RegionInstanceGroupsListInstancesWarning) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupsListInstancesWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RegionInstanceGroupsListInstancesWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. 
+ Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RegionInstanceGroupsListInstancesWarningData) MarshalJSON() ([]byte, error) { + type noMethod RegionInstanceGroupsListInstancesWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type RegionInstanceGroupsListInstancesRequest struct { // InstanceState: Instances in which state should be returned. Valid // options are: 'ALL', 'RUNNING'. By default, it lists all instances. @@ -10404,6 +16048,9 @@ type RegionList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *RegionListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -10431,6 +16078,107 @@ func (s *RegionList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// RegionListWarning: [Output Only] Informational warning message. +type RegionListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*RegionListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
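
For the listInstances request body shown above, a short sketch of the RUNNING filter and the JSON it produces (import path assumed):

    package main

    import (
        "encoding/json"
        "fmt"

        compute "google.golang.org/api/compute/v1"
    )

    func main() {
        // "RUNNING" restricts results to running instances; the default, "ALL",
        // returns every instance in the regional group.
        req := &compute.RegionInstanceGroupsListInstancesRequest{InstanceState: "RUNNING"}

        b, _ := json.Marshal(req)
        fmt.Println(string(b)) // {"instanceState":"RUNNING"}
    }
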
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RegionListWarning) MarshalJSON() ([]byte, error) { + type noMethod RegionListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RegionListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RegionListWarningData) MarshalJSON() ([]byte, error) { + type noMethod RegionListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ResourceCommitment: Commitment for a particular resource (a // Commitment is composed of one or more of these). 
type ResourceCommitment struct { @@ -10635,9 +16383,13 @@ type RouteWarnings struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -10648,7 +16400,9 @@ type RouteWarnings struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -10744,6 +16498,9 @@ type RouteList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *RouteListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -10771,6 +16528,107 @@ func (s *RouteList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// RouteListWarning: [Output Only] Informational warning message. +type RouteListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*RouteListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RouteListWarning) MarshalJSON() ([]byte, error) { + type noMethod RouteListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RouteListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RouteListWarningData) MarshalJSON() ([]byte, error) { + type noMethod RouteListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Router: Router resource. type Router struct { // Bgp: BGP information specific to this router. @@ -10870,6 +16728,9 @@ type RouterAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *RouterAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -10897,6 +16758,108 @@ func (s *RouterAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// RouterAggregatedListWarning: [Output Only] Informational warning +// message. +type RouterAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*RouterAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RouterAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod RouterAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RouterAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RouterAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod RouterAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type RouterBgp struct { // Asn: Local BGP Autonomous System Number (ASN). Must be an RFC6996 // private ASN, either 16-bit or 32-bit. The value will be fixed for @@ -10985,6 +16948,12 @@ type RouterInterface struct { // interface. IpRange string `json:"ipRange,omitempty"` + // LinkedInterconnectAttachment: URI of the linked interconnect + // attachment. It must be in the same region as the router. Each + // interface can have at most one linked resource and it could either be + // a VPN Tunnel or an interconnect attachment. + LinkedInterconnectAttachment string `json:"linkedInterconnectAttachment,omitempty"` + // LinkedVpnTunnel: URI of the linked VPN tunnel. It must be in the same // region as the router. Each interface can have at most one linked // resource and it could either be a VPN Tunnel or an interconnect @@ -11042,6 +17011,9 @@ type RouterList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *RouterListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -11069,6 +17041,107 @@ func (s *RouterList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// RouterListWarning: [Output Only] Informational warning message. +type RouterListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*RouterListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
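
A hedged sketch combining the RouterBgp ASN constraint with the new LinkedInterconnectAttachment interface field added above; the import path, partial URLs, and attachment name are illustrative assumptions:

    package main

    import (
        "encoding/json"
        "fmt"

        compute "google.golang.org/api/compute/v1"
    )

    func main() {
        r := &compute.Router{
            Name:    "example-router",
            Network: "global/networks/example-net", // partial URL, illustrative only
            // Asn must be an RFC6996 private ASN (16- or 32-bit), per RouterBgp above.
            Bgp: &compute.RouterBgp{Asn: 64512},
            Interfaces: []*compute.RouterInterface{{
                Name:    "if-attachment-1",
                IpRange: "169.254.10.1/30",
                // New field: an interface links at most one of LinkedInterconnectAttachment
                // or LinkedVpnTunnel; this URI is illustrative.
                LinkedInterconnectAttachment: "regions/us-central1/interconnectAttachments/example-attachment",
            }},
        }

        b, _ := json.Marshal(r)
        fmt.Println(string(b))
    }
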
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RouterListWarning) MarshalJSON() ([]byte, error) { + type noMethod RouterListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RouterListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RouterListWarningData) MarshalJSON() ([]byte, error) { + type noMethod RouterListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type RouterStatus struct { // BestRoutes: Best routes for this router's network. 
BestRoutes []*Route `json:"bestRoutes,omitempty"` @@ -11270,9 +17343,13 @@ type RoutersScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -11283,7 +17360,9 @@ type RoutersScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -11638,7 +17717,7 @@ type Snapshot struct { // "UPLOADING" Status string `json:"status,omitempty"` - // StorageBytes: [Output Only] A size of the the storage used by the + // StorageBytes: [Output Only] A size of the storage used by the // snapshot. As snapshots share storage, this number is expected to // change with snapshot creation/deletion. StorageBytes int64 `json:"storageBytes,omitempty,string"` @@ -11705,6 +17784,9 @@ type SnapshotList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *SnapshotListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -11732,6 +17814,107 @@ func (s *SnapshotList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// SnapshotListWarning: [Output Only] Informational warning message. +type SnapshotListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*SnapshotListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
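The LinkedInterconnectAttachment field added to RouterInterface earlier in this hunk lets a Cloud Router interface reference an interconnect attachment instead of a VPN tunnel; each interface may carry at most one of the two links. A minimal sketch of patching such an interface onto an existing router, assuming the vendored package is importable as google.golang.org/api/compute/v1 and that Routers.Patch behaves as elsewhere in this generated client; the project, region, router and attachment URLs are placeholders.

package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// Application-default credentials; scope and resource names are placeholders.
	httpClient, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(httpClient)
	if err != nil {
		log.Fatal(err)
	}

	router := &compute.Router{
		Name: "example-router",
		Interfaces: []*compute.RouterInterface{{
			Name: "if-interconnect-0",
			// At most one of LinkedInterconnectAttachment or LinkedVpnTunnel may be set.
			LinkedInterconnectAttachment: "https://www.googleapis.com/compute/v1/projects/my-project/regions/us-central1/interconnectAttachments/my-attachment",
		}},
	}

	if _, err := svc.Routers.Patch("my-project", "us-central1", router.Name, router).Do(); err != nil {
		log.Fatal(err)
	}
}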
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SnapshotListWarning) MarshalJSON() ([]byte, error) { + type noMethod SnapshotListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SnapshotListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SnapshotListWarningData) MarshalJSON() ([]byte, error) { + type noMethod SnapshotListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // SslCertificate: An SslCertificate resource. This resource provides a // mechanism to upload an SSL key and certificate to the load balancer // to serve secure connections from the user. @@ -11823,6 +18006,9 @@ type SslCertificateList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *SslCertificateListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -11850,6 +18036,108 @@ func (s *SslCertificateList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// SslCertificateListWarning: [Output Only] Informational warning +// message. +type SslCertificateListWarning struct { + // Code: [Output Only] A warning code, if applicable. 
For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*SslCertificateListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SslCertificateListWarning) MarshalJSON() ([]byte, error) { + type noMethod SslCertificateListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SslCertificateListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
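The ForceSendFields and NullFields comments repeated across these generated types describe the only way to transmit empty or null values, since every field is otherwise tagged omitempty. A small sketch of the resulting JSON using the SslCertificateListWarningData type above; the behaviour should be the same for any struct whose MarshalJSON delegates to gensupport.MarshalJSON.

package main

import (
	"encoding/json"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	d := &compute.SslCertificateListWarningData{Key: "scope"} // Value deliberately left empty

	b, _ := json.Marshal(d)
	fmt.Println(string(b)) // {"key":"scope"}: the empty Value is omitted by default

	d.ForceSendFields = []string{"Value"}
	b, _ = json.Marshal(d)
	fmt.Println(string(b)) // {"key":"scope","value":""}: the empty Value is forced onto the wire

	d.ForceSendFields = nil
	d.NullFields = []string{"Value"}
	b, _ = json.Marshal(d)
	fmt.Println(string(b)) // {"key":"scope","value":null}: Value is sent as JSON null
}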
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SslCertificateListWarningData) MarshalJSON() ([]byte, error) { + type noMethod SslCertificateListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Subnetwork: A Subnetwork resource. type Subnetwork struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text @@ -11966,6 +18254,9 @@ type SubnetworkAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *SubnetworkAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -11993,6 +18284,108 @@ func (s *SubnetworkAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// SubnetworkAggregatedListWarning: [Output Only] Informational warning +// message. +type SubnetworkAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*SubnetworkAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SubnetworkAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod SubnetworkAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SubnetworkAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SubnetworkAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod SubnetworkAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // SubnetworkList: Contains a list of Subnetwork resources. type SubnetworkList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -12017,6 +18410,9 @@ type SubnetworkList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *SubnetworkListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -12044,6 +18440,107 @@ func (s *SubnetworkList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// SubnetworkListWarning: [Output Only] Informational warning message. +type SubnetworkListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*SubnetworkListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SubnetworkListWarning) MarshalJSON() ([]byte, error) { + type noMethod SubnetworkListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SubnetworkListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. 
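NO_RESULTS_ON_PAGE, present in every warning enum above, is the code callers are most likely to meet in practice: a page of a paginated listing can be empty even though later pages still contain results, so iteration should key off NextPageToken rather than an empty Items slice. A sketch under that assumption, with project and region passed in as placeholders.

package example

import (
	"log"

	compute "google.golang.org/api/compute/v1"
)

// listAllSubnetworks pages through every subnetwork in one region, logging the
// informational warning (typically NO_RESULTS_ON_PAGE) attached to empty pages.
func listAllSubnetworks(svc *compute.Service, project, region string) error {
	token := ""
	for {
		call := svc.Subnetworks.List(project, region)
		if token != "" {
			call = call.PageToken(token)
		}
		page, err := call.Do()
		if err != nil {
			return err
		}
		if w := page.Warning; w != nil {
			log.Printf("warning %s: %s", w.Code, w.Message)
		}
		for _, sn := range page.Items {
			log.Printf("subnetwork %s (%s)", sn.Name, sn.IpCidrRange)
		}
		token = page.NextPageToken
		if token == "" {
			return nil
		}
	}
}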
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SubnetworkListWarningData) MarshalJSON() ([]byte, error) { + type noMethod SubnetworkListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // SubnetworkSecondaryRange: Represents a secondary IP range of a // subnetwork. type SubnetworkSecondaryRange struct { @@ -12155,9 +18652,13 @@ type SubnetworksScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -12168,7 +18669,9 @@ type SubnetworksScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -12447,6 +18950,9 @@ type TargetHttpProxyList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *TargetHttpProxyListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -12474,6 +18980,108 @@ func (s *TargetHttpProxyList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// TargetHttpProxyListWarning: [Output Only] Informational warning +// message. +type TargetHttpProxyListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetHttpProxyListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetHttpProxyListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetHttpProxyListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetHttpProxyListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetHttpProxyListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetHttpProxyListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type TargetHttpsProxiesSetSslCertificatesRequest struct { // SslCertificates: New set of SslCertificate resources to associate // with this TargetHttpsProxy resource. Currently exactly one @@ -12601,6 +19209,9 @@ type TargetHttpsProxyList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *TargetHttpsProxyListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` @@ -12628,6 +19239,108 @@ func (s *TargetHttpsProxyList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// TargetHttpsProxyListWarning: [Output Only] Informational warning +// message. +type TargetHttpsProxyListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetHttpsProxyListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetHttpsProxyListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetHttpsProxyListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetHttpsProxyListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetHttpsProxyListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetHttpsProxyListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // TargetInstance: A TargetInstance resource. This resource defines an // endpoint instance that terminates traffic of certain protocols. type TargetInstance struct { @@ -12730,6 +19443,9 @@ type TargetInstanceAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *TargetInstanceAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -12757,6 +19473,108 @@ func (s *TargetInstanceAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// TargetInstanceAggregatedListWarning: [Output Only] Informational +// warning message. +type TargetInstanceAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetInstanceAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetInstanceAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetInstanceAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetInstanceAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetInstanceAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetInstanceAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // TargetInstanceList: Contains a list of TargetInstance resources. type TargetInstanceList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -12780,6 +19598,9 @@ type TargetInstanceList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *TargetInstanceListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
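Aggregated list responses such as TargetInstanceAggregatedList group results per scope, and each scope can carry its own warning (for example UNREACHABLE, or NO_RESULTS_ON_PAGE for zones that hold nothing). A sketch of walking the per-zone map, assuming Items is the usual map keyed by scope name; the project is passed in by the caller.

package example

import (
	"log"

	compute "google.golang.org/api/compute/v1"
)

// logTargetInstances walks an aggregated listing zone by zone, reporting the
// per-scope warning alongside the target instances found in each zone.
func logTargetInstances(svc *compute.Service, project string) error {
	agg, err := svc.TargetInstances.AggregatedList(project).Do()
	if err != nil {
		return err
	}
	for scope, scoped := range agg.Items {
		if w := scoped.Warning; w != nil {
			log.Printf("%s: %s (%s)", scope, w.Code, w.Message)
		}
		for _, ti := range scoped.TargetInstances {
			log.Printf("%s: target instance %s -> %s", scope, ti.Name, ti.Instance)
		}
	}
	return nil
}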
googleapi.ServerResponse `json:"-"` @@ -12807,6 +19628,108 @@ func (s *TargetInstanceList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// TargetInstanceListWarning: [Output Only] Informational warning +// message. +type TargetInstanceListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetInstanceListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetInstanceListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetInstanceListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetInstanceListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetInstanceListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetInstanceListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type TargetInstancesScopedList struct { // TargetInstances: List of target instances contained in this scope. TargetInstances []*TargetInstance `json:"targetInstances,omitempty"` @@ -12849,9 +19772,13 @@ type TargetInstancesScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -12862,7 +19789,9 @@ type TargetInstancesScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -13102,6 +20031,9 @@ type TargetPoolAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *TargetPoolAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -13129,6 +20061,108 @@ func (s *TargetPoolAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// TargetPoolAggregatedListWarning: [Output Only] Informational warning +// message. +type TargetPoolAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetPoolAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetPoolAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type TargetPoolInstanceHealth struct { HealthStatus []*HealthStatus `json:"healthStatus,omitempty"` @@ -13188,6 +20222,9 @@ type TargetPoolList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *TargetPoolListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -13215,6 +20252,107 @@ func (s *TargetPoolList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// TargetPoolListWarning: [Output Only] Informational warning message. +type TargetPoolListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetPoolListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetPoolListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetPoolListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetPoolListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type TargetPoolsAddHealthCheckRequest struct { // HealthChecks: The HttpHealthCheck to add to the target pool. HealthChecks []*HealthCheckReference `json:"healthChecks,omitempty"` @@ -13375,9 +20513,13 @@ type TargetPoolsScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -13388,7 +20530,9 @@ type TargetPoolsScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -13676,6 +20820,9 @@ type TargetSslProxyList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *TargetSslProxyListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
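TargetPoolsAddHealthCheckRequest above wraps the HttpHealthCheck references to attach to an existing target pool. A short sketch of using it, assuming TargetPools.AddHealthCheck keeps its usual (project, region, pool, request) shape; the health-check URL is a placeholder supplied by the caller.

package example

import (
	compute "google.golang.org/api/compute/v1"
)

// addPoolHealthCheck attaches one HTTP health check to an existing target pool.
func addPoolHealthCheck(svc *compute.Service, project, region, pool, healthCheckURL string) error {
	req := &compute.TargetPoolsAddHealthCheckRequest{
		HealthChecks: []*compute.HealthCheckReference{
			{HealthCheck: healthCheckURL},
		},
	}
	_, err := svc.TargetPools.AddHealthCheck(project, region, pool, req).Do()
	return err
}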
googleapi.ServerResponse `json:"-"` @@ -13703,6 +20850,108 @@ func (s *TargetSslProxyList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// TargetSslProxyListWarning: [Output Only] Informational warning +// message. +type TargetSslProxyListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetSslProxyListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetSslProxyListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetSslProxyListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetSslProxyListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetSslProxyListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetSslProxyListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type TargetTcpProxiesSetBackendServiceRequest struct { // Service: The URL of the new BackendService resource for the // targetTcpProxy. @@ -13857,6 +21106,9 @@ type TargetTcpProxyList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *TargetTcpProxyListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -13884,6 +21136,108 @@ func (s *TargetTcpProxyList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// TargetTcpProxyListWarning: [Output Only] Informational warning +// message. +type TargetTcpProxyListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetTcpProxyListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetTcpProxyListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetTcpProxyListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetTcpProxyListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetTcpProxyListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetTcpProxyListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // TargetVpnGateway: Represents a Target VPN gateway resource. type TargetVpnGateway struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text @@ -13992,6 +21346,9 @@ type TargetVpnGatewayAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *TargetVpnGatewayAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -14019,6 +21376,108 @@ func (s *TargetVpnGatewayAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// TargetVpnGatewayAggregatedListWarning: [Output Only] Informational +// warning message. 
+type TargetVpnGatewayAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetVpnGatewayAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewayAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetVpnGatewayAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetVpnGatewayAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewayAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetVpnGatewayAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // TargetVpnGatewayList: Contains a list of TargetVpnGateway resources. type TargetVpnGatewayList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -14043,6 +21502,9 @@ type TargetVpnGatewayList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *TargetVpnGatewayListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -14070,6 +21532,108 @@ func (s *TargetVpnGatewayList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// TargetVpnGatewayListWarning: [Output Only] Informational warning +// message. +type TargetVpnGatewayListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*TargetVpnGatewayListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewayListWarning) MarshalJSON() ([]byte, error) { + type noMethod TargetVpnGatewayListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type TargetVpnGatewayListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TargetVpnGatewayListWarningData) MarshalJSON() ([]byte, error) { + type noMethod TargetVpnGatewayListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type TargetVpnGatewaysScopedList struct { // TargetVpnGateways: [Output Only] List of target vpn gateways // contained in this scope. 
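
As a brief usage sketch of the field these hunks add (everything here is an assumption for illustration only: the import path google.golang.org/api/compute/v1, the golang.org/x/oauth2/google auth setup, and the placeholder project ID "my-project" are not taken from this diff), a caller could inspect the new Warning member on a list response such as ZoneList like so:

package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// Build an HTTP client using Application Default Credentials
	// (auth setup is an assumption, not part of this patch).
	client, err := google.DefaultClient(ctx, compute.ComputeReadonlyScope)
	if err != nil {
		log.Fatal(err)
	}

	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// "my-project" is a placeholder project ID.
	list, err := svc.Zones.List("my-project").Do()
	if err != nil {
		log.Fatal(err)
	}

	// The Warning field added by this change is nil unless the server
	// attached an informational warning (e.g. NO_RESULTS_ON_PAGE).
	if list.Warning != nil {
		log.Printf("warning %s: %s", list.Warning.Code, list.Warning.Message)
		for _, d := range list.Warning.Data {
			// Key/value pairs give extra detail, e.g. scope=zones/us-east1-d.
			log.Printf("  %s=%s", d.Key, d.Value)
		}
	}
}

The same pattern applies to the other list types touched in this section (TargetPoolList, TargetSslProxyList, TargetTcpProxyList, TargetVpnGatewayList, VpnTunnelList, XpnHostList, UrlMapList), since each gains an identically shaped *Warning / *WarningData pair.
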
@@ -14113,9 +21677,13 @@ type TargetVpnGatewaysScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -14126,7 +21694,9 @@ type TargetVpnGatewaysScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -14334,6 +21904,9 @@ type UrlMapList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *UrlMapListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -14361,6 +21934,107 @@ func (s *UrlMapList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// UrlMapListWarning: [Output Only] Informational warning message. +type UrlMapListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*UrlMapListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapListWarning) MarshalJSON() ([]byte, error) { + type noMethod UrlMapListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type UrlMapListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *UrlMapListWarningData) MarshalJSON() ([]byte, error) { + type noMethod UrlMapListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type UrlMapReference struct { UrlMap string `json:"urlMap,omitempty"` @@ -14702,6 +22376,9 @@ type VpnTunnelAggregatedList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *VpnTunnelAggregatedListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -14729,6 +22406,108 @@ func (s *VpnTunnelAggregatedList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// VpnTunnelAggregatedListWarning: [Output Only] Informational warning +// message. +type VpnTunnelAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*VpnTunnelAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelAggregatedListWarning) MarshalJSON() ([]byte, error) { + type noMethod VpnTunnelAggregatedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnTunnelAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod VpnTunnelAggregatedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // VpnTunnelList: Contains a list of VpnTunnel resources. type VpnTunnelList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -14753,6 +22532,9 @@ type VpnTunnelList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *VpnTunnelListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -14780,6 +22562,107 @@ func (s *VpnTunnelList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// VpnTunnelListWarning: [Output Only] Informational warning message. +type VpnTunnelListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*VpnTunnelListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelListWarning) MarshalJSON() ([]byte, error) { + type noMethod VpnTunnelListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type VpnTunnelListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *VpnTunnelListWarningData) MarshalJSON() ([]byte, error) { + type noMethod VpnTunnelListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type VpnTunnelsScopedList struct { // VpnTunnels: List of vpn tunnels contained in this scope. VpnTunnels []*VpnTunnel `json:"vpnTunnels,omitempty"` @@ -14821,9 +22704,13 @@ type VpnTunnelsScopedListWarning struct { // Possible values: // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" // "NEXT_HOP_INSTANCE_NOT_FOUND" @@ -14834,7 +22721,9 @@ type VpnTunnelsScopedListWarning struct { // "REQUIRED_TOS_AGREEMENT" // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" // "UNREACHABLE" Code string `json:"code,omitempty"` @@ -14930,6 +22819,9 @@ type XpnHostList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *XpnHostListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` @@ -14957,6 +22849,107 @@ func (s *XpnHostList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// XpnHostListWarning: [Output Only] Informational warning message. +type XpnHostListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*XpnHostListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *XpnHostListWarning) MarshalJSON() ([]byte, error) { + type noMethod XpnHostListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type XpnHostListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *XpnHostListWarningData) MarshalJSON() ([]byte, error) { + type noMethod XpnHostListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // XpnResourceId: Service resource (a.k.a service project) ID. type XpnResourceId struct { // Id: The ID of the service resource. In the case of projects, this @@ -14996,6 +22989,10 @@ func (s *XpnResourceId) MarshalJSON() ([]byte, error) { // Zone: A Zone resource. type Zone struct { + // AvailableCpuPlatforms: [Output Only] Available cpu/platform + // selections for the zone. + AvailableCpuPlatforms []string `json:"availableCpuPlatforms,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -15036,15 +23033,16 @@ type Zone struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. + // "AvailableCpuPlatforms") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to + // NullFields is a list of field names (e.g. "AvailableCpuPlatforms") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -15083,6 +23081,9 @@ type ZoneList struct { // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` + // Warning: [Output Only] Informational warning message. + Warning *ZoneListWarning `json:"warning,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -15110,6 +23111,107 @@ func (s *ZoneList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ZoneListWarning: [Output Only] Informational warning message. 
+type ZoneListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DEPRECATED_TYPE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "EXPERIMENTAL_TYPE_USED" + // "EXTERNAL_API_WARNING" + // "FIELD_VALUE_OVERRIDEN" + // "INJECTED_KERNELS_DEPRECATED" + // "MISSING_TYPE_DEPENDENCY" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" + // "RESOURCE_NOT_DELETED" + // "SCHEMA_VALIDATION_IGNORED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNDECLARED_PROPERTIES" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*ZoneListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ZoneListWarning) MarshalJSON() ([]byte, error) { + type noMethod ZoneListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ZoneListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ZoneListWarningData) MarshalJSON() ([]byte, error) { + type noMethod ZoneListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type ZoneSetLabelsRequest struct { // LabelFingerprint: The fingerprint of the previous set of labels for // this resource, used to detect conflicts. The fingerprint is initially @@ -39221,6 +47323,190 @@ func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error } +// method id "compute.instances.setDeletionProtection": + +type InstancesSetDeletionProtectionCall struct { + s *Service + project string + zone string + resource string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetDeletionProtection: Sets deletion protection on the instance. +func (r *InstancesService) SetDeletionProtection(project string, zone string, resource string) *InstancesSetDeletionProtectionCall { + c := &InstancesSetDeletionProtectionCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.resource = resource + return c +} + +// DeletionProtection sets the optional parameter "deletionProtection": +// Whether the resource should be protected against deletion. +func (c *InstancesSetDeletionProtectionCall) DeletionProtection(deletionProtection bool) *InstancesSetDeletionProtectionCall { + c.urlParams_.Set("deletionProtection", fmt.Sprint(deletionProtection)) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetDeletionProtectionCall) RequestId(requestId string) *InstancesSetDeletionProtectionCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesSetDeletionProtectionCall) Fields(s ...googleapi.Field) *InstancesSetDeletionProtectionCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *InstancesSetDeletionProtectionCall) Context(ctx context.Context) *InstancesSetDeletionProtectionCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstancesSetDeletionProtectionCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesSetDeletionProtectionCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{resource}/setDeletionProtection") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.setDeletionProtection" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetDeletionProtectionCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets deletion protection on the instance.", + // "httpMethod": "POST", + // "id": "compute.instances.setDeletionProtection", + // "parameterOrder": [ + // "project", + // "zone", + // "resource" + // ], + // "parameters": { + // "deletionProtection": { + // "default": "true", + // "description": "Whether the resource should be protected against deletion.", + // "location": "query", + // "type": "boolean" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "resource": { + // "description": "Name of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{resource}/setDeletionProtection", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.instances.setDiskAutoDelete": type InstancesSetDiskAutoDeleteCall struct { @@ -40140,6 +48426,190 @@ func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, } +// method id "compute.instances.setMinCpuPlatform": + +type InstancesSetMinCpuPlatformCall struct { + s *Service + project string + zone string + instance string + instancessetmincpuplatformrequest *InstancesSetMinCpuPlatformRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetMinCpuPlatform: Changes the minimum CPU platform that this +// instance should use. This method can only be called on a stopped +// instance. For more information, read Specifying a Minimum CPU +// Platform. +func (r *InstancesService) SetMinCpuPlatform(project string, zone string, instance string, instancessetmincpuplatformrequest *InstancesSetMinCpuPlatformRequest) *InstancesSetMinCpuPlatformCall { + c := &InstancesSetMinCpuPlatformCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.instancessetmincpuplatformrequest = instancessetmincpuplatformrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InstancesSetMinCpuPlatformCall) RequestId(requestId string) *InstancesSetMinCpuPlatformCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *InstancesSetMinCpuPlatformCall) Fields(s ...googleapi.Field) *InstancesSetMinCpuPlatformCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesSetMinCpuPlatformCall) Context(ctx context.Context) *InstancesSetMinCpuPlatformCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstancesSetMinCpuPlatformCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesSetMinCpuPlatformCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmincpuplatformrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.setMinCpuPlatform" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Changes the minimum CPU platform that this instance should use. This method can only be called on a stopped instance. 
For more information, read Specifying a Minimum CPU Platform.", + // "httpMethod": "POST", + // "id": "compute.instances.setMinCpuPlatform", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform", + // "request": { + // "$ref": "InstancesSetMinCpuPlatformRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.instances.setScheduling": type InstancesSetSchedulingCall struct { @@ -41059,11 +49529,10 @@ type InstancesStopCall struct { // Stop: Stops a running instance, shutting it down cleanly, and allows // you to restart the instance at a later time. Stopped instances do not -// incur per-minute, virtual machine usage charges while they are -// stopped, but any resources that the virtual machine is using, such as -// persistent disks and static IP addresses, will continue to be charged -// until they are deleted. For more information, see Stopping an -// instance. +// incur VM usage charges while they are stopped. However, resources +// that the VM is using, such as persistent disks and static IP +// addresses, will continue to be charged until they are deleted. For +// more information, see Stopping an instance. // For details, see https://cloud.google.com/compute/docs/reference/latest/instances/stop func (r *InstancesService) Stop(project string, zone string, instance string) *InstancesStopCall { c := &InstancesStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -41175,7 +49644,7 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) } return ret, nil // { - // "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. 
Stopped instances do not incur per-minute, virtual machine usage charges while they are stopped, but any resources that the virtual machine is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance.", + // "description": "Stops a running instance, shutting it down cleanly, and allows you to restart the instance at a later time. Stopped instances do not incur VM usage charges while they are stopped. However, resources that the VM is using, such as persistent disks and static IP addresses, will continue to be charged until they are deleted. For more information, see Stopping an instance.", // "httpMethod": "POST", // "id": "compute.instances.stop", // "parameterOrder": [ @@ -41223,6 +49692,2327 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) } +// method id "compute.interconnectAttachments.aggregatedList": + +type InterconnectAttachmentsAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves an aggregated list of interconnect +// attachments. +func (r *InterconnectAttachmentsService) AggregatedList(project string) *InterconnectAttachmentsAggregatedListCall { + c := &InterconnectAttachmentsAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *InterconnectAttachmentsAggregatedListCall) Filter(filter string) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. 
+// (Default: 500) +func (c *InterconnectAttachmentsAggregatedListCall) MaxResults(maxResults int64) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InterconnectAttachmentsAggregatedListCall) OrderBy(orderBy string) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InterconnectAttachmentsAggregatedListCall) PageToken(pageToken string) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InterconnectAttachmentsAggregatedListCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectAttachmentsAggregatedListCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InterconnectAttachmentsAggregatedListCall) Context(ctx context.Context) *InterconnectAttachmentsAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InterconnectAttachmentsAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectAttachmentsAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/interconnectAttachments") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnectAttachments.aggregatedList" call. +// Exactly one of *InterconnectAttachmentAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *InterconnectAttachmentAggregatedList.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachmentAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InterconnectAttachmentAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of interconnect attachments.", + // "httpMethod": "GET", + // "id": "compute.interconnectAttachments.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. 
Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/interconnectAttachments", + // "response": { + // "$ref": "InterconnectAttachmentAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InterconnectAttachmentsAggregatedListCall) Pages(ctx context.Context, f func(*InterconnectAttachmentAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.interconnectAttachments.delete": + +type InterconnectAttachmentsDeleteCall struct { + s *Service + project string + region string + interconnectAttachment string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified interconnect attachment. +func (r *InterconnectAttachmentsService) Delete(project string, region string, interconnectAttachment string) *InterconnectAttachmentsDeleteCall { + c := &InterconnectAttachmentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.interconnectAttachment = interconnectAttachment + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. 
This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InterconnectAttachmentsDeleteCall) RequestId(requestId string) *InterconnectAttachmentsDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InterconnectAttachmentsDeleteCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InterconnectAttachmentsDeleteCall) Context(ctx context.Context) *InterconnectAttachmentsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InterconnectAttachmentsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectAttachmentsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "interconnectAttachment": c.interconnectAttachment, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnectAttachments.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InterconnectAttachmentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified interconnect attachment.", + // "httpMethod": "DELETE", + // "id": "compute.interconnectAttachments.delete", + // "parameterOrder": [ + // "project", + // "region", + // "interconnectAttachment" + // ], + // "parameters": { + // "interconnectAttachment": { + // "description": "Name of the interconnect attachment to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.interconnectAttachments.get": + +type InterconnectAttachmentsGetCall struct { + s *Service + project string + region string + interconnectAttachment string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified interconnect attachment. +func (r *InterconnectAttachmentsService) Get(project string, region string, interconnectAttachment string) *InterconnectAttachmentsGetCall { + c := &InterconnectAttachmentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.interconnectAttachment = interconnectAttachment + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *InterconnectAttachmentsGetCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectAttachmentsGetCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InterconnectAttachmentsGetCall) Context(ctx context.Context) *InterconnectAttachmentsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InterconnectAttachmentsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectAttachmentsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "interconnectAttachment": c.interconnectAttachment, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnectAttachments.get" call. +// Exactly one of *InterconnectAttachment or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InterconnectAttachment.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectAttachmentsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachment, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InterconnectAttachment{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified interconnect attachment.", + // "httpMethod": "GET", + // "id": "compute.interconnectAttachments.get", + // "parameterOrder": [ + // "project", + // "region", + // "interconnectAttachment" + // ], + // "parameters": { + // "interconnectAttachment": { + // "description": "Name of the interconnect attachment to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/interconnectAttachments/{interconnectAttachment}", + // "response": { + // "$ref": "InterconnectAttachment" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.interconnectAttachments.insert": + +type InterconnectAttachmentsInsertCall struct { + s *Service + project string + region string + interconnectattachment *InterconnectAttachment + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates an InterconnectAttachment in the specified project +// using the data included in the request. +func (r *InterconnectAttachmentsService) Insert(project string, region string, interconnectattachment *InterconnectAttachment) *InterconnectAttachmentsInsertCall { + c := &InterconnectAttachmentsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.interconnectattachment = interconnectattachment + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). 
+func (c *InterconnectAttachmentsInsertCall) RequestId(requestId string) *InterconnectAttachmentsInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InterconnectAttachmentsInsertCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InterconnectAttachmentsInsertCall) Context(ctx context.Context) *InterconnectAttachmentsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InterconnectAttachmentsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectAttachmentsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnectattachment) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnectAttachments.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InterconnectAttachmentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an InterconnectAttachment in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.interconnectAttachments.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/interconnectAttachments", + // "request": { + // "$ref": "InterconnectAttachment" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.interconnectAttachments.list": + +type InterconnectAttachmentsListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves the list of interconnect attachments contained within +// the specified region. +func (r *InterconnectAttachmentsService) List(project string, region string) *InterconnectAttachmentsListCall { + c := &InterconnectAttachmentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). 
The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *InterconnectAttachmentsListCall) Filter(filter string) *InterconnectAttachmentsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InterconnectAttachmentsListCall) MaxResults(maxResults int64) *InterconnectAttachmentsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InterconnectAttachmentsListCall) OrderBy(orderBy string) *InterconnectAttachmentsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InterconnectAttachmentsListCall) PageToken(pageToken string) *InterconnectAttachmentsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InterconnectAttachmentsListCall) Fields(s ...googleapi.Field) *InterconnectAttachmentsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *InterconnectAttachmentsListCall) IfNoneMatch(entityTag string) *InterconnectAttachmentsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InterconnectAttachmentsListCall) Context(ctx context.Context) *InterconnectAttachmentsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InterconnectAttachmentsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectAttachmentsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/interconnectAttachments") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnectAttachments.list" call. +// Exactly one of *InterconnectAttachmentList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InterconnectAttachmentList.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*InterconnectAttachmentList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InterconnectAttachmentList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of interconnect attachments contained within the specified region.", + // "httpMethod": "GET", + // "id": "compute.interconnectAttachments.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). 
For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/interconnectAttachments", + // "response": { + // "$ref": "InterconnectAttachmentList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *InterconnectAttachmentsListCall) Pages(ctx context.Context, f func(*InterconnectAttachmentList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.interconnectLocations.get": + +type InterconnectLocationsGetCall struct { + s *Service + project string + interconnectLocation string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the details for the specified interconnect location. Get +// a list of available interconnect locations by making a list() +// request. +func (r *InterconnectLocationsService) Get(project string, interconnectLocation string) *InterconnectLocationsGetCall { + c := &InterconnectLocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.interconnectLocation = interconnectLocation + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InterconnectLocationsGetCall) Fields(s ...googleapi.Field) *InterconnectLocationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectLocationsGetCall) IfNoneMatch(entityTag string) *InterconnectLocationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InterconnectLocationsGetCall) Context(ctx context.Context) *InterconnectLocationsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InterconnectLocationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectLocationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnectLocations/{interconnectLocation}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "interconnectLocation": c.interconnectLocation, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnectLocations.get" call. +// Exactly one of *InterconnectLocation or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *InterconnectLocation.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectLocationsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectLocation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InterconnectLocation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the details for the specified interconnect location. Get a list of available interconnect locations by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.interconnectLocations.get", + // "parameterOrder": [ + // "project", + // "interconnectLocation" + // ], + // "parameters": { + // "interconnectLocation": { + // "description": "Name of the interconnect location to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/interconnectLocations/{interconnectLocation}", + // "response": { + // "$ref": "InterconnectLocation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.interconnectLocations.list": + +type InterconnectLocationsListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves the list of interconnect locations available to the +// specified project. +func (r *InterconnectLocationsService) List(project string) *InterconnectLocationsListCall { + c := &InterconnectLocationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. 
+// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *InterconnectLocationsListCall) Filter(filter string) *InterconnectLocationsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InterconnectLocationsListCall) MaxResults(maxResults int64) *InterconnectLocationsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InterconnectLocationsListCall) OrderBy(orderBy string) *InterconnectLocationsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InterconnectLocationsListCall) PageToken(pageToken string) *InterconnectLocationsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InterconnectLocationsListCall) Fields(s ...googleapi.Field) *InterconnectLocationsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectLocationsListCall) IfNoneMatch(entityTag string) *InterconnectLocationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *InterconnectLocationsListCall) Context(ctx context.Context) *InterconnectLocationsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InterconnectLocationsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectLocationsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnectLocations") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnectLocations.list" call. +// Exactly one of *InterconnectLocationList or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InterconnectLocationList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*InterconnectLocationList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InterconnectLocationList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of interconnect locations available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.interconnectLocations.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/interconnectLocations", + // "response": { + // "$ref": "InterconnectLocationList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InterconnectLocationsListCall) Pages(ctx context.Context, f func(*InterconnectLocationList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.interconnects.delete": + +type InterconnectsDeleteCall struct { + s *Service + project string + interconnect string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified interconnect. 
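+//
+// Illustrative sketch only, not part of the generated surface: svc, the
+// project, the interconnect name, and the request ID below are hypothetical.
+// A delete can be issued and made retry-safe with a caller-chosen request ID
+// (see RequestId below), for example:
+//
+//     op, err := svc.Interconnects.Delete("my-project", "my-interconnect").
+//         RequestId("11111111-2222-3333-4444-555555555555").
+//         Context(ctx).
+//         Do()
+//     if err != nil {
+//         // handle the error
+//     }
+//     fmt.Println(op.Name)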
+func (r *InterconnectsService) Delete(project string, interconnect string) *InterconnectsDeleteCall { + c := &InterconnectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.interconnect = interconnect + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InterconnectsDeleteCall) RequestId(requestId string) *InterconnectsDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InterconnectsDeleteCall) Fields(s ...googleapi.Field) *InterconnectsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InterconnectsDeleteCall) Context(ctx context.Context) *InterconnectsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InterconnectsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "interconnect": c.interconnect, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnects.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InterconnectsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified interconnect.", + // "httpMethod": "DELETE", + // "id": "compute.interconnects.delete", + // "parameterOrder": [ + // "project", + // "interconnect" + // ], + // "parameters": { + // "interconnect": { + // "description": "Name of the interconnect to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/interconnects/{interconnect}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.interconnects.get": + +type InterconnectsGetCall struct { + s *Service + project string + interconnect string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified interconnect. Get a list of available +// interconnects by making a list() request. +func (r *InterconnectsService) Get(project string, interconnect string) *InterconnectsGetCall { + c := &InterconnectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.interconnect = interconnect + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InterconnectsGetCall) Fields(s ...googleapi.Field) *InterconnectsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectsGetCall) IfNoneMatch(entityTag string) *InterconnectsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InterconnectsGetCall) Context(ctx context.Context) *InterconnectsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InterconnectsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "interconnect": c.interconnect, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnects.get" call. +// Exactly one of *Interconnect or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Interconnect.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InterconnectsGetCall) Do(opts ...googleapi.CallOption) (*Interconnect, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Interconnect{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified interconnect. 
Get a list of available interconnects by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.interconnects.get", + // "parameterOrder": [ + // "project", + // "interconnect" + // ], + // "parameters": { + // "interconnect": { + // "description": "Name of the interconnect to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/interconnects/{interconnect}", + // "response": { + // "$ref": "Interconnect" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.interconnects.insert": + +type InterconnectsInsertCall struct { + s *Service + project string + interconnect *Interconnect + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a Interconnect in the specified project using the +// data included in the request. +func (r *InterconnectsService) Insert(project string, interconnect *Interconnect) *InterconnectsInsertCall { + c := &InterconnectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.interconnect = interconnect + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InterconnectsInsertCall) RequestId(requestId string) *InterconnectsInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InterconnectsInsertCall) Fields(s ...googleapi.Field) *InterconnectsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InterconnectsInsertCall) Context(ctx context.Context) *InterconnectsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *InterconnectsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnect) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnects.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InterconnectsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a Interconnect in the specified project using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.interconnects.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/interconnects", + // "request": { + // "$ref": "Interconnect" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.interconnects.list": + +type InterconnectsListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves the list of interconnect available to the specified +// project. +func (r *InterconnectsService) List(project string) *InterconnectsListCall { + c := &InterconnectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter +// {expression} for filtering listed resources. Your {expression} must +// be in the format: field_name comparison_string literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use name ne example-instance. +// +// You can filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// To filter on multiple expressions, provide each separate expression +// within parentheses. For example, (scheduling.automaticRestart eq +// true) (zone eq us-central1-f). Multiple expressions are treated as +// AND expressions, meaning that resources must match all expressions to +// pass the filters. +func (c *InterconnectsListCall) Filter(filter string) *InterconnectsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. Acceptable values are 0 to 500, inclusive. +// (Default: 500) +func (c *InterconnectsListCall) MaxResults(maxResults int64) *InterconnectsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. +// +// You can also sort results in descending order based on the creation +// timestamp using orderBy="creationTimestamp desc". 
This sorts results +// based on the creationTimestamp field in reverse chronological order +// (newest result first). Use this to sort resources like operations so +// that the newest operation is returned first. +// +// Currently, only sorting by name or creationTimestamp desc is +// supported. +func (c *InterconnectsListCall) OrderBy(orderBy string) *InterconnectsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *InterconnectsListCall) PageToken(pageToken string) *InterconnectsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InterconnectsListCall) Fields(s ...googleapi.Field) *InterconnectsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectsListCall) IfNoneMatch(entityTag string) *InterconnectsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InterconnectsListCall) Context(ctx context.Context) *InterconnectsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InterconnectsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnects.list" call. +// Exactly one of *InterconnectList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InterconnectList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &InterconnectList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of interconnect available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.interconnects.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests. Acceptable values are 0 to 500, inclusive. (Default: 500)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation timestamp using orderBy=\"creationTimestamp desc\". This sorts results based on the creationTimestamp field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by name or creationTimestamp desc is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/interconnects", + // "response": { + // "$ref": "InterconnectList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InterconnectsListCall) Pages(ctx context.Context, f func(*InterconnectList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.interconnects.patch": + +type InterconnectsPatchCall struct { + s *Service + project string + interconnect string + interconnect2 *Interconnect + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates the specified interconnect with the data included in +// the request. This method supports PATCH semantics and uses the JSON +// merge patch format and processing rules. +func (r *InterconnectsService) Patch(project string, interconnect string, interconnect2 *Interconnect) *InterconnectsPatchCall { + c := &InterconnectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.interconnect = interconnect + c.interconnect2 = interconnect2 + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *InterconnectsPatchCall) RequestId(requestId string) *InterconnectsPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InterconnectsPatchCall) Fields(s ...googleapi.Field) *InterconnectsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
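+//
+// Illustrative sketch only, not part of the generated surface: svc and the
+// resource names below are hypothetical, and Description is assumed to be
+// the resource's usual generated field. The enclosing patch call takes the
+// target resource name plus an Interconnect carrying just the fields to
+// change, for example:
+//
+//     op, err := svc.Interconnects.Patch("my-project", "my-interconnect",
+//         &compute.Interconnect{Description: "updated by automation"}).
+//         Context(ctx).
+//         Do()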
+func (c *InterconnectsPatchCall) Context(ctx context.Context) *InterconnectsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InterconnectsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.interconnect2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/interconnects/{interconnect}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "interconnect": c.interconnect, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnects.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InterconnectsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the specified interconnect with the data included in the request. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "httpMethod": "PATCH", + // "id": "compute.interconnects.patch", + // "parameterOrder": [ + // "project", + // "interconnect" + // ], + // "parameters": { + // "interconnect": { + // "description": "Name of the interconnect to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/interconnects/{interconnect}", + // "request": { + // "$ref": "Interconnect" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.licenses.get": type LicensesGetCall struct { @@ -42953,6 +53743,177 @@ func (c *NetworksListCall) Pages(ctx context.Context, f func(*NetworkList) error } } +// method id "compute.networks.patch": + +type NetworksPatchCall struct { + s *Service + project string + network string + network2 *Network + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches the specified network with the data included in the +// request. +func (r *NetworksService) Patch(project string, network string, network2 *Network) *NetworksPatchCall { + c := &NetworksPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.network = network + c.network2 = network2 + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. +// +// For example, consider a situation where you make an initial request +// and the request times out. If you make the request again with the +// same request ID, the server can check if original operation with the +// same request ID was received, and if so, will ignore the second +// request. This prevents clients from accidentally creating duplicate +// commitments. +// +// The request ID must be a valid UUID with the exception that zero UUID +// is not supported (00000000-0000-0000-0000-000000000000). +func (c *NetworksPatchCall) RequestId(requestId string) *NetworksPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NetworksPatchCall) Fields(s ...googleapi.Field) *NetworksPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NetworksPatchCall) Context(ctx context.Context) *NetworksPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *NetworksPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NetworksPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.network2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "network": c.network, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.networks.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworksPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches the specified network with the data included in the request.", + // "httpMethod": "PATCH", + // "id": "compute.networks.patch", + // "parameterOrder": [ + // "project", + // "network" + // ], + // "parameters": { + // "network": { + // "description": "Name of the network to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/global/networks/{network}", + // "request": { + // "$ref": "Network" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.networks.removePeering": type NetworksRemovePeeringCall struct { diff --git a/vendor/google.golang.org/api/iam/v1/iam-api.json b/vendor/google.golang.org/api/iam/v1/iam-api.json index 8dad9140dd..3593cfc00d 100644 --- a/vendor/google.golang.org/api/iam/v1/iam-api.json +++ b/vendor/google.golang.org/api/iam/v1/iam-api.json @@ -1,10 +1,12 @@ { + "discoveryVersion": "v1", + "version_module": true, "schemas": { "UndeleteRoleRequest": { "properties": { "etag": { - "format": "byte", "description": "Used to perform a consistent read-modify-write.", + "format": "byte", "type": "string" } }, @@ -16,36 +18,34 @@ "description": "The service account create request.", "type": "object", "properties": { - "accountId": { - "description": "Required. The account id that is used to generate the service account\nemail address and a stable unique id. It is unique within a project,\nmust be 6-30 characters long, and match the regular expression\n`[a-z]([-a-z0-9]*[a-z0-9])` to comply with RFC1035.", - "type": "string" - }, "serviceAccount": { - "description": "The ServiceAccount resource to create.\nCurrently, only the following values are user assignable:\n`display_name` .", - "$ref": "ServiceAccount" + "$ref": "ServiceAccount", + "description": "The ServiceAccount resource to create.\nCurrently, only the following values are user assignable:\n`display_name` ." + }, + "accountId": { + "type": "string", + "description": "Required. The account id that is used to generate the service account\nemail address and a stable unique id. It is unique within a project,\nmust be 6-30 characters long, and match the regular expression\n`[a-z]([-a-z0-9]*[a-z0-9])` to comply with RFC1035." } }, "id": "CreateServiceAccountRequest" }, "Role": { + "id": "Role", + "description": "A role in the Identity and Access Management API.", + "type": "object", "properties": { - "includedPermissions": { - "description": "The names of the permissions this role grants when bound in an IAM policy.", - "items": { - "type": "string" - }, - "type": "array" - }, "description": { "description": "Optional. 
A human-readable description for the role.", "type": "string" }, "etag": { - "format": "byte", "description": "Used to perform a consistent read-modify-write.", + "format": "byte", "type": "string" }, "stage": { + "description": "The current launch stage of the role.", + "type": "string", "enumDescriptions": [ "The user has indicated this role is currently in an alpha phase.", "The user has indicated this role is currently in a beta phase.", @@ -61,9 +61,7 @@ "DEPRECATED", "DISABLED", "EAP" - ], - "description": "The current launch stage of the role.", - "type": "string" + ] }, "name": { "description": "The name of the role.\n\nWhen Role is used in CreateRole, the role name must not be set.\n\nWhen Role is used in output and other input such as UpdateRole, the role\nname is the complete path, e.g., roles/logging.viewer for curated roles\nand organizations/{ORGANIZATION_ID}/roles/logging.viewer for custom roles.", @@ -76,11 +74,15 @@ "title": { "description": "Optional. A human-readable title for the role. Typically this\nis limited to 100 UTF-8 bytes.", "type": "string" + }, + "includedPermissions": { + "description": "The names of the permissions this role grants when bound in an IAM policy.", + "type": "array", + "items": { + "type": "string" + } } - }, - "id": "Role", - "description": "A role in the Identity and Access Management API.", - "type": "object" + } }, "Binding": { "description": "Associates `members` with a `role`.", @@ -88,14 +90,14 @@ "properties": { "members": { "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@gmail.com` or `joe@example.com`.\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n\n* `domain:{domain}`: A Google Apps domain name that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n", + "type": "array", "items": { "type": "string" - }, - "type": "array" + } }, "role": { - "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.\nRequired", - "type": "string" + "type": "string", + "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.\nRequired" } }, "id": "Binding" @@ -110,13 +112,13 @@ "description": "Textual representation of an expression in\nCommon Expression Language syntax.\n\nThe application context of the containing message determines which\nwell-known feature set of CEL is supported.", "type": "string" }, - "location": { - "description": "An optional string indicating the location of the expression for error\nreporting, e.g. a file name and a position in the file.", - "type": "string" - }, "title": { "description": "An optional title for the expression, i.e. a short string describing\nits purpose. This can be used e.g. 
in UIs which allow to enter the\nexpression.", "type": "string" + }, + "location": { + "description": "An optional string indicating the location of the expression for error\nreporting, e.g. a file name and a position in the file.", + "type": "string" } }, "id": "Expr", @@ -125,14 +127,6 @@ }, "ServiceAccount": { "properties": { - "email": { - "description": "@OutputOnly The email address of the service account.", - "type": "string" - }, - "name": { - "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.\n\nRequests using `-` as a wildcard for the project will infer the project\nfrom the `account` and the `account` value can be the `email` address or\nthe `unique_id` of the service account.\n\nIn responses the resource name will always be in the format\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.", - "type": "string" - }, "projectId": { "description": "@OutputOnly The id of the project that owns the service account.", "type": "string" @@ -142,21 +136,29 @@ "type": "string" }, "oauth2ClientId": { - "description": "@OutputOnly. The OAuth2 client id for the service account.\nThis is used in conjunction with the OAuth2 clientconfig API to make\nthree legged OAuth2 (3LO) flows to access the data of Google users.", - "type": "string" + "type": "string", + "description": "@OutputOnly. The OAuth2 client id for the service account.\nThis is used in conjunction with the OAuth2 clientconfig API to make\nthree legged OAuth2 (3LO) flows to access the data of Google users." }, "displayName": { - "description": "Optional. A user-specified description of the service account. Must be\nfewer than 100 UTF-8 bytes.", - "type": "string" + "type": "string", + "description": "Optional. A user-specified description of the service account. Must be\nfewer than 100 UTF-8 bytes." }, "etag": { - "format": "byte", "description": "Used to perform a consistent read-modify-write.", + "format": "byte", + "type": "string" + }, + "name": { + "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\n\nRequests using `-` as a wildcard for the `PROJECT_ID` will infer the\nproject from the `account` and the `ACCOUNT` value can be the `email`\naddress or the `unique_id` of the service account.\n\nIn responses the resource name will always be in the format\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.", + "type": "string" + }, + "email": { + "description": "@OutputOnly The email address of the service account.", "type": "string" } }, "id": "ServiceAccount", - "description": "A service account in the Identity and Access Management API.\n\nTo create a service account, specify the `project_id` and the `account_id`\nfor the account. The `account_id` is unique within the project, and is used\nto generate the service account email address and a stable\n`unique_id`.\n\nIf the account already exists, the account's resource name is returned\nin util::Status's ResourceInfo.resource_name in the format of\nprojects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}. The caller can\nuse the name in other methods to access the account.\n\nAll other methods can identify the service account using the format\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.\nUsing `-` as a wildcard for the project will infer the project from\nthe account. 
The `account` value can be the `email` address or the\n`unique_id` of the service account.", + "description": "A service account in the Identity and Access Management API.\n\nTo create a service account, specify the `project_id` and the `account_id`\nfor the account. The `account_id` is unique within the project, and is used\nto generate the service account email address and a stable\n`unique_id`.\n\nIf the account already exists, the account's resource name is returned\nin util::Status's ResourceInfo.resource_name in the format of\nprojects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}. The caller can\nuse the name in other methods to access the account.\n\nAll other methods can identify the service account using the format\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", "type": "object" }, "QueryGrantableRolesRequest": { @@ -172,20 +174,20 @@ "type": "string" }, "pageSize": { - "format": "int32", "description": "Optional limit on the number of roles to include in the response.", + "format": "int32", "type": "integer" }, "view": { + "enumDescriptions": [ + "Omits the `included_permissions` field.\nThis is the default value.", + "Returns all fields." + ], "enum": [ "BASIC", "FULL" ], - "type": "string", - "enumDescriptions": [ - "Omits the `included_permissions` field.\nThis is the default value.", - "Returns all fields." - ] + "type": "string" } }, "id": "QueryGrantableRolesRequest" @@ -195,48 +197,24 @@ "type": "object", "properties": { "roleId": { - "description": "The role id to use for this role.", - "type": "string" + "type": "string", + "description": "The role id to use for this role." }, "role": { - "description": "The Role resource to create.", - "$ref": "Role" + "$ref": "Role", + "description": "The Role resource to create." } }, "id": "CreateRoleRequest" }, - "ListServiceAccountKeysResponse": { - "properties": { - "keys": { - "description": "The public keys for the service account.", - "items": { - "$ref": "ServiceAccountKey" - }, - "type": "array" - } - }, - "id": "ListServiceAccountKeysResponse", - "description": "The service account keys list response.", - "type": "object" - }, - "TestIamPermissionsResponse": { - "description": "Response message for `TestIamPermissions` method.", - "type": "object", - "properties": { - "permissions": { - "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "id": "TestIamPermissionsResponse" - }, "QueryTestablePermissionsRequest": { - "description": "A request to get permissions which can be tested on a resource.", "type": "object", "properties": { + "pageSize": { + "description": "Optional limit on the number of permissions to include in the response.", + "format": "int32", + "type": "integer" + }, "fullResourceName": { "description": "Required. 
The full resource name to query from the list of testable\npermissions.\n\nThe name follows the Google Cloud Platform resource format.\nFor example, a Cloud Platform project with id `my-project` will be named\n`//cloudresourcemanager.googleapis.com/projects/my-project`.", "type": "string" @@ -244,51 +222,68 @@ "pageToken": { "description": "Optional pagination token returned in an earlier\nQueryTestablePermissionsRequest.", "type": "string" - }, - "pageSize": { - "format": "int32", - "description": "Optional limit on the number of permissions to include in the response.", - "type": "integer" } }, - "id": "QueryTestablePermissionsRequest" + "id": "QueryTestablePermissionsRequest", + "description": "A request to get permissions which can be tested on a resource." + }, + "TestIamPermissionsResponse": { + "description": "Response message for `TestIamPermissions` method.", + "type": "object", + "properties": { + "permissions": { + "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", + "type": "array", + "items": { + "type": "string" + } + } + }, + "id": "TestIamPermissionsResponse" + }, + "ListServiceAccountKeysResponse": { + "type": "object", + "properties": { + "keys": { + "description": "The public keys for the service account.", + "type": "array", + "items": { + "$ref": "ServiceAccountKey" + } + } + }, + "id": "ListServiceAccountKeysResponse", + "description": "The service account keys list response." }, "ServiceAccountKey": { "description": "Represents a service account key.\n\nA service account has two sets of key-pairs: user-managed, and\nsystem-managed.\n\nUser-managed key-pairs can be created and deleted by users. Users are\nresponsible for rotating these keys periodically to ensure security of\ntheir service accounts. Users retain the private key of these key-pairs,\nand Google retains ONLY the public key.\n\nSystem-managed key-pairs are managed automatically by Google, and rotated\ndaily without user intervention. The private key never leaves Google's\nservers to maximize security.\n\nPublic keys for all service accounts are also published at the OAuth2\nService Account API.", "type": "object", "properties": { - "privateKeyData": { - "format": "byte", - "description": "The private key data. Only provided in `CreateServiceAccountKey`\nresponses. Make sure to keep the private key data secure because it\nallows for the assertion of the service account identity.\nWhen decoded, the private key data can be used to authenticate with\nGoogle API client libraries and with\n\u003ca href=\"/sdk/gcloud/reference/auth/activate-service-account\"\u003egcloud\nauth activate-service-account\u003c/a\u003e.", - "type": "string" - }, - "publicKeyData": { - "format": "byte", - "description": "The public key data. Only provided in `GetServiceAccountKey` responses.", - "type": "string" - }, - "name": { - "description": "The resource name of the service account key in the following format\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}/keys/{key}`.", - "type": "string" - }, "validBeforeTime": { - "format": "google-datetime", "description": "The key can be used before this timestamp.", + "format": "google-datetime", "type": "string" }, "keyAlgorithm": { - "enum": [ - "KEY_ALG_UNSPECIFIED", - "KEY_ALG_RSA_1024", - "KEY_ALG_RSA_2048" - ], - "description": "Specifies the algorithm (and possibly key size) for the key.", - "type": "string", "enumDescriptions": [ "An unspecified key algorithm.", "1k RSA Key.", - "2k RSA Key." - ] + "2k RSA Key.", + "HMAC." 
+ ], + "enum": [ + "KEY_ALG_UNSPECIFIED", + "KEY_ALG_RSA_1024", + "KEY_ALG_RSA_2048", + "KEY_ALG_GCS_SYMMETRIC_HMAC" + ], + "description": "Specifies the algorithm (and possibly key size) for the key.", + "type": "string" + }, + "validAfterTime": { + "description": "The key can be used after this timestamp.", + "format": "google-datetime", + "type": "string" }, "privateKeyType": { "enum": [ @@ -304,9 +299,18 @@ "Google Credentials File format." ] }, - "validAfterTime": { - "format": "google-datetime", - "description": "The key can be used after this timestamp.", + "privateKeyData": { + "description": "The private key data. Only provided in `CreateServiceAccountKey`\nresponses. Make sure to keep the private key data secure because it\nallows for the assertion of the service account identity.\nWhen decoded, the private key data can be used to authenticate with\nGoogle API client libraries and with\n\u003ca href=\"/sdk/gcloud/reference/auth/activate-service-account\"\u003egcloud\nauth activate-service-account\u003c/a\u003e.", + "format": "byte", + "type": "string" + }, + "publicKeyData": { + "description": "The public key data. Only provided in `GetServiceAccountKey` responses.", + "format": "byte", + "type": "string" + }, + "name": { + "description": "The resource name of the service account key in the following format\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}/keys/{key}`.", "type": "string" } }, @@ -314,38 +318,31 @@ }, "SignBlobResponse": { "properties": { - "keyId": { - "description": "The id of the key used to sign the blob.", + "signature": { + "description": "The signed blob.", + "format": "byte", "type": "string" }, - "signature": { - "format": "byte", - "description": "The signed blob.", - "type": "string" + "keyId": { + "type": "string", + "description": "The id of the key used to sign the blob." } }, "id": "SignBlobResponse", "description": "The service account sign blob response.", "type": "object" }, - "SignJwtRequest": { - "properties": { - "payload": { - "description": "The JWT payload to sign, a JSON JWT Claim set.", - "type": "string" - } - }, - "id": "SignJwtRequest", - "description": "The service account sign JWT request.", - "type": "object" - }, "Permission": { "description": "A permission which can be included by a role.", "type": "object", "properties": { + "name": { + "description": "The name of this Permission.", + "type": "string" + }, "onlyInPredefinedRoles": { - "description": "This permission can ONLY be used in predefined roles.", - "type": "boolean" + "type": "boolean", + "description": "This permission can ONLY be used in predefined roles." }, "title": { "description": "The title of this Permission.", @@ -370,43 +367,52 @@ ] }, "stage": { - "enum": [ - "ALPHA", - "BETA", - "GA", - "DEPRECATED" - ], - "description": "The current launch stage of the permission.", "type": "string", "enumDescriptions": [ "The permission is currently in an alpha phase.", "The permission is currently in a beta phase.", "The permission is generally available.", "The permission is being deprecated." - ] - }, - "name": { - "description": "The name of this Permission.", - "type": "string" + ], + "enum": [ + "ALPHA", + "BETA", + "GA", + "DEPRECATED" + ], + "description": "The current launch stage of the permission." 
} }, "id": "Permission" }, + "SignJwtRequest": { + "type": "object", + "properties": { + "payload": { + "description": "The JWT payload to sign, a JSON JWT Claim set.", + "type": "string" + } + }, + "id": "SignJwtRequest", + "description": "The service account sign JWT request." + }, "PolicyDelta": { + "type": "object", "properties": { "bindingDeltas": { "description": "The delta for Bindings between two policies.", + "type": "array", "items": { "$ref": "BindingDelta" - }, - "type": "array" + } } }, "id": "PolicyDelta", - "description": "The difference delta between two policies.", - "type": "object" + "description": "The difference delta between two policies." }, "ListServiceAccountsResponse": { + "description": "The service account list response.", + "type": "object", "properties": { "nextPageToken": { "description": "To retrieve the next page of results, set\nListServiceAccountsRequest.page_token\nto this value.", @@ -414,45 +420,43 @@ }, "accounts": { "description": "The list of matching service accounts.", + "type": "array", "items": { "$ref": "ServiceAccount" - }, - "type": "array" + } } }, - "id": "ListServiceAccountsResponse", - "description": "The service account list response.", - "type": "object" + "id": "ListServiceAccountsResponse" }, "QueryGrantableRolesResponse": { - "description": "The grantable role query response.", - "type": "object", "properties": { + "roles": { + "description": "The list of matching roles.", + "type": "array", + "items": { + "$ref": "Role" + } + }, "nextPageToken": { "description": "To retrieve the next page of results, set\n`QueryGrantableRolesRequest.page_token` to this value.", "type": "string" - }, - "roles": { - "description": "The list of matching roles.", - "items": { - "$ref": "Role" - }, - "type": "array" } }, - "id": "QueryGrantableRolesResponse" + "id": "QueryGrantableRolesResponse", + "description": "The grantable role query response.", + "type": "object" }, "SignBlobRequest": { - "description": "The service account sign blob request.", - "type": "object", "properties": { "bytesToSign": { - "format": "byte", "description": "The bytes to sign.", + "format": "byte", "type": "string" } }, - "id": "SignBlobRequest" + "id": "SignBlobRequest", + "description": "The service account sign blob request.", + "type": "object" }, "SetIamPolicyRequest": { "description": "Request message for `SetIamPolicy` method.", @@ -469,16 +473,16 @@ "description": "The response containing permissions which can be tested on a resource.", "type": "object", "properties": { + "permissions": { + "description": "The Permissions testable on the requested resource.", + "type": "array", + "items": { + "$ref": "Permission" + } + }, "nextPageToken": { "description": "To retrieve the next page of results, set\n`QueryTestableRolesRequest.page_token` to this value.", "type": "string" - }, - "permissions": { - "description": "The Permissions testable on the requested resource.", - "items": { - "$ref": "Permission" - }, - "type": "array" } }, "id": "QueryTestablePermissionsResponse" @@ -490,22 +494,23 @@ "id": "Empty" }, "CreateServiceAccountKeyRequest": { - "description": "The service account key create request.", "type": "object", "properties": { "keyAlgorithm": { - "enumDescriptions": [ - "An unspecified key algorithm.", - "1k RSA Key.", - "2k RSA Key." 
- ], "enum": [ "KEY_ALG_UNSPECIFIED", "KEY_ALG_RSA_1024", - "KEY_ALG_RSA_2048" + "KEY_ALG_RSA_2048", + "KEY_ALG_GCS_SYMMETRIC_HMAC" ], "description": "Which type of key and algorithm to use for the key.\nThe default is currently a 2K RSA key. However this may change in the\nfuture.", - "type": "string" + "type": "string", + "enumDescriptions": [ + "An unspecified key algorithm.", + "1k RSA Key.", + "2k RSA Key.", + "HMAC." + ] }, "privateKeyType": { "enumDescriptions": [ @@ -522,80 +527,82 @@ "type": "string" } }, - "id": "CreateServiceAccountKeyRequest" - }, - "TestIamPermissionsRequest": { - "properties": { - "permissions": { - "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "id": "TestIamPermissionsRequest", - "description": "Request message for `TestIamPermissions` method.", - "type": "object" + "id": "CreateServiceAccountKeyRequest", + "description": "The service account key create request." }, "SignJwtResponse": { - "description": "The service account sign JWT response.", "type": "object", "properties": { - "signedJwt": { - "description": "The signed JWT.", - "type": "string" - }, "keyId": { "description": "The id of the key used to sign the JWT.", "type": "string" + }, + "signedJwt": { + "description": "The signed JWT.", + "type": "string" } }, - "id": "SignJwtResponse" + "id": "SignJwtResponse", + "description": "The service account sign JWT response." + }, + "TestIamPermissionsRequest": { + "id": "TestIamPermissionsRequest", + "description": "Request message for `TestIamPermissions` method.", + "type": "object", + "properties": { + "permissions": { + "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", + "type": "array", + "items": { + "type": "string" + } + } + } }, "Policy": { "description": "Defines an Identity and Access Management (IAM) policy. It is used to\nspecify access control policies for Cloud Platform resources.\n\n\nA `Policy` consists of a list of `bindings`. A `Binding` binds a list of\n`members` to a `role`, where the members can be user accounts, Google groups,\nGoogle domains, and service accounts. 
A `role` is a named list of permissions\ndefined by IAM.\n\n**Example**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/owner\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-other-app@appspot.gserviceaccount.com\",\n ]\n },\n {\n \"role\": \"roles/viewer\",\n \"members\": [\"user:sean@example.com\"]\n }\n ]\n }\n\nFor a description of IAM and its features, see the\n[IAM developer's guide](https://cloud.google.com/iam).", "type": "object", "properties": { + "bindings": { + "description": "Associates a list of `members` to a `role`.\n`bindings` with no members will result in an error.", + "type": "array", + "items": { + "$ref": "Binding" + } + }, "etag": { - "format": "byte", "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\nIf no `etag` is provided in the call to `setIamPolicy`, then the existing\npolicy is overwritten blindly.", + "format": "byte", "type": "string" }, "version": { - "format": "int32", "description": "Version of the `Policy`. The default version is 0.", + "format": "int32", "type": "integer" - }, - "bindings": { - "description": "Associates a list of `members` to a `role`.\n`bindings` with no members will result in an error.", - "items": { - "$ref": "Binding" - }, - "type": "array" } }, "id": "Policy" }, "ListRolesResponse": { + "description": "The response containing the roles defined under a resource.", + "type": "object", "properties": { + "roles": { + "description": "The Roles defined on this resource.", + "type": "array", + "items": { + "$ref": "Role" + } + }, "nextPageToken": { "description": "To retrieve the next page of results, set\n`ListRolesRequest.page_token` to this value.", "type": "string" - }, - "roles": { - "description": "The Roles defined on this resource.", - "items": { - "$ref": "Role" - }, - "type": "array" } }, - "id": "ListRolesResponse", - "description": "The response containing the roles defined under a resource.", - "type": "object" + "id": "ListRolesResponse" }, "AuditData": { + "id": "AuditData", "description": "Audit log information specific to Cloud IAM. This message is serialized\nas an `Any` type in the `ServiceData` message of an\n`AuditLog` message.", "type": "object", "properties": { @@ -603,10 +610,11 @@ "description": "Policy delta between the original policy and the newly set policy.", "$ref": "PolicyDelta" } - }, - "id": "AuditData" + } }, "BindingDelta": { + "description": "One delta entry for Binding. 
Each individual change (only one member in each\nentry) to a binding will be a separate entry.", + "type": "object", "properties": { "action": { "enumDescriptions": [ @@ -622,29 +630,27 @@ "description": "The action that was performed on a Binding.\nRequired", "type": "string" }, + "member": { + "type": "string", + "description": "A single identity requesting access for a Cloud Platform resource.\nFollows the same format of Binding.members.\nRequired" + }, "condition": { "$ref": "Expr", "description": "The condition that is associated with this binding.\nThis field is GOOGLE_INTERNAL.\nThis field is not logged in IAM side because it's only for audit logging.\nOptional" }, - "member": { - "description": "A single identity requesting access for a Cloud Platform resource.\nFollows the same format of Binding.members.\nRequired", - "type": "string" - }, "role": { "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.\nRequired", "type": "string" } }, - "id": "BindingDelta", - "description": "One delta entry for Binding. Each individual change (only one member in each\nentry) to a binding will be a separate entry.", - "type": "object" + "id": "BindingDelta" } }, - "protocol": "rest", "icons": { "x32": "http://www.google.com/images/icons/product/search-32.gif", "x16": "http://www.google.com/images/icons/product/search-16.gif" }, + "protocol": "rest", "canonicalName": "iam", "auth": { "oauth2": { @@ -663,743 +669,10 @@ "title": "Google Identity and Access Management (IAM) API", "ownerName": "Google", "resources": { - "permissions": { - "methods": { - "queryTestablePermissions": { - "id": "iam.permissions.queryTestablePermissions", - "path": "v1/permissions:queryTestablePermissions", - "request": { - "$ref": "QueryTestablePermissionsRequest" - }, - "description": "Lists the permissions testable on a resource.\nA permission is testable if it can be tested for an identity on a resource.", - "response": { - "$ref": "QueryTestablePermissionsResponse" - }, - "parameterOrder": [], - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": {}, - "flatPath": "v1/permissions:queryTestablePermissions" - } - } - }, - "roles": { - "methods": { - "queryGrantableRoles": { - "httpMethod": "POST", - "parameterOrder": [], - "response": { - "$ref": "QueryGrantableRolesResponse" - }, - "parameters": {}, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1/roles:queryGrantableRoles", - "path": "v1/roles:queryGrantableRoles", - "id": "iam.roles.queryGrantableRoles", - "description": "Queries roles that can be granted on a particular resource.\nA role is grantable if it can be used as the role in a binding for a policy\nfor that resource.", - "request": { - "$ref": "QueryGrantableRolesRequest" - } - }, - "get": { - "id": "iam.roles.get", - "path": "v1/{+name}", - "description": "Gets a Role definition.", - "response": { - "$ref": "Role" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "GET", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "name": { - "pattern": "^roles/[^/]+$", - "location": "path", - "description": "The resource name of the role in one of the following formats:\n`roles/{ROLE_NAME}`\n`organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}`\n`projects/{PROJECT_ID}/roles/{ROLE_NAME}`", - "type": "string", - "required": true - } - }, - "flatPath": "v1/roles/{rolesId}" - }, - "list": { - "httpMethod": "GET", - 
"response": { - "$ref": "ListRolesResponse" - }, - "parameterOrder": [], - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "parent": { - "description": "The resource name of the parent resource in one of the following formats:\n`` (empty string) -- this refers to curated roles.\n`organizations/{ORGANIZATION_ID}`\n`projects/{PROJECT_ID}`", - "type": "string", - "location": "query" - }, - "showDeleted": { - "description": "Include Roles that have been deleted.", - "type": "boolean", - "location": "query" - }, - "pageToken": { - "description": "Optional pagination token returned in an earlier ListRolesResponse.", - "type": "string", - "location": "query" - }, - "pageSize": { - "location": "query", - "format": "int32", - "description": "Optional limit on the number of roles to include in the response.", - "type": "integer" - }, - "view": { - "enum": [ - "BASIC", - "FULL" - ], - "description": "Optional view for the returned Role objects.", - "type": "string", - "location": "query" - } - }, - "flatPath": "v1/roles", - "path": "v1/roles", - "id": "iam.roles.list", - "description": "Lists the Roles defined on a resource." - } - } - }, - "organizations": { - "resources": { - "roles": { - "methods": { - "delete": { - "id": "iam.organizations.roles.delete", - "path": "v1/{+name}", - "description": "Soft deletes a role. The role is suspended and cannot be used to create new\nIAM Policy Bindings.\nThe Role will not be included in `ListRoles()` unless `show_deleted` is set\nin the `ListRolesRequest`. The Role contains the deleted boolean set.\nExisting Bindings remains, but are inactive. The Role can be undeleted\nwithin 7 days. After 7 days the Role is deleted and all Bindings associated\nwith the role are removed.", - "response": { - "$ref": "Role" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "DELETE", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "etag": { - "location": "query", - "format": "byte", - "description": "Used to perform a consistent read-modify-write.", - "type": "string" - }, - "name": { - "pattern": "^organizations/[^/]+/roles/[^/]+$", - "location": "path", - "description": "The resource name of the role in one of the following formats:\n`organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}`\n`projects/{PROJECT_ID}/roles/{ROLE_NAME}`", - "type": "string", - "required": true - } - }, - "flatPath": "v1/organizations/{organizationsId}/roles/{rolesId}" - }, - "list": { - "path": "v1/{+parent}/roles", - "id": "iam.organizations.roles.list", - "description": "Lists the Roles defined on a resource.", - "httpMethod": "GET", - "response": { - "$ref": "ListRolesResponse" - }, - "parameterOrder": [ - "parent" - ], - "parameters": { - "parent": { - "pattern": "^organizations/[^/]+$", - "location": "path", - "description": "The resource name of the parent resource in one of the following formats:\n`` (empty string) -- this refers to curated roles.\n`organizations/{ORGANIZATION_ID}`\n`projects/{PROJECT_ID}`", - "type": "string", - "required": true - }, - "showDeleted": { - "location": "query", - "description": "Include Roles that have been deleted.", - "type": "boolean" - }, - "pageToken": { - "location": "query", - "description": "Optional pagination token returned in an earlier ListRolesResponse.", - "type": "string" - }, - "pageSize": { - "format": "int32", - "description": "Optional limit on the number of roles to include in the response.", - "type": "integer", - "location": "query" - }, - "view": { - 
"enum": [ - "BASIC", - "FULL" - ], - "description": "Optional view for the returned Role objects.", - "type": "string", - "location": "query" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1/organizations/{organizationsId}/roles" - }, - "create": { - "httpMethod": "POST", - "parameterOrder": [ - "parent" - ], - "response": { - "$ref": "Role" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "parent": { - "description": "The resource name of the parent resource in one of the following formats:\n`organizations/{ORGANIZATION_ID}`\n`projects/{PROJECT_ID}`", - "type": "string", - "required": true, - "pattern": "^organizations/[^/]+$", - "location": "path" - } - }, - "flatPath": "v1/organizations/{organizationsId}/roles", - "path": "v1/{+parent}/roles", - "id": "iam.organizations.roles.create", - "request": { - "$ref": "CreateRoleRequest" - }, - "description": "Creates a new Role." - }, - "undelete": { - "id": "iam.organizations.roles.undelete", - "path": "v1/{+name}:undelete", - "request": { - "$ref": "UndeleteRoleRequest" - }, - "description": "Undelete a Role, bringing it back in its previous state.", - "response": { - "$ref": "Role" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "name": { - "pattern": "^organizations/[^/]+/roles/[^/]+$", - "location": "path", - "description": "The resource name of the role in one of the following formats:\n`organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}`\n`projects/{PROJECT_ID}/roles/{ROLE_NAME}`", - "type": "string", - "required": true - } - }, - "flatPath": "v1/organizations/{organizationsId}/roles/{rolesId}:undelete" - }, - "get": { - "id": "iam.organizations.roles.get", - "path": "v1/{+name}", - "description": "Gets a Role definition.", - "response": { - "$ref": "Role" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "GET", - "parameters": { - "name": { - "pattern": "^organizations/[^/]+/roles/[^/]+$", - "location": "path", - "description": "The resource name of the role in one of the following formats:\n`roles/{ROLE_NAME}`\n`organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}`\n`projects/{PROJECT_ID}/roles/{ROLE_NAME}`", - "type": "string", - "required": true - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1/organizations/{organizationsId}/roles/{rolesId}" - }, - "patch": { - "response": { - "$ref": "Role" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "PATCH", - "parameters": { - "name": { - "pattern": "^organizations/[^/]+/roles/[^/]+$", - "location": "path", - "description": "The resource name of the role in one of the following formats:\n`roles/{ROLE_NAME}`\n`organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}`\n`projects/{PROJECT_ID}/roles/{ROLE_NAME}`", - "type": "string", - "required": true - }, - "updateMask": { - "format": "google-fieldmask", - "description": "A mask describing which fields in the Role have changed.", - "type": "string", - "location": "query" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1/organizations/{organizationsId}/roles/{rolesId}", - "id": "iam.organizations.roles.patch", - "path": "v1/{+name}", - "description": "Updates a Role definition.", - "request": { - "$ref": "Role" - } - } - } - } - } - }, "projects": { "resources": { - "roles": { - "methods": { - "create": { - "response": { - "$ref": "Role" - }, - 
"parameterOrder": [ - "parent" - ], - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "parent": { - "description": "The resource name of the parent resource in one of the following formats:\n`organizations/{ORGANIZATION_ID}`\n`projects/{PROJECT_ID}`", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+$", - "location": "path" - } - }, - "flatPath": "v1/projects/{projectsId}/roles", - "id": "iam.projects.roles.create", - "path": "v1/{+parent}/roles", - "request": { - "$ref": "CreateRoleRequest" - }, - "description": "Creates a new Role." - }, - "patch": { - "id": "iam.projects.roles.patch", - "path": "v1/{+name}", - "request": { - "$ref": "Role" - }, - "description": "Updates a Role definition.", - "response": { - "$ref": "Role" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "PATCH", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "name": { - "description": "The resource name of the role in one of the following formats:\n`roles/{ROLE_NAME}`\n`organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}`\n`projects/{PROJECT_ID}/roles/{ROLE_NAME}`", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/roles/[^/]+$", - "location": "path" - }, - "updateMask": { - "format": "google-fieldmask", - "description": "A mask describing which fields in the Role have changed.", - "type": "string", - "location": "query" - } - }, - "flatPath": "v1/projects/{projectsId}/roles/{rolesId}" - }, - "undelete": { - "description": "Undelete a Role, bringing it back in its previous state.", - "request": { - "$ref": "UndeleteRoleRequest" - }, - "response": { - "$ref": "Role" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "POST", - "parameters": { - "name": { - "description": "The resource name of the role in one of the following formats:\n`organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}`\n`projects/{PROJECT_ID}/roles/{ROLE_NAME}`", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/roles/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1/projects/{projectsId}/roles/{rolesId}:undelete", - "id": "iam.projects.roles.undelete", - "path": "v1/{+name}:undelete" - }, - "get": { - "response": { - "$ref": "Role" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "GET", - "parameters": { - "name": { - "description": "The resource name of the role in one of the following formats:\n`roles/{ROLE_NAME}`\n`organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}`\n`projects/{PROJECT_ID}/roles/{ROLE_NAME}`", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/roles/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1/projects/{projectsId}/roles/{rolesId}", - "id": "iam.projects.roles.get", - "path": "v1/{+name}", - "description": "Gets a Role definition." 
- }, - "delete": { - "response": { - "$ref": "Role" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "DELETE", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "etag": { - "format": "byte", - "description": "Used to perform a consistent read-modify-write.", - "type": "string", - "location": "query" - }, - "name": { - "description": "The resource name of the role in one of the following formats:\n`organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}`\n`projects/{PROJECT_ID}/roles/{ROLE_NAME}`", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/roles/[^/]+$", - "location": "path" - } - }, - "flatPath": "v1/projects/{projectsId}/roles/{rolesId}", - "id": "iam.projects.roles.delete", - "path": "v1/{+name}", - "description": "Soft deletes a role. The role is suspended and cannot be used to create new\nIAM Policy Bindings.\nThe Role will not be included in `ListRoles()` unless `show_deleted` is set\nin the `ListRolesRequest`. The Role contains the deleted boolean set.\nExisting Bindings remains, but are inactive. The Role can be undeleted\nwithin 7 days. After 7 days the Role is deleted and all Bindings associated\nwith the role are removed." - }, - "list": { - "description": "Lists the Roles defined on a resource.", - "response": { - "$ref": "ListRolesResponse" - }, - "parameterOrder": [ - "parent" - ], - "httpMethod": "GET", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "parent": { - "pattern": "^projects/[^/]+$", - "location": "path", - "description": "The resource name of the parent resource in one of the following formats:\n`` (empty string) -- this refers to curated roles.\n`organizations/{ORGANIZATION_ID}`\n`projects/{PROJECT_ID}`", - "type": "string", - "required": true - }, - "showDeleted": { - "description": "Include Roles that have been deleted.", - "type": "boolean", - "location": "query" - }, - "pageToken": { - "description": "Optional pagination token returned in an earlier ListRolesResponse.", - "type": "string", - "location": "query" - }, - "pageSize": { - "format": "int32", - "description": "Optional limit on the number of roles to include in the response.", - "type": "integer", - "location": "query" - }, - "view": { - "location": "query", - "enum": [ - "BASIC", - "FULL" - ], - "description": "Optional view for the returned Role objects.", - "type": "string" - } - }, - "flatPath": "v1/projects/{projectsId}/roles", - "id": "iam.projects.roles.list", - "path": "v1/{+parent}/roles" - } - } - }, "serviceAccounts": { - "resources": { - "keys": { - "methods": { - "delete": { - "description": "Deletes a ServiceAccountKey.", - "parameterOrder": [ - "name" - ], - "httpMethod": "DELETE", - "response": { - "$ref": "Empty" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "name": { - "description": "The resource name of the service account key in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}/keys/{key}`.\nUsing `-` as a wildcard for the project will infer the project from\nthe account. 
The `account` value can be the `email` address or the\n`unique_id` of the service account.", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/serviceAccounts/[^/]+/keys/[^/]+$", - "location": "path" - } - }, - "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}/keys/{keysId}", - "id": "iam.projects.serviceAccounts.keys.delete", - "path": "v1/{+name}" - }, - "get": { - "description": "Gets the ServiceAccountKey\nby key id.", - "httpMethod": "GET", - "response": { - "$ref": "ServiceAccountKey" - }, - "parameterOrder": [ - "name" - ], - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "publicKeyType": { - "enum": [ - "TYPE_NONE", - "TYPE_X509_PEM_FILE", - "TYPE_RAW_PUBLIC_KEY" - ], - "description": "The output format of the public key requested.\nX509_PEM is the default output format.", - "type": "string", - "location": "query" - }, - "name": { - "pattern": "^projects/[^/]+/serviceAccounts/[^/]+/keys/[^/]+$", - "location": "path", - "description": "The resource name of the service account key in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}/keys/{key}`.\n\nUsing `-` as a wildcard for the project will infer the project from\nthe account. The `account` value can be the `email` address or the\n`unique_id` of the service account.", - "type": "string", - "required": true - } - }, - "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}/keys/{keysId}", - "path": "v1/{+name}", - "id": "iam.projects.serviceAccounts.keys.get" - }, - "list": { - "httpMethod": "GET", - "response": { - "$ref": "ListServiceAccountKeysResponse" - }, - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", - "location": "path", - "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.\n\nUsing `-` as a wildcard for the project, will infer the project from\nthe account. The `account` value can be the `email` address or the\n`unique_id` of the service account.", - "type": "string", - "required": true - }, - "keyTypes": { - "enum": [ - "KEY_TYPE_UNSPECIFIED", - "USER_MANAGED", - "SYSTEM_MANAGED" - ], - "description": "Filters the types of keys the user wants to include in the list\nresponse. Duplicate key types are not allowed. If no key type\nis provided, all keys are returned.", - "type": "string", - "repeated": true, - "location": "query" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}/keys", - "path": "v1/{+name}/keys", - "id": "iam.projects.serviceAccounts.keys.list", - "description": "Lists ServiceAccountKeys." 
- }, - "create": { - "path": "v1/{+name}/keys", - "id": "iam.projects.serviceAccounts.keys.create", - "request": { - "$ref": "CreateServiceAccountKeyRequest" - }, - "description": "Creates a ServiceAccountKey\nand returns it.", - "httpMethod": "POST", - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "ServiceAccountKey" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "name": { - "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", - "location": "path", - "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.\nUsing `-` as a wildcard for the project will infer the project from\nthe account. The `account` value can be the `email` address or the\n`unique_id` of the service account.", - "type": "string", - "required": true - } - }, - "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}/keys" - } - } - } - }, "methods": { - "setIamPolicy": { - "description": "Sets the IAM access control policy for a\nServiceAccount.", - "request": { - "$ref": "SetIamPolicyRequest" - }, - "httpMethod": "POST", - "parameterOrder": [ - "resource" - ], - "response": { - "$ref": "Policy" - }, - "parameters": { - "resource": { - "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", - "type": "string", - "required": true, - "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:setIamPolicy", - "path": "v1/{+resource}:setIamPolicy", - "id": "iam.projects.serviceAccounts.setIamPolicy" - }, - "create": { - "id": "iam.projects.serviceAccounts.create", - "path": "v1/{+name}/serviceAccounts", - "request": { - "$ref": "CreateServiceAccountRequest" - }, - "description": "Creates a ServiceAccount\nand returns it.", - "response": { - "$ref": "ServiceAccount" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "name": { - "pattern": "^projects/[^/]+$", - "location": "path", - "description": "Required. The resource name of the project associated with the service\naccounts, such as `projects/my-project-123`.", - "type": "string", - "required": true - } - }, - "flatPath": "v1/projects/{projectsId}/serviceAccounts" - }, - "signJwt": { - "response": { - "$ref": "SignJwtResponse" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "parameters": { - "name": { - "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", - "location": "path", - "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.\nUsing `-` as a wildcard for the project will infer the project from\nthe account. 
The `account` value can be the `email` address or the\n`unique_id` of the service account.", - "type": "string", - "required": true - } - }, - "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:signJwt", - "id": "iam.projects.serviceAccounts.signJwt", - "path": "v1/{+name}:signJwt", - "request": { - "$ref": "SignJwtRequest" - }, - "description": "Signs a JWT using a service account's system-managed private key.\n\nIf no expiry time (`exp`) is provided in the `SignJwtRequest`, IAM sets an\nan expiry time of one hour by default. If you request an expiry time of\nmore than one hour, the request will fail." - }, "getIamPolicy": { "description": "Returns the IAM access control policy for a\nServiceAccount.", "response": { @@ -1414,75 +687,71 @@ ], "parameters": { "resource": { - "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", - "type": "string", "required": true, + "type": "string", "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", - "location": "path" + "location": "path", + "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field." } }, "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:getIamPolicy", - "id": "iam.projects.serviceAccounts.getIamPolicy", - "path": "v1/{+resource}:getIamPolicy" + "path": "v1/{+resource}:getIamPolicy", + "id": "iam.projects.serviceAccounts.getIamPolicy" }, "get": { "description": "Gets a ServiceAccount.", + "httpMethod": "GET", "response": { "$ref": "ServiceAccount" }, "parameterOrder": [ "name" ], - "httpMethod": "GET", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], "parameters": { "name": { - "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.\nUsing `-` as a wildcard for the project will infer the project from\nthe account. The `account` value can be the `email` address or the\n`unique_id` of the service account.", - "type": "string", + "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. 
The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", "required": true, + "type": "string", "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", "location": "path" } }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}", "id": "iam.projects.serviceAccounts.get", "path": "v1/{+name}" }, "update": { - "response": { - "$ref": "ServiceAccount" - }, - "parameterOrder": [ - "name" - ], - "httpMethod": "PUT", "parameters": { "name": { - "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.\n\nRequests using `-` as a wildcard for the project will infer the project\nfrom the `account` and the `account` value can be the `email` address or\nthe `unique_id` of the service account.\n\nIn responses the resource name will always be in the format\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.", - "type": "string", - "required": true, "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", - "location": "path" + "location": "path", + "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\n\nRequests using `-` as a wildcard for the `PROJECT_ID` will infer the\nproject from the `account` and the `ACCOUNT` value can be the `email`\naddress or the `unique_id` of the service account.\n\nIn responses the resource name will always be in the format\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.", + "required": true, + "type": "string" } }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}", - "id": "iam.projects.serviceAccounts.update", "path": "v1/{+name}", - "description": "Updates a ServiceAccount.\n\nCurrently, only the following fields are updatable:\n`display_name` .\nThe `etag` is mandatory.", + "id": "iam.projects.serviceAccounts.update", "request": { "$ref": "ServiceAccount" - } + }, + "description": "Updates a ServiceAccount.\n\nCurrently, only the following fields are updatable:\n`display_name` .\nThe `etag` is mandatory.", + "response": { + "$ref": "ServiceAccount" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "PUT" }, "testIamPermissions": { - "description": "Tests the specified permissions against the IAM access control policy\nfor a ServiceAccount.", - "request": { - "$ref": "TestIamPermissionsRequest" - }, "httpMethod": "POST", "parameterOrder": [ "resource" @@ -1495,18 +764,26 @@ "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", "location": "path", "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", - "type": "string", - "required": true + "required": true, + "type": "string" } }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:testIamPermissions", + "id": "iam.projects.serviceAccounts.testIamPermissions", "path": "v1/{+resource}:testIamPermissions", - "id": "iam.projects.serviceAccounts.testIamPermissions" + "request": { + "$ref": "TestIamPermissionsRequest" + }, + "description": "Tests the specified permissions against the IAM access control policy\nfor a ServiceAccount." 
}, "delete": { + "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}", + "path": "v1/{+name}", + "id": "iam.projects.serviceAccounts.delete", + "description": "Deletes a ServiceAccount.", "response": { "$ref": "Empty" }, @@ -1519,72 +796,68 @@ ], "parameters": { "name": { - "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", - "location": "path", - "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.\nUsing `-` as a wildcard for the project will infer the project from\nthe account. The `account` value can be the `email` address or the\n`unique_id` of the service account.", + "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", + "required": true, "type": "string", - "required": true + "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + "location": "path" } - }, - "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}", - "id": "iam.projects.serviceAccounts.delete", - "path": "v1/{+name}", - "description": "Deletes a ServiceAccount." + } }, "signBlob": { "description": "Signs a blob using a service account's system-managed private key.", "request": { "$ref": "SignBlobRequest" }, - "response": { - "$ref": "SignBlobResponse" - }, + "httpMethod": "POST", "parameterOrder": [ "name" ], - "httpMethod": "POST", - "parameters": { - "name": { - "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", - "location": "path", - "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.\nUsing `-` as a wildcard for the project will infer the project from\nthe account. The `account` value can be the `email` address or the\n`unique_id` of the service account.", - "type": "string", - "required": true - } + "response": { + "$ref": "SignBlobResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ], + "parameters": { + "name": { + "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + "location": "path", + "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", + "required": true, + "type": "string" + } + }, "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:signBlob", "id": "iam.projects.serviceAccounts.signBlob", "path": "v1/{+name}:signBlob" }, "list": { "description": "Lists ServiceAccounts for a project.", + "httpMethod": "GET", "response": { "$ref": "ListServiceAccountsResponse" }, "parameterOrder": [ "name" ], - "httpMethod": "GET", "parameters": { - "pageToken": { - "description": "Optional pagination token returned in an earlier\nListServiceAccountsResponse.next_page_token.", - "type": "string", - "location": "query" - }, "name": { - "description": "Required. The resource name of the project associated with the service\naccounts, such as `projects/my-project-123`.", - "type": "string", - "required": true, "pattern": "^projects/[^/]+$", - "location": "path" + "location": "path", + "description": "Required. 
The resource name of the project associated with the service\naccounts, such as `projects/my-project-123`.", + "required": true, + "type": "string" + }, + "pageToken": { + "location": "query", + "description": "Optional pagination token returned in an earlier\nListServiceAccountsResponse.next_page_token.", + "type": "string" }, "pageSize": { "location": "query", - "format": "int32", "description": "Optional limit on the number of service accounts to include in the\nresponse. Further accounts can subsequently be obtained by including the\nListServiceAccountsResponse.next_page_token\nin a subsequent request.", + "format": "int32", "type": "integer" } }, @@ -1594,6 +867,739 @@ "flatPath": "v1/projects/{projectsId}/serviceAccounts", "id": "iam.projects.serviceAccounts.list", "path": "v1/{+name}/serviceAccounts" + }, + "create": { + "path": "v1/{+name}/serviceAccounts", + "id": "iam.projects.serviceAccounts.create", + "request": { + "$ref": "CreateServiceAccountRequest" + }, + "description": "Creates a ServiceAccount\nand returns it.", + "response": { + "$ref": "ServiceAccount" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "POST", + "parameters": { + "name": { + "description": "Required. The resource name of the project associated with the service\naccounts, such as `projects/my-project-123`.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectsId}/serviceAccounts" + }, + "setIamPolicy": { + "response": { + "$ref": "Policy" + }, + "parameterOrder": [ + "resource" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "resource": { + "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + "location": "path", + "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + "required": true, + "type": "string" + } + }, + "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:setIamPolicy", + "path": "v1/{+resource}:setIamPolicy", + "id": "iam.projects.serviceAccounts.setIamPolicy", + "description": "Sets the IAM access control policy for a\nServiceAccount.", + "request": { + "$ref": "SetIamPolicyRequest" + } + }, + "signJwt": { + "httpMethod": "POST", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "SignJwtResponse" + }, + "parameters": { + "name": { + "location": "path", + "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:signJwt", + "id": "iam.projects.serviceAccounts.signJwt", + "path": "v1/{+name}:signJwt", + "request": { + "$ref": "SignJwtRequest" + }, + "description": "Signs a JWT using a service account's system-managed private key.\n\nIf no expiry time (`exp`) is provided in the `SignJwtRequest`, IAM sets an\nan expiry time of one hour by default. If you request an expiry time of\nmore than one hour, the request will fail." 
+ } + }, + "resources": { + "keys": { + "methods": { + "list": { + "response": { + "$ref": "ListServiceAccountKeysResponse" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "GET", + "parameters": { + "keyTypes": { + "enum": [ + "KEY_TYPE_UNSPECIFIED", + "USER_MANAGED", + "SYSTEM_MANAGED" + ], + "description": "Filters the types of keys the user wants to include in the list\nresponse. Duplicate key types are not allowed. If no key type\nis provided, all keys are returned.", + "type": "string", + "repeated": true, + "location": "query" + }, + "name": { + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + "location": "path", + "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\n\nUsing `-` as a wildcard for the `PROJECT_ID`, will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account." + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}/keys", + "path": "v1/{+name}/keys", + "id": "iam.projects.serviceAccounts.keys.list", + "description": "Lists ServiceAccountKeys." + }, + "get": { + "response": { + "$ref": "ServiceAccountKey" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "GET", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "publicKeyType": { + "type": "string", + "location": "query", + "enum": [ + "TYPE_NONE", + "TYPE_X509_PEM_FILE", + "TYPE_RAW_PUBLIC_KEY" + ], + "description": "The output format of the public key requested.\nX509_PEM is the default output format." + }, + "name": { + "location": "path", + "description": "The resource name of the service account key in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}/keys/{key}`.\n\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/serviceAccounts/[^/]+/keys/[^/]+$" + } + }, + "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}/keys/{keysId}", + "path": "v1/{+name}", + "id": "iam.projects.serviceAccounts.keys.get", + "description": "Gets the ServiceAccountKey\nby key id." + }, + "create": { + "response": { + "$ref": "ServiceAccountKey" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "name": { + "location": "path", + "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. 
The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$" + } + }, + "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}/keys", + "path": "v1/{+name}/keys", + "id": "iam.projects.serviceAccounts.keys.create", + "description": "Creates a ServiceAccountKey\nand returns it.", + "request": { + "$ref": "CreateServiceAccountKeyRequest" + } + }, + "delete": { + "path": "v1/{+name}", + "id": "iam.projects.serviceAccounts.keys.delete", + "description": "Deletes a ServiceAccountKey.", + "response": { + "$ref": "Empty" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "DELETE", + "parameters": { + "name": { + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/serviceAccounts/[^/]+/keys/[^/]+$", + "location": "path", + "description": "The resource name of the service account key in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}/keys/{key}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account." + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}/keys/{keysId}" + } + } + } + } + }, + "roles": { + "methods": { + "delete": { + "parameters": { + "etag": { + "description": "Used to perform a consistent read-modify-write.", + "format": "byte", + "type": "string", + "location": "query" + }, + "name": { + "description": "The resource name of the role in one of the following formats:\n`organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}`\n`projects/{PROJECT_ID}/roles/{ROLE_NAME}`", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/roles/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectsId}/roles/{rolesId}", + "id": "iam.projects.roles.delete", + "path": "v1/{+name}", + "description": "Soft deletes a role. The role is suspended and cannot be used to create new\nIAM Policy Bindings.\nThe Role will not be included in `ListRoles()` unless `show_deleted` is set\nin the `ListRolesRequest`. The Role contains the deleted boolean set.\nExisting Bindings remains, but are inactive. The Role can be undeleted\nwithin 7 days. After 7 days the Role is deleted and all Bindings associated\nwith the role are removed.", + "httpMethod": "DELETE", + "response": { + "$ref": "Role" + }, + "parameterOrder": [ + "name" + ] + }, + "list": { + "httpMethod": "GET", + "parameterOrder": [ + "parent" + ], + "response": { + "$ref": "ListRolesResponse" + }, + "parameters": { + "view": { + "enum": [ + "BASIC", + "FULL" + ], + "description": "Optional view for the returned Role objects.", + "type": "string", + "location": "query" + }, + "parent": { + "pattern": "^projects/[^/]+$", + "location": "path", + "description": "The resource name of the parent resource in one of the following formats:\n`` (empty string) -- this refers to curated roles.\n`organizations/{ORGANIZATION_ID}`\n`projects/{PROJECT_ID}`", + "required": true, + "type": "string" + }, + "showDeleted": { + "type": "boolean", + "location": "query", + "description": "Include Roles that have been deleted." 
+ }, + "pageToken": { + "location": "query", + "description": "Optional pagination token returned in an earlier ListRolesResponse.", + "type": "string" + }, + "pageSize": { + "location": "query", + "description": "Optional limit on the number of roles to include in the response.", + "format": "int32", + "type": "integer" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectsId}/roles", + "id": "iam.projects.roles.list", + "path": "v1/{+parent}/roles", + "description": "Lists the Roles defined on a resource." + }, + "create": { + "description": "Creates a new Role.", + "request": { + "$ref": "CreateRoleRequest" + }, + "response": { + "$ref": "Role" + }, + "parameterOrder": [ + "parent" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "parent": { + "location": "path", + "description": "The resource name of the parent resource in one of the following formats:\n`organizations/{ORGANIZATION_ID}`\n`projects/{PROJECT_ID}`", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+$" + } + }, + "flatPath": "v1/projects/{projectsId}/roles", + "path": "v1/{+parent}/roles", + "id": "iam.projects.roles.create" + }, + "undelete": { + "response": { + "$ref": "Role" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "POST", + "parameters": { + "name": { + "description": "The resource name of the role in one of the following formats:\n`organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}`\n`projects/{PROJECT_ID}/roles/{ROLE_NAME}`", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/roles/[^/]+$", + "location": "path" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/projects/{projectsId}/roles/{rolesId}:undelete", + "path": "v1/{+name}:undelete", + "id": "iam.projects.roles.undelete", + "request": { + "$ref": "UndeleteRoleRequest" + }, + "description": "Undelete a Role, bringing it back in its previous state." 
+ }, + "get": { + "description": "Gets a Role definition.", + "httpMethod": "GET", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "Role" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "name": { + "location": "path", + "description": "The resource name of the role in one of the following formats:\n`roles/{ROLE_NAME}`\n`organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}`\n`projects/{PROJECT_ID}/roles/{ROLE_NAME}`", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/roles/[^/]+$" + } + }, + "flatPath": "v1/projects/{projectsId}/roles/{rolesId}", + "id": "iam.projects.roles.get", + "path": "v1/{+name}" + }, + "patch": { + "description": "Updates a Role definition.", + "request": { + "$ref": "Role" + }, + "response": { + "$ref": "Role" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "PATCH", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "name": { + "description": "The resource name of the role in one of the following formats:\n`roles/{ROLE_NAME}`\n`organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}`\n`projects/{PROJECT_ID}/roles/{ROLE_NAME}`", + "required": true, + "type": "string", + "pattern": "^projects/[^/]+/roles/[^/]+$", + "location": "path" + }, + "updateMask": { + "description": "A mask describing which fields in the Role have changed.", + "format": "google-fieldmask", + "type": "string", + "location": "query" + } + }, + "flatPath": "v1/projects/{projectsId}/roles/{rolesId}", + "path": "v1/{+name}", + "id": "iam.projects.roles.patch" + } + } + } + } + }, + "roles": { + "methods": { + "list": { + "response": { + "$ref": "ListRolesResponse" + }, + "parameterOrder": [], + "httpMethod": "GET", + "parameters": { + "parent": { + "location": "query", + "description": "The resource name of the parent resource in one of the following formats:\n`` (empty string) -- this refers to curated roles.\n`organizations/{ORGANIZATION_ID}`\n`projects/{PROJECT_ID}`", + "type": "string" + }, + "showDeleted": { + "location": "query", + "description": "Include Roles that have been deleted.", + "type": "boolean" + }, + "pageToken": { + "location": "query", + "description": "Optional pagination token returned in an earlier ListRolesResponse.", + "type": "string" + }, + "pageSize": { + "description": "Optional limit on the number of roles to include in the response.", + "format": "int32", + "type": "integer", + "location": "query" + }, + "view": { + "location": "query", + "enum": [ + "BASIC", + "FULL" + ], + "description": "Optional view for the returned Role objects.", + "type": "string" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/roles", + "path": "v1/roles", + "id": "iam.roles.list", + "description": "Lists the Roles defined on a resource." 
+ }, + "get": { + "description": "Gets a Role definition.", + "response": { + "$ref": "Role" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "GET", + "parameters": { + "name": { + "required": true, + "type": "string", + "pattern": "^roles/[^/]+$", + "location": "path", + "description": "The resource name of the role in one of the following formats:\n`roles/{ROLE_NAME}`\n`organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}`\n`projects/{PROJECT_ID}/roles/{ROLE_NAME}`" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/roles/{rolesId}", + "path": "v1/{+name}", + "id": "iam.roles.get" + }, + "queryGrantableRoles": { + "response": { + "$ref": "QueryGrantableRolesResponse" + }, + "parameterOrder": [], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": {}, + "flatPath": "v1/roles:queryGrantableRoles", + "path": "v1/roles:queryGrantableRoles", + "id": "iam.roles.queryGrantableRoles", + "description": "Queries roles that can be granted on a particular resource.\nA role is grantable if it can be used as the role in a binding for a policy\nfor that resource.", + "request": { + "$ref": "QueryGrantableRolesRequest" + } + } + } + }, + "permissions": { + "methods": { + "queryTestablePermissions": { + "description": "Lists the permissions testable on a resource.\nA permission is testable if it can be tested for an identity on a resource.", + "request": { + "$ref": "QueryTestablePermissionsRequest" + }, + "httpMethod": "POST", + "parameterOrder": [], + "response": { + "$ref": "QueryTestablePermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": {}, + "flatPath": "v1/permissions:queryTestablePermissions", + "id": "iam.permissions.queryTestablePermissions", + "path": "v1/permissions:queryTestablePermissions" + } + } + }, + "organizations": { + "resources": { + "roles": { + "methods": { + "patch": { + "parameters": { + "updateMask": { + "type": "string", + "location": "query", + "description": "A mask describing which fields in the Role have changed.", + "format": "google-fieldmask" + }, + "name": { + "required": true, + "type": "string", + "pattern": "^organizations/[^/]+/roles/[^/]+$", + "location": "path", + "description": "The resource name of the role in one of the following formats:\n`roles/{ROLE_NAME}`\n`organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}`\n`projects/{PROJECT_ID}/roles/{ROLE_NAME}`" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/organizations/{organizationsId}/roles/{rolesId}", + "path": "v1/{+name}", + "id": "iam.organizations.roles.patch", + "request": { + "$ref": "Role" + }, + "description": "Updates a Role definition.", + "response": { + "$ref": "Role" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "PATCH" + }, + "undelete": { + "description": "Undelete a Role, bringing it back in its previous state.", + "request": { + "$ref": "UndeleteRoleRequest" + }, + "response": { + "$ref": "Role" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "POST", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "name": { + "pattern": "^organizations/[^/]+/roles/[^/]+$", + "location": "path", + "description": "The resource name of the role in one of the following formats:\n`organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}`\n`projects/{PROJECT_ID}/roles/{ROLE_NAME}`", + "required": true, + "type": "string" + } + }, + "flatPath": 
"v1/organizations/{organizationsId}/roles/{rolesId}:undelete", + "path": "v1/{+name}:undelete", + "id": "iam.organizations.roles.undelete" + }, + "get": { + "description": "Gets a Role definition.", + "httpMethod": "GET", + "parameterOrder": [ + "name" + ], + "response": { + "$ref": "Role" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "name": { + "location": "path", + "description": "The resource name of the role in one of the following formats:\n`roles/{ROLE_NAME}`\n`organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}`\n`projects/{PROJECT_ID}/roles/{ROLE_NAME}`", + "required": true, + "type": "string", + "pattern": "^organizations/[^/]+/roles/[^/]+$" + } + }, + "flatPath": "v1/organizations/{organizationsId}/roles/{rolesId}", + "id": "iam.organizations.roles.get", + "path": "v1/{+name}" + }, + "delete": { + "description": "Soft deletes a role. The role is suspended and cannot be used to create new\nIAM Policy Bindings.\nThe Role will not be included in `ListRoles()` unless `show_deleted` is set\nin the `ListRolesRequest`. The Role contains the deleted boolean set.\nExisting Bindings remains, but are inactive. The Role can be undeleted\nwithin 7 days. After 7 days the Role is deleted and all Bindings associated\nwith the role are removed.", + "response": { + "$ref": "Role" + }, + "parameterOrder": [ + "name" + ], + "httpMethod": "DELETE", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "parameters": { + "etag": { + "location": "query", + "description": "Used to perform a consistent read-modify-write.", + "format": "byte", + "type": "string" + }, + "name": { + "description": "The resource name of the role in one of the following formats:\n`organizations/{ORGANIZATION_ID}/roles/{ROLE_NAME}`\n`projects/{PROJECT_ID}/roles/{ROLE_NAME}`", + "required": true, + "type": "string", + "pattern": "^organizations/[^/]+/roles/[^/]+$", + "location": "path" + } + }, + "flatPath": "v1/organizations/{organizationsId}/roles/{rolesId}", + "path": "v1/{+name}", + "id": "iam.organizations.roles.delete" + }, + "list": { + "httpMethod": "GET", + "parameterOrder": [ + "parent" + ], + "response": { + "$ref": "ListRolesResponse" + }, + "parameters": { + "parent": { + "description": "The resource name of the parent resource in one of the following formats:\n`` (empty string) -- this refers to curated roles.\n`organizations/{ORGANIZATION_ID}`\n`projects/{PROJECT_ID}`", + "required": true, + "type": "string", + "pattern": "^organizations/[^/]+$", + "location": "path" + }, + "showDeleted": { + "description": "Include Roles that have been deleted.", + "type": "boolean", + "location": "query" + }, + "pageToken": { + "description": "Optional pagination token returned in an earlier ListRolesResponse.", + "type": "string", + "location": "query" + }, + "pageSize": { + "description": "Optional limit on the number of roles to include in the response.", + "format": "int32", + "type": "integer", + "location": "query" + }, + "view": { + "type": "string", + "location": "query", + "enum": [ + "BASIC", + "FULL" + ], + "description": "Optional view for the returned Role objects." + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/organizations/{organizationsId}/roles", + "id": "iam.organizations.roles.list", + "path": "v1/{+parent}/roles", + "description": "Lists the Roles defined on a resource." 
+ }, + "create": { + "response": { + "$ref": "Role" + }, + "parameterOrder": [ + "parent" + ], + "httpMethod": "POST", + "parameters": { + "parent": { + "location": "path", + "description": "The resource name of the parent resource in one of the following formats:\n`organizations/{ORGANIZATION_ID}`\n`projects/{PROJECT_ID}`", + "required": true, + "type": "string", + "pattern": "^organizations/[^/]+$" + } + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "flatPath": "v1/organizations/{organizationsId}/roles", + "path": "v1/{+parent}/roles", + "id": "iam.organizations.roles.create", + "request": { + "$ref": "CreateRoleRequest" + }, + "description": "Creates a new Role." } } } @@ -1601,22 +1607,50 @@ } }, "parameters": { - "uploadType": { - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "pp": { + "type": "boolean", + "default": "true", + "location": "query", + "description": "Pretty-print response." + }, + "bearer_token": { + "description": "OAuth bearer token.", "type": "string", "location": "query" }, + "oauth_token": { + "location": "query", + "description": "OAuth 2.0 token for the current user.", + "type": "string" + }, + "upload_protocol": { + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", + "type": "string", + "location": "query" + }, + "prettyPrint": { + "location": "query", + "description": "Returns response with indentations and line breaks.", + "type": "boolean", + "default": "true" + }, + "uploadType": { + "location": "query", + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "type": "string" + }, "fields": { "location": "query", "description": "Selector specifying which fields to include in a partial response.", "type": "string" }, "callback": { - "location": "query", "description": "JSONP", - "type": "string" + "type": "string", + "location": "query" }, "$.xgafv": { + "type": "string", "enumDescriptions": [ "v1 error format", "v2 error format" @@ -1626,12 +1660,9 @@ "1", "2" ], - "description": "V1 error format.", - "type": "string" + "description": "V1 error format." }, "alt": { - "description": "Data format for response.", - "default": "json", "enum": [ "json", "media", @@ -1643,60 +1674,33 @@ "Media download with context-dependent Content-Type", "Responses with Content-Type of application/x-protobuf" ], - "location": "query" - }, - "access_token": { "location": "query", - "description": "OAuth access token.", - "type": "string" + "description": "Data format for response.", + "default": "json" }, "key": { "location": "query", "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", "type": "string" }, + "access_token": { + "location": "query", + "description": "OAuth access token.", + "type": "string" + }, "quotaUser": { "location": "query", "description": "Available to use for quota purposes for server-side applications. 
Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", "type": "string" - }, - "pp": { - "location": "query", - "description": "Pretty-print response.", - "default": "true", - "type": "boolean" - }, - "oauth_token": { - "location": "query", - "description": "OAuth 2.0 token for the current user.", - "type": "string" - }, - "bearer_token": { - "description": "OAuth bearer token.", - "type": "string", - "location": "query" - }, - "upload_protocol": { - "location": "query", - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "type": "string" - }, - "prettyPrint": { - "location": "query", - "description": "Returns response with indentations and line breaks.", - "default": "true", - "type": "boolean" } }, "version": "v1", "baseUrl": "https://iam.googleapis.com/", + "servicePath": "", "kind": "discovery#restDescription", "description": "Manages identity and access control for Google Cloud Platform resources, including the creation of service accounts, which you can use to authenticate to Google and make API calls.", - "servicePath": "", "basePath": "", "id": "iam:v1", "documentationLink": "https://cloud.google.com/iam/", - "revision": "20170907", - "discoveryVersion": "v1", - "version_module": true + "revision": "20171020" } diff --git a/vendor/google.golang.org/api/iam/v1/iam-gen.go b/vendor/google.golang.org/api/iam/v1/iam-gen.go index 1090157f0a..71f9ff8934 100644 --- a/vendor/google.golang.org/api/iam/v1/iam-gen.go +++ b/vendor/google.golang.org/api/iam/v1/iam-gen.go @@ -364,6 +364,7 @@ type CreateServiceAccountKeyRequest struct { // "KEY_ALG_UNSPECIFIED" - An unspecified key algorithm. // "KEY_ALG_RSA_1024" - 1k RSA Key. // "KEY_ALG_RSA_2048" - 2k RSA Key. + // "KEY_ALG_GCS_SYMMETRIC_HMAC" - HMAC. KeyAlgorithm string `json:"keyAlgorithm,omitempty"` // PrivateKeyType: The output format of the private key. @@ -1064,17 +1065,15 @@ func (s *Role) MarshalJSON() ([]byte, error) { // returned // in util::Status's ResourceInfo.resource_name in the format // of -// projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}. The -// caller can +// projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}. The caller can // use the name in other methods to access the account. // // All other methods can identify the service account using the // format -// `projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}` -// . -// Using `-` as a wildcard for the project will infer the project +// `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`. +// Using `-` as a wildcard for the `PROJECT_ID` will infer the project // from -// the account. The `account` value can be the `email` address or +// the account. The `ACCOUNT` value can be the `email` address or // the // `unique_id` of the service account. type ServiceAccount struct { @@ -1091,19 +1090,17 @@ type ServiceAccount struct { // Name: The resource name of the service account in the following // format: - // `projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL} - // `. + // `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`. // - // Requests using `-` as a wildcard for the project will infer the - // project - // from the `account` and the `account` value can be the `email` address - // or - // the `unique_id` of the service account. + // Requests using `-` as a wildcard for the `PROJECT_ID` will infer + // the + // project from the `account` and the `ACCOUNT` value can be the + // `email` + // address or the `unique_id` of the service account. 
// // In responses the resource name will always be in the // format - // `projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}` - // . + // `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`. Name string `json:"name,omitempty"` // Oauth2ClientId: @OutputOnly. The OAuth2 client id for the service @@ -1179,12 +1176,12 @@ type ServiceAccountKey struct { // "KEY_ALG_UNSPECIFIED" - An unspecified key algorithm. // "KEY_ALG_RSA_1024" - 1k RSA Key. // "KEY_ALG_RSA_2048" - 2k RSA Key. + // "KEY_ALG_GCS_SYMMETRIC_HMAC" - HMAC. KeyAlgorithm string `json:"keyAlgorithm,omitempty"` // Name: The resource name of the service account key in the following // format - // `projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}/ - // keys/{key}`. + // `projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}/keys/{key}`. Name string `json:"name,omitempty"` // PrivateKeyData: The private key data. Only provided in @@ -3746,7 +3743,7 @@ func (c *ProjectsServiceAccountsDeleteCall) Do(opts ...googleapi.CallOption) (*E // ], // "parameters": { // "name": { - // "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.\nUsing `-` as a wildcard for the project will infer the project from\nthe account. The `account` value can be the `email` address or the\n`unique_id` of the service account.", + // "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", // "location": "path", // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", // "required": true, @@ -3885,7 +3882,7 @@ func (c *ProjectsServiceAccountsGetCall) Do(opts ...googleapi.CallOption) (*Serv // ], // "parameters": { // "name": { - // "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.\nUsing `-` as a wildcard for the project will infer the project from\nthe account. The `account` value can be the `email` address or the\n`unique_id` of the service account.", + // "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", // "location": "path", // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", // "required": true, @@ -4473,7 +4470,7 @@ func (c *ProjectsServiceAccountsSignBlobCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "name": { - // "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.\nUsing `-` as a wildcard for the project will infer the project from\nthe account. The `account` value can be the `email` address or the\n`unique_id` of the service account.", + // "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. 
The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", // "location": "path", // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", // "required": true, @@ -4615,7 +4612,7 @@ func (c *ProjectsServiceAccountsSignJwtCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "name": { - // "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.\nUsing `-` as a wildcard for the project will infer the project from\nthe account. The `account` value can be the `email` address or the\n`unique_id` of the service account.", + // "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", // "location": "path", // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", // "required": true, @@ -4891,7 +4888,7 @@ func (c *ProjectsServiceAccountsUpdateCall) Do(opts ...googleapi.CallOption) (*S // ], // "parameters": { // "name": { - // "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.\n\nRequests using `-` as a wildcard for the project will infer the project\nfrom the `account` and the `account` value can be the `email` address or\nthe `unique_id` of the service account.\n\nIn responses the resource name will always be in the format\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.", + // "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\n\nRequests using `-` as a wildcard for the `PROJECT_ID` will infer the\nproject from the `account` and the `ACCOUNT` value can be the `email`\naddress or the `unique_id` of the service account.\n\nIn responses the resource name will always be in the format\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.", // "location": "path", // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", // "required": true, @@ -5027,7 +5024,7 @@ func (c *ProjectsServiceAccountsKeysCreateCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "name": { - // "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.\nUsing `-` as a wildcard for the project will infer the project from\nthe account. The `account` value can be the `email` address or the\n`unique_id` of the service account.", + // "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", // "location": "path", // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", // "required": true, @@ -5155,7 +5152,7 @@ func (c *ProjectsServiceAccountsKeysDeleteCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "name": { - // "description": "The resource name of the service account key in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}/keys/{key}`.\nUsing `-` as a wildcard for the project will infer the project from\nthe account. 
The `account` value can be the `email` address or the\n`unique_id` of the service account.", + // "description": "The resource name of the service account key in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}/keys/{key}`.\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", // "location": "path", // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+/keys/[^/]+$", // "required": true, @@ -5308,7 +5305,7 @@ func (c *ProjectsServiceAccountsKeysGetCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "name": { - // "description": "The resource name of the service account key in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}/keys/{key}`.\n\nUsing `-` as a wildcard for the project will infer the project from\nthe account. The `account` value can be the `email` address or the\n`unique_id` of the service account.", + // "description": "The resource name of the service account key in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}/keys/{key}`.\n\nUsing `-` as a wildcard for the `PROJECT_ID` will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", // "location": "path", // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+/keys/[^/]+$", // "required": true, @@ -5482,7 +5479,7 @@ func (c *ProjectsServiceAccountsKeysListCall) Do(opts ...googleapi.CallOption) ( // "type": "string" // }, // "name": { - // "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}`.\n\nUsing `-` as a wildcard for the project, will infer the project from\nthe account. The `account` value can be the `email` address or the\n`unique_id` of the service account.", + // "description": "The resource name of the service account in the following format:\n`projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}`.\n\nUsing `-` as a wildcard for the `PROJECT_ID`, will infer the project from\nthe account. The `ACCOUNT` value can be the `email` address or the\n`unique_id` of the service account.", // "location": "path", // "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", // "required": true, diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go index 5147191780..dde7a62496 100644 --- a/vendor/google.golang.org/api/internal/settings.go +++ b/vendor/google.golang.org/api/internal/settings.go @@ -16,6 +16,7 @@ package internal import ( + "errors" "net/http" "golang.org/x/oauth2" @@ -34,4 +35,20 @@ type DialSettings struct { HTTPClient *http.Client GRPCDialOpts []grpc.DialOption GRPCConn *grpc.ClientConn + NoAuth bool +} + +// Validate reports an error if ds is invalid. 
+func (ds *DialSettings) Validate() error { + hasCreds := ds.APIKey != "" || ds.TokenSource != nil || ds.CredentialsFile != "" + if ds.NoAuth && hasCreds { + return errors.New("options.WithoutAuthentication is incompatible with any option that provides credentials") + } + if ds.HTTPClient != nil && ds.GRPCConn != nil { + return errors.New("WithHTTPClient is incompatible with WithGRPCConn") + } + if ds.HTTPClient != nil && ds.GRPCDialOpts != nil { + return errors.New("WithHTTPClient is incompatible with gRPC dial options") + } + return nil } diff --git a/vendor/google.golang.org/api/oauth2/v2/oauth2-api.json b/vendor/google.golang.org/api/oauth2/v2/oauth2-api.json index 7cd7d2a7fb..67a0378658 100644 --- a/vendor/google.golang.org/api/oauth2/v2/oauth2-api.json +++ b/vendor/google.golang.org/api/oauth2/v2/oauth2-api.json @@ -20,7 +20,7 @@ "basePath": "/", "rootUrl": "https://www.googleapis.com/", "servicePath": "", - "batchPath": "batch", + "batchPath": "batch/oauth2/v2", "parameters": { "alt": { "type": "string", diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go index e3080e38d3..ffbee32951 100644 --- a/vendor/google.golang.org/api/option/option.go +++ b/vendor/google.golang.org/api/option/option.go @@ -160,3 +160,16 @@ func WithAPIKey(apiKey string) ClientOption { type withAPIKey string func (w withAPIKey) Apply(o *internal.DialSettings) { o.APIKey = string(w) } + +// WithoutAuthentication returns a ClientOption that specifies that no +// authentication should be used. It is suitable only for testing and for +// accessing public resources, like public Google Cloud Storage buckets. +// It is an error to provide both WithoutAuthentication and any of WithAPIKey, +// WithTokenSource, WithCredentialsFile or WithServiceAccountFile. +func WithoutAuthentication() ClientOption { + return withoutAuthentication{} +} + +type withoutAuthentication struct{} + +func (w withoutAuthentication) Apply(o *internal.DialSettings) { o.NoAuth = true } diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json index e382532f18..f49c93f785 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-api.json +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -1,11 +1,11 @@ { "kind": "discovery#restDescription", - "etag": "\"YWOzh2SDasdU84ArJnpYek-OMdg/dkoYxqfGO2jaG60VA5h0K8BOG4k\"", + "etag": "\"YWOzh2SDasdU84ArJnpYek-OMdg/akxawO6Ey81E_n6KZZ_RFctOG6Q\"", "discoveryVersion": "v1", "id": "storage:v1", "name": "storage", "version": "v1", - "revision": "20170824", + "revision": "20171011", "title": "Cloud Storage JSON API", "description": "Stores and retrieves potentially large, immutable data objects.", "ownerDomain": "google.com", @@ -1090,7 +1090,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" } }, @@ -1123,7 +1123,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", "location": "query" } }, @@ -1153,7 +1153,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" } }, @@ -1185,7 +1185,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" } }, @@ -1220,7 +1220,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" } }, @@ -1259,7 +1259,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" } }, @@ -1308,7 +1308,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" } }, @@ -1360,7 +1360,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" } }, @@ -1392,7 +1392,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" } }, @@ -1474,6 +1474,11 @@ "Omit owner, acl and defaultObjectAcl properties." ], "location": "query" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request.", + "location": "query" } }, "parameterOrder": [ @@ -1533,6 +1538,11 @@ "Omit owner, acl and defaultObjectAcl properties." ], "location": "query" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request.", + "location": "query" } }, "parameterOrder": [ @@ -1628,7 +1638,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" } }, @@ -1660,7 +1670,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" } }, @@ -1700,7 +1710,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" } }, @@ -1798,7 +1808,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", "location": "query" } }, @@ -1861,7 +1871,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" } }, @@ -1894,7 +1904,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" } }, @@ -1924,7 +1934,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" } }, @@ -1968,7 +1978,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" } }, @@ -2003,7 +2013,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" } }, @@ -2042,7 +2052,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" } }, @@ -2085,7 +2095,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" } }, @@ -2119,7 +2129,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" } }, @@ -2152,7 +2162,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" } }, @@ -2185,7 +2195,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" } }, @@ -2239,7 +2249,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" } }, @@ -2285,7 +2295,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", "location": "query" } }, @@ -2328,7 +2338,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" } }, @@ -2373,7 +2383,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" } }, @@ -2421,7 +2431,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" } }, @@ -2473,7 +2483,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" } }, @@ -2555,7 +2565,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" } }, @@ -2697,7 +2707,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" } }, @@ -2771,7 +2781,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" } }, @@ -2848,7 +2858,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" } }, @@ -2895,7 +2905,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" } }, @@ -3001,7 +3011,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" } }, @@ -3088,7 +3098,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" }, "versions": { @@ -3116,7 +3126,7 @@ "id": "storage.objects.patch", "path": "b/{bucket}/o/{object}", "httpMethod": "PATCH", - "description": "Patches an object's metadata.", + "description": "Updates an object's metadata. 
This method supports patch semantics.", "parameters": { "bucket": { "type": "string", @@ -3196,7 +3206,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" } }, @@ -3213,9 +3223,7 @@ "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.full_control" - ], - "supportsMediaDownload": true, - "useMediaDownloadService": true + ] }, "rewrite": { "id": "storage.objects.rewrite", @@ -3353,7 +3361,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" } }, @@ -3401,7 +3409,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" } }, @@ -3454,7 +3462,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" } }, @@ -3558,7 +3566,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" } }, @@ -3629,7 +3637,7 @@ }, "userProject": { "type": "string", - "description": "The project to be billed for this request, for Requester Pays buckets.", + "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query" }, "versions": { @@ -3674,6 +3682,11 @@ "description": "Project ID", "required": true, "location": "path" + }, + "userProject": { + "type": "string", + "description": "The project to be billed for this request.", + "location": "query" } }, "parameterOrder": [ diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index 168f4edc29..62a627594c 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -1839,7 +1839,7 @@ func (r *BucketAccessControlsService) Delete(bucket string, entity string) *Buck } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *BucketAccessControlsDeleteCall) UserProject(userProject string) *BucketAccessControlsDeleteCall { c.urlParams_.Set("userProject", userProject) return c @@ -1923,7 +1923,7 @@ func (c *BucketAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -1959,7 +1959,7 @@ func (r *BucketAccessControlsService) Get(bucket string, entity string) *BucketA } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *BucketAccessControlsGetCall) UserProject(userProject string) *BucketAccessControlsGetCall { c.urlParams_.Set("userProject", userProject) return c @@ -2081,7 +2081,7 @@ func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketA // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -2118,7 +2118,7 @@ func (r *BucketAccessControlsService) Insert(bucket string, bucketaccesscontrol } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *BucketAccessControlsInsertCall) UserProject(userProject string) *BucketAccessControlsInsertCall { c.urlParams_.Set("userProject", userProject) return c @@ -2224,7 +2224,7 @@ func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Buck // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -2263,7 +2263,7 @@ func (r *BucketAccessControlsService) List(bucket string) *BucketAccessControlsL } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *BucketAccessControlsListCall) UserProject(userProject string) *BucketAccessControlsListCall { c.urlParams_.Set("userProject", userProject) return c @@ -2377,7 +2377,7 @@ func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Bucket // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -2417,7 +2417,7 @@ func (r *BucketAccessControlsService) Patch(bucket string, entity string, bucket } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *BucketAccessControlsPatchCall) UserProject(userProject string) *BucketAccessControlsPatchCall { c.urlParams_.Set("userProject", userProject) return c @@ -2531,7 +2531,7 @@ func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Bucke // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -2573,7 +2573,7 @@ func (r *BucketAccessControlsService) Update(bucket string, entity string, bucke } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *BucketAccessControlsUpdateCall) UserProject(userProject string) *BucketAccessControlsUpdateCall { c.urlParams_.Set("userProject", userProject) return c @@ -2687,7 +2687,7 @@ func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Buck // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -2741,7 +2741,7 @@ func (c *BucketsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch in } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *BucketsDeleteCall) UserProject(userProject string) *BucketsDeleteCall { c.urlParams_.Set("userProject", userProject) return c @@ -2829,7 +2829,7 @@ func (c *BucketsDeleteCall) Do(opts ...googleapi.CallOption) error { // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -2892,7 +2892,7 @@ func (c *BucketsGetCall) Projection(projection string) *BucketsGetCall { } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *BucketsGetCall) UserProject(userProject string) *BucketsGetCall { c.urlParams_.Set("userProject", userProject) return c @@ -3031,7 +3031,7 @@ func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -3070,7 +3070,7 @@ func (r *BucketsService) GetIamPolicy(bucket string) *BucketsGetIamPolicyCall { } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *BucketsGetIamPolicyCall) UserProject(userProject string) *BucketsGetIamPolicyCall { c.urlParams_.Set("userProject", userProject) return c @@ -3184,7 +3184,7 @@ func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -3274,6 +3274,13 @@ func (c *BucketsInsertCall) Projection(projection string) *BucketsInsertCall { return c } +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. +func (c *BucketsInsertCall) UserProject(userProject string) *BucketsInsertCall { + c.urlParams_.Set("userProject", userProject) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -3422,6 +3429,11 @@ func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { // ], // "location": "query", // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request.", + // "location": "query", + // "type": "string" // } // }, // "path": "b", @@ -3491,6 +3503,13 @@ func (c *BucketsListCall) Projection(projection string) *BucketsListCall { return c } +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. +func (c *BucketsListCall) UserProject(userProject string) *BucketsListCall { + c.urlParams_.Set("userProject", userProject) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -3625,6 +3644,11 @@ func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) { // ], // "location": "query", // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request.", + // "location": "query", + // "type": "string" // } // }, // "path": "b", @@ -3753,7 +3777,7 @@ func (c *BucketsPatchCall) Projection(projection string) *BucketsPatchCall { } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *BucketsPatchCall) UserProject(userProject string) *BucketsPatchCall { c.urlParams_.Set("userProject", userProject) return c @@ -3924,7 +3948,7 @@ func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -3964,7 +3988,7 @@ func (r *BucketsService) SetIamPolicy(bucket string, policy *Policy) *BucketsSet } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *BucketsSetIamPolicyCall) UserProject(userProject string) *BucketsSetIamPolicyCall { c.urlParams_.Set("userProject", userProject) return c @@ -4070,7 +4094,7 @@ func (c *BucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -4112,7 +4136,7 @@ func (r *BucketsService) TestIamPermissions(bucket string, permissions []string) } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *BucketsTestIamPermissionsCall) UserProject(userProject string) *BucketsTestIamPermissionsCall { c.urlParams_.Set("userProject", userProject) return c @@ -4234,7 +4258,7 @@ func (c *BucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -4344,7 +4368,7 @@ func (c *BucketsUpdateCall) Projection(projection string) *BucketsUpdateCall { } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *BucketsUpdateCall) UserProject(userProject string) *BucketsUpdateCall { c.urlParams_.Set("userProject", userProject) return c @@ -4515,7 +4539,7 @@ func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -4650,7 +4674,7 @@ func (r *DefaultObjectAccessControlsService) Delete(bucket string, entity string } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *DefaultObjectAccessControlsDeleteCall) UserProject(userProject string) *DefaultObjectAccessControlsDeleteCall { c.urlParams_.Set("userProject", userProject) return c @@ -4734,7 +4758,7 @@ func (c *DefaultObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -4770,7 +4794,7 @@ func (r *DefaultObjectAccessControlsService) Get(bucket string, entity string) * } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *DefaultObjectAccessControlsGetCall) UserProject(userProject string) *DefaultObjectAccessControlsGetCall { c.urlParams_.Set("userProject", userProject) return c @@ -4892,7 +4916,7 @@ func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (* // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -4930,7 +4954,7 @@ func (r *DefaultObjectAccessControlsService) Insert(bucket string, objectaccessc } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *DefaultObjectAccessControlsInsertCall) UserProject(userProject string) *DefaultObjectAccessControlsInsertCall { c.urlParams_.Set("userProject", userProject) return c @@ -5036,7 +5060,7 @@ func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -5092,7 +5116,7 @@ func (c *DefaultObjectAccessControlsListCall) IfMetagenerationNotMatch(ifMetagen } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *DefaultObjectAccessControlsListCall) UserProject(userProject string) *DefaultObjectAccessControlsListCall { c.urlParams_.Set("userProject", userProject) return c @@ -5218,7 +5242,7 @@ func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) ( // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -5258,7 +5282,7 @@ func (r *DefaultObjectAccessControlsService) Patch(bucket string, entity string, } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *DefaultObjectAccessControlsPatchCall) UserProject(userProject string) *DefaultObjectAccessControlsPatchCall { c.urlParams_.Set("userProject", userProject) return c @@ -5372,7 +5396,7 @@ func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -5414,7 +5438,7 @@ func (r *DefaultObjectAccessControlsService) Update(bucket string, entity string } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *DefaultObjectAccessControlsUpdateCall) UserProject(userProject string) *DefaultObjectAccessControlsUpdateCall { c.urlParams_.Set("userProject", userProject) return c @@ -5528,7 +5552,7 @@ func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -5568,7 +5592,7 @@ func (r *NotificationsService) Delete(bucket string, notification string) *Notif } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *NotificationsDeleteCall) UserProject(userProject string) *NotificationsDeleteCall { c.urlParams_.Set("userProject", userProject) return c @@ -5652,7 +5676,7 @@ func (c *NotificationsDeleteCall) Do(opts ...googleapi.CallOption) error { // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -5688,7 +5712,7 @@ func (r *NotificationsService) Get(bucket string, notification string) *Notifica } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *NotificationsGetCall) UserProject(userProject string) *NotificationsGetCall { c.urlParams_.Set("userProject", userProject) return c @@ -5810,7 +5834,7 @@ func (c *NotificationsGetCall) Do(opts ...googleapi.CallOption) (*Notification, // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -5850,7 +5874,7 @@ func (r *NotificationsService) Insert(bucket string, notification *Notification) } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *NotificationsInsertCall) UserProject(userProject string) *NotificationsInsertCall { c.urlParams_.Set("userProject", userProject) return c @@ -5956,7 +5980,7 @@ func (c *NotificationsInsertCall) Do(opts ...googleapi.CallOption) (*Notificatio // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -5997,7 +6021,7 @@ func (r *NotificationsService) List(bucket string) *NotificationsListCall { } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *NotificationsListCall) UserProject(userProject string) *NotificationsListCall { c.urlParams_.Set("userProject", userProject) return c @@ -6111,7 +6135,7 @@ func (c *NotificationsListCall) Do(opts ...googleapi.CallOption) (*Notifications // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -6162,7 +6186,7 @@ func (c *ObjectAccessControlsDeleteCall) Generation(generation int64) *ObjectAcc } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *ObjectAccessControlsDeleteCall) UserProject(userProject string) *ObjectAccessControlsDeleteCall { c.urlParams_.Set("userProject", userProject) return c @@ -6260,7 +6284,7 @@ func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -6306,7 +6330,7 @@ func (c *ObjectAccessControlsGetCall) Generation(generation int64) *ObjectAccess } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *ObjectAccessControlsGetCall) UserProject(userProject string) *ObjectAccessControlsGetCall { c.urlParams_.Set("userProject", userProject) return c @@ -6442,7 +6466,7 @@ func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectA // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -6489,7 +6513,7 @@ func (c *ObjectAccessControlsInsertCall) Generation(generation int64) *ObjectAcc } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *ObjectAccessControlsInsertCall) UserProject(userProject string) *ObjectAccessControlsInsertCall { c.urlParams_.Set("userProject", userProject) return c @@ -6609,7 +6633,7 @@ func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Obje // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -6658,7 +6682,7 @@ func (c *ObjectAccessControlsListCall) Generation(generation int64) *ObjectAcces } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *ObjectAccessControlsListCall) UserProject(userProject string) *ObjectAccessControlsListCall { c.urlParams_.Set("userProject", userProject) return c @@ -6786,7 +6810,7 @@ func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Object // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -6836,7 +6860,7 @@ func (c *ObjectAccessControlsPatchCall) Generation(generation int64) *ObjectAcce } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *ObjectAccessControlsPatchCall) UserProject(userProject string) *ObjectAccessControlsPatchCall { c.urlParams_.Set("userProject", userProject) return c @@ -6964,7 +6988,7 @@ func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Objec // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -7016,7 +7040,7 @@ func (c *ObjectAccessControlsUpdateCall) Generation(generation int64) *ObjectAcc } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *ObjectAccessControlsUpdateCall) UserProject(userProject string) *ObjectAccessControlsUpdateCall { c.urlParams_.Set("userProject", userProject) return c @@ -7144,7 +7168,7 @@ func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Obje // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -7235,7 +7259,7 @@ func (c *ObjectsComposeCall) KmsKeyName(kmsKeyName string) *ObjectsComposeCall { } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *ObjectsComposeCall) UserProject(userProject string) *ObjectsComposeCall { c.urlParams_.Set("userProject", userProject) return c @@ -7403,7 +7427,7 @@ func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -7567,7 +7591,7 @@ func (c *ObjectsCopyCall) SourceGeneration(sourceGeneration int64) *ObjectsCopyC } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *ObjectsCopyCall) UserProject(userProject string) *ObjectsCopyCall { c.urlParams_.Set("userProject", userProject) return c @@ -7801,7 +7825,7 @@ func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -7890,7 +7914,7 @@ func (c *ObjectsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch in } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *ObjectsDeleteCall) UserProject(userProject string) *ObjectsDeleteCall { c.urlParams_.Set("userProject", userProject) return c @@ -8004,7 +8028,7 @@ func (c *ObjectsDeleteCall) Do(opts ...googleapi.CallOption) error { // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -8095,7 +8119,7 @@ func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall { } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *ObjectsGetCall) UserProject(userProject string) *ObjectsGetCall { c.urlParams_.Set("userProject", userProject) return c @@ -8276,7 +8300,7 @@ func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -8327,7 +8351,7 @@ func (c *ObjectsGetIamPolicyCall) Generation(generation int64) *ObjectsGetIamPol } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *ObjectsGetIamPolicyCall) UserProject(userProject string) *ObjectsGetIamPolicyCall { c.urlParams_.Set("userProject", userProject) return c @@ -8455,7 +8479,7 @@ func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -8594,7 +8618,7 @@ func (c *ObjectsInsertCall) Projection(projection string) *ObjectsInsertCall { } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *ObjectsInsertCall) UserProject(userProject string) *ObjectsInsertCall { c.urlParams_.Set("userProject", userProject) return c @@ -8857,7 +8881,7 @@ func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -8947,7 +8971,7 @@ func (c *ObjectsListCall) Projection(projection string) *ObjectsListCall { } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *ObjectsListCall) UserProject(userProject string) *ObjectsListCall { c.urlParams_.Set("userProject", userProject) return c @@ -9105,7 +9129,7 @@ func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // }, @@ -9164,7 +9188,8 @@ type ObjectsPatchCall struct { header_ http.Header } -// Patch: Patches an object's metadata. +// Patch: Updates an object's metadata. This method supports patch +// semantics. func (r *ObjectsService) Patch(bucket string, object string, object2 *Object) *ObjectsPatchCall { c := &ObjectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -9249,7 +9274,7 @@ func (c *ObjectsPatchCall) Projection(projection string) *ObjectsPatchCall { } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *ObjectsPatchCall) UserProject(userProject string) *ObjectsPatchCall { c.urlParams_.Set("userProject", userProject) return c @@ -9263,9 +9288,9 @@ func (c *ObjectsPatchCall) Fields(s ...googleapi.Field) *ObjectsPatchCall { return c } -// Context sets the context to be used in this call's Do and Download -// methods. Any pending HTTP request will be aborted if the provided -// context is canceled. +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. func (c *ObjectsPatchCall) Context(ctx context.Context) *ObjectsPatchCall { c.ctx_ = ctx return c @@ -9304,22 +9329,6 @@ func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Download fetches the API endpoint's "media" value, instead of the normal -// API response value. If the returned error is nil, the Response is guaranteed to -// have a 2xx status code. Callers must close the Response.Body as usual. -func (c *ObjectsPatchCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("media") - if err != nil { - return nil, err - } - if err := googleapi.CheckMediaResponse(res); err != nil { - res.Body.Close() - return nil, err - } - return res, nil -} - // Do executes the "storage.objects.patch" call. // Exactly one of *Object or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either @@ -9358,7 +9367,7 @@ func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { } return ret, nil // { - // "description": "Patches an object's metadata.", + // "description": "Updates an object's metadata. 
This method supports patch semantics.", // "httpMethod": "PATCH", // "id": "storage.objects.patch", // "parameterOrder": [ @@ -9443,7 +9452,7 @@ func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -9458,9 +9467,7 @@ func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" - // ], - // "supportsMediaDownload": true, - // "useMediaDownloadService": true + // ] // } } @@ -9641,7 +9648,7 @@ func (c *ObjectsRewriteCall) SourceGeneration(sourceGeneration int64) *ObjectsRe } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *ObjectsRewriteCall) UserProject(userProject string) *ObjectsRewriteCall { c.urlParams_.Set("userProject", userProject) return c @@ -9875,7 +9882,7 @@ func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -9926,7 +9933,7 @@ func (c *ObjectsSetIamPolicyCall) Generation(generation int64) *ObjectsSetIamPol } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *ObjectsSetIamPolicyCall) UserProject(userProject string) *ObjectsSetIamPolicyCall { c.urlParams_.Set("userProject", userProject) return c @@ -10046,7 +10053,7 @@ func (c *ObjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -10098,7 +10105,7 @@ func (c *ObjectsTestIamPermissionsCall) Generation(generation int64) *ObjectsTes } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *ObjectsTestIamPermissionsCall) UserProject(userProject string) *ObjectsTestIamPermissionsCall { c.urlParams_.Set("userProject", userProject) return c @@ -10234,7 +10241,7 @@ func (c *ObjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -10351,7 +10358,7 @@ func (c *ObjectsUpdateCall) Projection(projection string) *ObjectsUpdateCall { } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *ObjectsUpdateCall) UserProject(userProject string) *ObjectsUpdateCall { c.urlParams_.Set("userProject", userProject) return c @@ -10545,7 +10552,7 @@ func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } @@ -10634,7 +10641,7 @@ func (c *ObjectsWatchAllCall) Projection(projection string) *ObjectsWatchAllCall } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. +// be billed for this request. Required for Requester Pays buckets. func (c *ObjectsWatchAllCall) UserProject(userProject string) *ObjectsWatchAllCall { c.urlParams_.Set("userProject", userProject) return c @@ -10784,7 +10791,7 @@ func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) // "type": "string" // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // }, @@ -10833,6 +10840,13 @@ func (r *ProjectsServiceAccountService) Get(projectId string) *ProjectsServiceAc return c } +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. +func (c *ProjectsServiceAccountGetCall) UserProject(userProject string) *ProjectsServiceAccountGetCall { + c.urlParams_.Set("userProject", userProject) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
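For reference, the new UserProject setters are used the same way on every generated call type in this file. Below is a minimal sketch against google.golang.org/api/storage/v1, assuming Application Default Credentials and placeholder bucket, object and billing-project names; it is an illustration of the setter added in this diff, not part of the generated code itself.

    package main

    import (
        "context"
        "log"

        "golang.org/x/oauth2/google"
        "google.golang.org/api/storage/v1"
    )

    func main() {
        ctx := context.Background()

        // Application Default Credentials; any authenticated *http.Client works.
        hc, err := google.DefaultClient(ctx, storage.DevstorageReadOnlyScope)
        if err != nil {
            log.Fatal(err)
        }
        svc, err := storage.New(hc)
        if err != nil {
            log.Fatal(err)
        }

        // "requester-pays-bucket", "path/to/object" and "my-billing-project"
        // are placeholders. For a Requester Pays bucket the caller names the
        // project that should be billed for the request via userProject.
        obj, err := svc.Objects.Get("requester-pays-bucket", "path/to/object").
            UserProject("my-billing-project").
            Do()
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("object %s is %d bytes", obj.Name, obj.Size)
    }
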
@@ -10939,6 +10953,11 @@ func (c *ProjectsServiceAccountGetCall) Do(opts ...googleapi.CallOption) (*Servi // "location": "path", // "required": true, // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request.", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{projectId}/serviceAccount", diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go index a04956d981..eda6e5eddc 100644 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -36,6 +36,9 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, for _, opt := range opts { opt.Apply(&o) } + if err := o.Validate(); err != nil { + return nil, "", err + } if o.GRPCConn != nil { return nil, "", errors.New("unsupported gRPC connection specified") } @@ -43,30 +46,32 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, if o.HTTPClient != nil { return o.HTTPClient, o.Endpoint, nil } - if o.APIKey != "" { - hc := &http.Client{ + uat := userAgentTransport{ + base: baseTransport(ctx), + userAgent: o.UserAgent, + } + var hc *http.Client + switch { + case o.NoAuth: + hc = &http.Client{Transport: uat} + case o.APIKey != "": + hc = &http.Client{ Transport: &transport.APIKey{ - Key: o.APIKey, - Transport: userAgentTransport{ - base: baseTransport(ctx), - userAgent: o.UserAgent, - }, + Key: o.APIKey, + Transport: uat, }, } - return hc, o.Endpoint, nil - } - creds, err := internal.Creds(ctx, &o) - if err != nil { - return nil, "", err - } - hc := &http.Client{ - Transport: &oauth2.Transport{ - Source: creds.TokenSource, - Base: userAgentTransport{ - base: baseTransport(ctx), - userAgent: o.UserAgent, + default: + creds, err := internal.Creds(ctx, &o) + if err != nil { + return nil, "", err + } + hc = &http.Client{ + Transport: &oauth2.Transport{ + Source: creds.TokenSource, + Base: uat, }, - }, + } } return hc, o.Endpoint, nil } diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go index 40e79375bf..8867ae7812 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -45,7 +45,7 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // error message is needed, put the localized message in the error details or // localize it in the client. The optional error details may contain arbitrary // information about the error. There is a predefined set of error detail types -// in the package `google.rpc` which can be used for common error conditions. +// in the package `google.rpc` that can be used for common error conditions. // // # Language mapping // @@ -68,7 +68,7 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // errors. // // - Workflow errors. A typical workflow has multiple steps. Each step may -// have a `Status` message for error reporting purpose. +// have a `Status` message for error reporting. // // - Batch operations. 
If a client uses batch request and batch response, the // `Status` message should be used directly inside batch response, one for @@ -87,8 +87,8 @@ type Status struct { // user-facing error message should be localized and sent in the // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` - // A list of messages that carry the error details. There will be a - // common set of message types for APIs to use. + // A list of messages that carry the error details. There is a common set of + // message types for APIs to use. Details []*google_protobuf.Any `protobuf:"bytes,3,rep,name=details" json:"details,omitempty"` } diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile index eb284c238f..39606b564a 100644 --- a/vendor/google.golang.org/grpc/Makefile +++ b/vendor/google.golang.org/grpc/Makefile @@ -23,10 +23,10 @@ proto: go generate google.golang.org/grpc/... test: testdeps - go test -v -cpu 1,4 google.golang.org/grpc/... + go test -cpu 1,4 google.golang.org/grpc/... testrace: testdeps - go test -v -race -cpu 1,4 google.golang.org/grpc/... + go test -race -cpu 1,4 google.golang.org/grpc/... clean: go clean -i google.golang.org/grpc/... diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index f1f789cfbe..118327bb17 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -1,6 +1,6 @@ # gRPC-Go -[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) +[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) The Go implementation of [gRPC](https://grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start: Go](https://grpc.io/docs/quickstart/go.html) guide. @@ -16,7 +16,8 @@ $ go get -u google.golang.org/grpc Prerequisites ------------- -This requires Go 1.6 or later. +This requires Go 1.6 or later. Go 1.7 will be required as of the next gRPC-Go +release (1.8). Constraints ----------- diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go index 090fbe87c5..c40facce51 100644 --- a/vendor/google.golang.org/grpc/backoff.go +++ b/vendor/google.golang.org/grpc/backoff.go @@ -25,14 +25,12 @@ import ( // DefaultBackoffConfig uses values specified for backoff in // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. -var ( - DefaultBackoffConfig = BackoffConfig{ - MaxDelay: 120 * time.Second, - baseDelay: 1.0 * time.Second, - factor: 1.6, - jitter: 0.2, - } -) +var DefaultBackoffConfig = BackoffConfig{ + MaxDelay: 120 * time.Second, + baseDelay: 1.0 * time.Second, + factor: 1.6, + jitter: 0.2, +} // backoffStrategy defines the methodology for backing off after a grpc // connection failure. 
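Only MaxDelay is exported on BackoffConfig; baseDelay, factor and jitter remain package-private and keep the values shown in backoff.go. A minimal sketch of raising or lowering the retry ceiling at dial time follows, assuming grpc.WithBackoffConfig (defined elsewhere in the package, not in this hunk) and a placeholder target:

    package main

    import (
        "log"
        "time"

        "google.golang.org/grpc"
    )

    func main() {
        // Only MaxDelay can be set from outside the package; the base delay,
        // factor and jitter fall back to the defaults in DefaultBackoffConfig.
        bc := grpc.BackoffConfig{MaxDelay: 30 * time.Second}

        // "my-service.example.com:443" is a placeholder target.
        conn, err := grpc.Dial("my-service.example.com:443",
            grpc.WithInsecure(),
            grpc.WithBackoffConfig(bc),
        )
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()
    }
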
diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go index 0fec7b6a36..ab65049ddc 100644 --- a/vendor/google.golang.org/grpc/balancer.go +++ b/vendor/google.golang.org/grpc/balancer.go @@ -403,6 +403,6 @@ type pickFirst struct { *roundRobin } -func pickFirstBalancer(r naming.Resolver) Balancer { +func pickFirstBalancerV1(r naming.Resolver) Balancer { return &pickFirst{&roundRobin{r: r}} } diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index 6d83a1044e..2ce9a346ae 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -33,8 +33,6 @@ import ( var ( // m is a map from name to balancer builder. m = make(map[string]Builder) - // defaultBuilder is the default balancer to use. - defaultBuilder Builder // TODO(bar) install pickfirst as default. ) // Register registers the balancer builder to the balancer map. @@ -44,13 +42,12 @@ func Register(b Builder) { } // Get returns the resolver builder registered with the given name. -// If no builder is register with the name, the default pickfirst will -// be used. +// If no builder is register with the name, nil will be returned. func Get(name string) Builder { if b, ok := m[name]; ok { return b } - return defaultBuilder + return nil } // SubConn represents a gRPC sub connection. @@ -182,6 +179,10 @@ type Picker interface { // the connectivity states. // // It also generates and updates the Picker used by gRPC to pick SubConns for RPCs. +// +// HandleSubConnectionStateChange, HandleResolvedAddrs and Close are guaranteed +// to be called synchronously from the same goroutine. +// There's no guarantee on picker.Pick, it may be called anytime. type Balancer interface { // HandleSubConnStateChange is called by gRPC when the connectivity state // of sc has changed. @@ -196,6 +197,7 @@ type Balancer interface { // An empty address slice and a non-nil error will be passed if the resolver returns // non-nil error to gRPC. HandleResolvedAddrs([]resolver.Address, error) - // Close closes the balancer. + // Close closes the balancer. The balancer is not required to call + // ClientConn.RemoveSubConn for its existing SubConns. Close() } diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go new file mode 100644 index 0000000000..99e71cd390 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -0,0 +1,241 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package roundrobin defines a roundrobin balancer. Roundrobin balancer is +// installed as one of the default balancers in gRPC, users don't need to +// explicitly install this balancer. 
+package roundrobin + +import ( + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +// newBuilder creates a new roundrobin balancer builder. +func newBuilder() balancer.Builder { + return &rrBuilder{} +} + +func init() { + balancer.Register(newBuilder()) +} + +type rrBuilder struct{} + +func (*rrBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + return &rrBalancer{ + cc: cc, + subConns: make(map[resolver.Address]balancer.SubConn), + scStates: make(map[balancer.SubConn]connectivity.State), + csEvltr: &connectivityStateEvaluator{}, + // Initialize picker to a picker that always return + // ErrNoSubConnAvailable, because when state of a SubConn changes, we + // may call UpdateBalancerState with this picker. + picker: newPicker([]balancer.SubConn{}, nil), + } +} + +func (*rrBuilder) Name() string { + return "roundrobin" +} + +type rrBalancer struct { + cc balancer.ClientConn + + csEvltr *connectivityStateEvaluator + state connectivity.State + + subConns map[resolver.Address]balancer.SubConn + scStates map[balancer.SubConn]connectivity.State + picker *picker +} + +func (b *rrBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { + if err != nil { + grpclog.Infof("roundrobin.rrBalancer: HandleResolvedAddrs called with error %v", err) + return + } + grpclog.Infoln("roundrobin.rrBalancer: got new resolved addresses: ", addrs) + // addrsSet is the set converted from addrs, it's used for quick lookup of an address. + addrsSet := make(map[resolver.Address]struct{}) + for _, a := range addrs { + addrsSet[a] = struct{}{} + if _, ok := b.subConns[a]; !ok { + // a is a new address (not existing in b.subConns). + sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("roundrobin.rrBalancer: failed to create new SubConn: %v", err) + continue + } + b.subConns[a] = sc + b.scStates[sc] = connectivity.Idle + sc.Connect() + } + } + for a, sc := range b.subConns { + // a was removed by resolver. + if _, ok := addrsSet[a]; !ok { + b.cc.RemoveSubConn(sc) + delete(b.subConns, a) + // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. + // The entry will be deleted in HandleSubConnStateChange. + } + } +} + +// regeneratePicker takes a snapshot of the balancer, and generates a picker +// from it. The picker +// - always returns ErrTransientFailure if the balancer is in TransientFailure, +// - or does round robin selection of all READY SubConns otherwise. +func (b *rrBalancer) regeneratePicker() { + if b.state == connectivity.TransientFailure { + b.picker = newPicker(nil, balancer.ErrTransientFailure) + return + } + var readySCs []balancer.SubConn + for sc, st := range b.scStates { + if st == connectivity.Ready { + readySCs = append(readySCs, sc) + } + } + b.picker = newPicker(readySCs, nil) +} + +func (b *rrBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + grpclog.Infof("roundrobin.rrBalancer: handle SubConn state change: %p, %v", sc, s) + oldS, ok := b.scStates[sc] + if !ok { + grpclog.Infof("roundrobin.rrBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + return + } + b.scStates[sc] = s + switch s { + case connectivity.Idle: + sc.Connect() + case connectivity.Shutdown: + // When an address was removed by resolver, b called RemoveSubConn but + // kept the sc's state in scStates. 
Remove state for this sc here. + delete(b.scStates, sc) + } + + oldAggrState := b.state + b.state = b.csEvltr.recordTransition(oldS, s) + + // Regenerate picker when one of the following happens: + // - this sc became ready from not-ready + // - this sc became not-ready from ready + // - the aggregated state of balancer became TransientFailure from non-TransientFailure + // - the aggregated state of balancer became non-TransientFailure from TransientFailure + if (s == connectivity.Ready) != (oldS == connectivity.Ready) || + (b.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) { + b.regeneratePicker() + } + + b.cc.UpdateBalancerState(b.state, b.picker) + return +} + +// Close is a nop because roundrobin balancer doesn't internal state to clean +// up, and it doesn't need to call RemoveSubConn for the SubConns. +func (b *rrBalancer) Close() { +} + +type picker struct { + // If err is not nil, Pick always returns this err. It's immutable after + // picker is created. + err error + + // subConns is the snapshot of the roundrobin balancer when this picker was + // created. The slice is immutable. Each Get() will do a round robin + // selection from it and return the selected SubConn. + subConns []balancer.SubConn + + mu sync.Mutex + next int +} + +func newPicker(scs []balancer.SubConn, err error) *picker { + grpclog.Infof("roundrobinPicker: newPicker called with scs: %v, %v", scs, err) + if err != nil { + return &picker{err: err} + } + return &picker{ + subConns: scs, + } +} + +func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + if p.err != nil { + return nil, nil, p.err + } + if len(p.subConns) <= 0 { + return nil, nil, balancer.ErrNoSubConnAvailable + } + + p.mu.Lock() + sc := p.subConns[p.next] + p.next = (p.next + 1) % len(p.subConns) + p.mu.Unlock() + return sc, nil, nil +} + +// connectivityStateEvaluator gets updated by addrConns when their +// states transition, based on which it evaluates the state of +// ClientConn. +type connectivityStateEvaluator struct { + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transientFailure. +} + +// recordTransition records state change happening in every subConn and based on +// that it evaluates what aggregated state should be. +// It can only transition between Ready, Connecting and TransientFailure. Other states, +// Idle and Shutdown are transitioned into by ClientConn; in the beginning of the connection +// before any subConn is created ClientConn is in idle state. In the end when ClientConn +// closes it is in Shutdown state. +// +// recordTransition should only be called synchronously from the same goroutine. +func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State { + // Update counters. + for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. + switch state { + case connectivity.Ready: + cse.numReady += updateVal + case connectivity.Connecting: + cse.numConnecting += updateVal + case connectivity.TransientFailure: + cse.numTransientFailure += updateVal + } + } + + // Evaluate. 
+ if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + return connectivity.TransientFailure +} diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index 404377dbc3..c673b98fd2 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -27,45 +27,186 @@ import ( "google.golang.org/grpc/resolver" ) -// TODO(bar) move ClientConn methods to clientConn file. +// scStateUpdate contains the subConn and the new state it changed to. +type scStateUpdate struct { + sc balancer.SubConn + state connectivity.State +} -func (cc *ClientConn) updatePicker(p balancer.Picker) { - // TODO(bar) add a goroutine and sync it. - // TODO(bar) implement blocking behavior and unblock the previous pick. - cc.pmu.Lock() - cc.picker = p - cc.pmu.Unlock() +// scStateUpdateBuffer is an unbounded channel for scStateChangeTuple. +// TODO make a general purpose buffer that uses interface{}. +type scStateUpdateBuffer struct { + c chan *scStateUpdate + mu sync.Mutex + backlog []*scStateUpdate +} + +func newSCStateUpdateBuffer() *scStateUpdateBuffer { + return &scStateUpdateBuffer{ + c: make(chan *scStateUpdate, 1), + } +} + +func (b *scStateUpdateBuffer) put(t *scStateUpdate) { + b.mu.Lock() + defer b.mu.Unlock() + if len(b.backlog) == 0 { + select { + case b.c <- t: + return + default: + } + } + b.backlog = append(b.backlog, t) +} + +func (b *scStateUpdateBuffer) load() { + b.mu.Lock() + defer b.mu.Unlock() + if len(b.backlog) > 0 { + select { + case b.c <- b.backlog[0]: + b.backlog[0] = nil + b.backlog = b.backlog[1:] + default: + } + } +} + +// get returns the channel that the scStateUpdate will be sent to. +// +// Upon receiving, the caller should call load to send another +// scStateChangeTuple onto the channel if there is any. +func (b *scStateUpdateBuffer) get() <-chan *scStateUpdate { + return b.c +} + +// resolverUpdate contains the new resolved addresses or error if there's +// any. +type resolverUpdate struct { + addrs []resolver.Address + err error } // ccBalancerWrapper is a wrapper on top of cc for balancers. // It implements balancer.ClientConn interface. type ccBalancerWrapper struct { - cc *ClientConn + cc *ClientConn + balancer balancer.Balancer + stateChangeQueue *scStateUpdateBuffer + resolverUpdateCh chan *resolverUpdate + done chan struct{} + + subConns map[*acBalancerWrapper]struct{} +} + +func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper { + ccb := &ccBalancerWrapper{ + cc: cc, + stateChangeQueue: newSCStateUpdateBuffer(), + resolverUpdateCh: make(chan *resolverUpdate, 1), + done: make(chan struct{}), + subConns: make(map[*acBalancerWrapper]struct{}), + } + go ccb.watcher() + ccb.balancer = b.Build(ccb, bopts) + return ccb +} + +// watcher balancer functions sequencially, so the balancer can be implemeneted +// lock-free. 
+func (ccb *ccBalancerWrapper) watcher() { + for { + select { + case t := <-ccb.stateChangeQueue.get(): + ccb.stateChangeQueue.load() + select { + case <-ccb.done: + ccb.balancer.Close() + return + default: + } + ccb.balancer.HandleSubConnStateChange(t.sc, t.state) + case t := <-ccb.resolverUpdateCh: + select { + case <-ccb.done: + ccb.balancer.Close() + return + default: + } + ccb.balancer.HandleResolvedAddrs(t.addrs, t.err) + case <-ccb.done: + } + + select { + case <-ccb.done: + ccb.balancer.Close() + for acbw := range ccb.subConns { + ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) + } + return + default: + } + } +} + +func (ccb *ccBalancerWrapper) close() { + close(ccb.done) +} + +func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + // When updating addresses for a SubConn, if the address in use is not in + // the new addresses, the old ac will be tearDown() and a new ac will be + // created. tearDown() generates a state change with Shutdown state, we + // don't want the balancer to receive this state change. So before + // tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and + // this function will be called with (nil, Shutdown). We don't need to call + // balancer method in this case. + if sc == nil { + return + } + ccb.stateChangeQueue.put(&scStateUpdate{ + sc: sc, + state: s, + }) +} + +func (ccb *ccBalancerWrapper) handleResolvedAddrs(addrs []resolver.Address, err error) { + select { + case <-ccb.resolverUpdateCh: + default: + } + ccb.resolverUpdateCh <- &resolverUpdate{ + addrs: addrs, + err: err, + } } func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { - grpclog.Infof("ccBalancerWrapper: new subconn: %v", addrs) ac, err := ccb.cc.newAddrConn(addrs) if err != nil { return nil, err } acbw := &acBalancerWrapper{ac: ac} + acbw.ac.mu.Lock() ac.acbw = acbw + acbw.ac.mu.Unlock() + ccb.subConns[acbw] = struct{}{} return acbw, nil } func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { - grpclog.Infof("ccBalancerWrapper: removing subconn") acbw, ok := sc.(*acBalancerWrapper) if !ok { return } + delete(ccb.subConns, acbw) ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) } func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) { - // TODO(bar) update cc connectivity state. - ccb.cc.updatePicker(p) + ccb.cc.csMgr.updateState(s) + ccb.cc.blockingpicker.updatePicker(p) } func (ccb *ccBalancerWrapper) Target() string { @@ -80,14 +221,16 @@ type acBalancerWrapper struct { } func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { - grpclog.Infof("acBalancerWrapper: UpdateAddresses called with %v", addrs) acbw.mu.Lock() defer acbw.mu.Unlock() - // TODO(bar) update the addresses or tearDown and create a new ac. if !acbw.ac.tryUpdateAddrs(addrs) { cc := acbw.ac.cc acbw.ac.mu.Lock() - // Set old ac.acbw to nil so the states update will be ignored by balancer. + // Set old ac.acbw to nil so the Shutdown state update will be ignored + // by balancer. + // + // TODO(bar) the state transition could be wrong when tearDown() old ac + // and creating new ac, fix the transition. 
acbw.ac.acbw = nil acbw.ac.mu.Unlock() acState := acbw.ac.getState() @@ -105,7 +248,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { acbw.ac = ac ac.acbw = acbw if acState != connectivity.Idle { - ac.connect(false) + ac.connect() } } } @@ -113,7 +256,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { func (acbw *acBalancerWrapper) Connect() { acbw.mu.Lock() defer acbw.mu.Unlock() - acbw.ac.connect(false) + acbw.ac.connect() } func (acbw *acBalancerWrapper) getAddrConn() *addrConn { diff --git a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go index b6002b739d..6cb39071c1 100644 --- a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go +++ b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go @@ -19,10 +19,12 @@ package grpc import ( + "strings" "sync" "golang.org/x/net/context" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/resolver" @@ -33,18 +35,27 @@ type balancerWrapperBuilder struct { } func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - bwb.b.Start(cc.Target(), BalancerConfig{ + targetAddr := cc.Target() + targetSplitted := strings.Split(targetAddr, ":///") + if len(targetSplitted) >= 2 { + targetAddr = targetSplitted[1] + } + + bwb.b.Start(targetAddr, BalancerConfig{ DialCreds: opts.DialCreds, Dialer: opts.Dialer, }) _, pickfirst := bwb.b.(*pickFirst) bw := &balancerWrapper{ - balancer: bwb.b, - pickfirst: pickfirst, - cc: cc, - startCh: make(chan struct{}), - conns: make(map[resolver.Address]balancer.SubConn), - connSt: make(map[balancer.SubConn]*scState), + balancer: bwb.b, + pickfirst: pickfirst, + cc: cc, + targetAddr: targetAddr, + startCh: make(chan struct{}), + conns: make(map[resolver.Address]balancer.SubConn), + connSt: make(map[balancer.SubConn]*scState), + csEvltr: &connectivityStateEvaluator{}, + state: connectivity.Idle, } cc.UpdateBalancerState(connectivity.Idle, bw) go bw.lbWatcher() @@ -65,7 +76,12 @@ type balancerWrapper struct { balancer Balancer // The v1 balancer. pickfirst bool - cc balancer.ClientConn + cc balancer.ClientConn + targetAddr string // Target without the scheme. + + // To aggregate the connectivity state. + csEvltr *connectivityStateEvaluator + state connectivity.State mu sync.Mutex conns map[resolver.Address]balancer.SubConn @@ -81,12 +97,11 @@ type balancerWrapper struct { // connections accordingly. func (bw *balancerWrapper) lbWatcher() { <-bw.startCh - grpclog.Infof("balancerWrapper: is pickfirst: %v\n", bw.pickfirst) notifyCh := bw.balancer.Notify() if notifyCh == nil { // There's no resolver in the balancer. Connect directly. a := resolver.Address{ - Addr: bw.cc.Target(), + Addr: bw.targetAddr, Type: resolver.Backend, } sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) @@ -96,7 +111,7 @@ func (bw *balancerWrapper) lbWatcher() { bw.mu.Lock() bw.conns[a] = sc bw.connSt[sc] = &scState{ - addr: Address{Addr: bw.cc.Target()}, + addr: Address{Addr: bw.targetAddr}, s: connectivity.Idle, } bw.mu.Unlock() @@ -134,7 +149,7 @@ func (bw *balancerWrapper) lbWatcher() { newAddr := resolver.Address{ Addr: a.Addr, Type: resolver.Backend, // All addresses from balancer are all backends. - ServerName: "", // TODO(bar) support servername. 
+ ServerName: "", Metadata: a.Metadata, } newAddrs = append(newAddrs, newAddr) @@ -173,7 +188,7 @@ func (bw *balancerWrapper) lbWatcher() { resAddrs[resolver.Address{ Addr: a.Addr, Type: resolver.Backend, // All addresses from balancer are all backends. - ServerName: "", // TODO(bar) support servername. + ServerName: "", Metadata: a.Metadata, }] = a } @@ -187,7 +202,7 @@ func (bw *balancerWrapper) lbWatcher() { if _, ok := resAddrs[a]; !ok { del = append(del, c) delete(bw.conns, a) - delete(bw.connSt, c) + // Keep the state of this sc in bw.connSt until its state becomes Shutdown. } } bw.mu.Unlock() @@ -214,7 +229,6 @@ func (bw *balancerWrapper) lbWatcher() { } func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { - grpclog.Infof("balancerWrapper: handle subconn state change: %p, %v", sc, s) bw.mu.Lock() defer bw.mu.Unlock() scSt, ok := bw.connSt[sc] @@ -230,12 +244,18 @@ func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s conne scSt.down = bw.balancer.Up(scSt.addr) } else if oldS == connectivity.Ready && s != connectivity.Ready { if scSt.down != nil { - scSt.down(errConnClosing) // TODO(bar) what error to use? + scSt.down(errConnClosing) } } - // The connectivity state is ignored by clientConn now. - // TODO(bar) use the aggregated connectivity state. - bw.cc.UpdateBalancerState(connectivity.Ready, bw) + sa := bw.csEvltr.recordTransition(oldS, s) + if bw.state != sa { + bw.state = sa + } + bw.cc.UpdateBalancerState(bw.state, bw) + if s == connectivity.Shutdown { + // Remove state for this sc. + delete(bw.connSt, sc) + } return } @@ -276,27 +296,79 @@ func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) if err != nil { return nil, nil, err } - var put func(balancer.DoneInfo) + var done func(balancer.DoneInfo) if p != nil { - put = func(i balancer.DoneInfo) { p() } + done = func(i balancer.DoneInfo) { p() } } var sc balancer.SubConn + bw.mu.Lock() + defer bw.mu.Unlock() if bw.pickfirst { - bw.mu.Lock() // Get the first sc in conns. for _, sc = range bw.conns { break } - bw.mu.Unlock() } else { - bw.mu.Lock() - sc = bw.conns[resolver.Address{ + var ok bool + sc, ok = bw.conns[resolver.Address{ Addr: a.Addr, Type: resolver.Backend, - ServerName: "", // TODO(bar) support servername. + ServerName: "", Metadata: a.Metadata, }] - bw.mu.Unlock() + if !ok && failfast { + return nil, nil, Errorf(codes.Unavailable, "there is no connection available") + } + if s, ok := bw.connSt[sc]; failfast && (!ok || s.s != connectivity.Ready) { + // If the returned sc is not ready and RPC is failfast, + // return error, and this RPC will fail. + return nil, nil, Errorf(codes.Unavailable, "there is no connection available") + } } - return sc, put, nil + + return sc, done, nil +} + +// connectivityStateEvaluator gets updated by addrConns when their +// states transition, based on which it evaluates the state of +// ClientConn. +type connectivityStateEvaluator struct { + mu sync.Mutex + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transientFailure. +} + +// recordTransition records state change happening in every subConn and based on +// that it evaluates what aggregated state should be. +// It can only transition between Ready, Connecting and TransientFailure. 
Other states, +// Idle and Shutdown are transitioned into by ClientConn; in the beginning of the connection +// before any subConn is created ClientConn is in idle state. In the end when ClientConn +// closes it is in Shutdown state. +// TODO Note that in later releases, a ClientConn with no activity will be put into an Idle state. +func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State { + cse.mu.Lock() + defer cse.mu.Unlock() + + // Update counters. + for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. + switch state { + case connectivity.Ready: + cse.numReady += updateVal + case connectivity.Connecting: + cse.numConnecting += updateVal + case connectivity.TransientFailure: + cse.numTransientFailure += updateVal + } + } + + // Evaluate. + if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + return connectivity.TransientFailure } diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go index 1c7d1c1359..4bd673cd50 100644 --- a/vendor/google.golang.org/grpc/call.go +++ b/vendor/google.golang.org/grpc/call.go @@ -125,16 +125,23 @@ func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, return nil } -// Invoke sends the RPC request on the wire and returns after response is received. -// Invoke is called by generated code. Also users can call Invoke directly when it -// is really needed in their use cases. -func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { +// Invoke sends the RPC request on the wire and returns after response is +// received. This is typically called by generated code. +func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { if cc.dopts.unaryInt != nil { return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...) } return invoke(ctx, method, args, reply, cc, opts...) } +// Invoke sends the RPC request on the wire and returns after response is +// received. This is typically called by generated code. +// +// DEPRECATED: Use ClientConn.Invoke instead. +func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { + return cc.Invoke(ctx, method, args, reply, opts...) +} + func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) { c := defaultCallInfo() mc := cc.GetMethodConfig(method) @@ -207,9 +214,9 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli err error t transport.ClientTransport stream *transport.Stream - // Record the put handler from Balancer.Get(...). It is called once the + // Record the done handler from Balancer.Get(...). It is called once the // RPC has completed or failed. - put func(balancer.DoneInfo) + done func(balancer.DoneInfo) ) // TODO(zhaoq): Need a formal spec of fail-fast. callHdr := &transport.CallHdr{ @@ -223,10 +230,7 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli callHdr.Creds = c.creds } - gopts := BalancerGetOptions{ - BlockingWait: !c.failFast, - } - t, put, err = cc.getTransport(ctx, gopts) + t, done, err = cc.getTransport(ctx, c.failFast) if err != nil { // TODO(zhaoq): Probably revisit the error handling. 
if _, ok := status.FromError(err); ok { @@ -246,14 +250,14 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli } stream, err = t.NewStream(ctx, callHdr) if err != nil { - if put != nil { + if done != nil { if _, ok := err.(transport.ConnectionError); ok { // If error is connection error, transport was sending data on wire, // and we are not sure if anything has been sent on wire. // If error is not connection error, we are sure nothing has been sent. updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false}) } - put(balancer.DoneInfo{Err: err}) + done(balancer.DoneInfo{Err: err}) } if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast { continue @@ -265,12 +269,12 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli } err = sendRequest(ctx, cc.dopts, cc.dopts.cp, c, callHdr, stream, t, args, topts) if err != nil { - if put != nil { + if done != nil { updateRPCInfoInContext(ctx, rpcInfo{ bytesSent: stream.BytesSent(), bytesReceived: stream.BytesReceived(), }) - put(balancer.DoneInfo{Err: err}) + done(balancer.DoneInfo{Err: err}) } // Retry a non-failfast RPC when // i) there is a connection error; or @@ -282,12 +286,12 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli } err = recvResponse(ctx, cc.dopts, t, c, stream, reply) if err != nil { - if put != nil { + if done != nil { updateRPCInfoInContext(ctx, rpcInfo{ bytesSent: stream.BytesSent(), bytesReceived: stream.BytesReceived(), }) - put(balancer.DoneInfo{Err: err}) + done(balancer.DoneInfo{Err: err}) } if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast { continue @@ -298,12 +302,12 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true) } t.CloseStream(stream, nil) - if put != nil { + if done != nil { updateRPCInfoInContext(ctx, rpcInfo{ bytesSent: stream.BytesSent(), bytesReceived: stream.BytesReceived(), }) - put(balancer.DoneInfo{Err: err}) + done(balancer.DoneInfo{Err: err}) } return stream.Status().Err() } diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 45ea87eb61..5f5aac41f1 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -20,6 +20,7 @@ package grpc import ( "errors" + "fmt" "math" "net" "reflect" @@ -30,11 +31,14 @@ import ( "golang.org/x/net/context" "golang.org/x/net/trace" "google.golang.org/grpc/balancer" + _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" + _ "google.golang.org/grpc/resolver/dns" // To register dns resolver. + _ "google.golang.org/grpc/resolver/passthrough" // To register passthrough resolver. "google.golang.org/grpc/stats" "google.golang.org/grpc/transport" ) @@ -100,6 +104,22 @@ const ( // DialOption configures how we set up the connection. type DialOption func(*dialOptions) +// WithWriteBufferSize lets you set the size of write buffer, this determines how much data can be batched +// before doing a write on the wire. 
+func WithWriteBufferSize(s int) DialOption { + return func(o *dialOptions) { + o.copts.WriteBufferSize = s + } +} + +// WithReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most +// for each read syscall. +func WithReadBufferSize(s int) DialOption { + return func(o *dialOptions) { + o.copts.ReadBufferSize = s + } +} + // WithInitialWindowSize returns a DialOption which sets the value for initial window size on a stream. // The lower bound for window size is 64K and any value smaller than that will be ignored. func WithInitialWindowSize(s int32) DialOption { @@ -162,7 +182,18 @@ func WithBalancer(b Balancer) DialOption { } } +// WithBalancerBuilder is for testing only. Users using custom balancers should +// register their balancer and use service config to choose the balancer to use. +func WithBalancerBuilder(b balancer.Builder) DialOption { + // TODO(bar) remove this when switching balancer is done. + return func(o *dialOptions) { + o.balancerBuilder = b + } +} + // WithServiceConfig returns a DialOption which has a channel to read the service configuration. +// DEPRECATED: service config should be received through name resolver, as specified here. +// https://github.com/grpc/grpc/blob/master/doc/service_config.md func WithServiceConfig(c <-chan ServiceConfig) DialOption { return func(o *dialOptions) { o.scChan = c @@ -279,7 +310,7 @@ func WithUserAgent(s string) DialOption { } } -// WithKeepaliveParams returns a DialOption that specifies keepalive paramaters for the client transport. +// WithKeepaliveParams returns a DialOption that specifies keepalive parameters for the client transport. func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { return func(o *dialOptions) { o.copts.KeepaliveParams = kp @@ -323,13 +354,30 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * target: target, csMgr: &connectivityStateManager{}, conns: make(map[*addrConn]struct{}), + + blockingpicker: newPickerWrapper(), } - cc.csEvltr = &connectivityStateEvaluator{csMgr: cc.csMgr} cc.ctx, cc.cancel = context.WithCancel(context.Background()) for _, opt := range opts { opt(&cc.dopts) } + + if !cc.dopts.insecure { + if cc.dopts.copts.TransportCredentials == nil { + return nil, errNoTransportSecurity + } + } else { + if cc.dopts.copts.TransportCredentials != nil { + return nil, errCredentialsConflict + } + for _, cd := range cc.dopts.copts.PerRPCCredentials { + if cd.RequireTransportSecurity() { + return nil, errTransportCredentialsMissing + } + } + } + cc.mkp = cc.dopts.copts.KeepaliveParams if cc.dopts.copts.Dialer == nil { @@ -383,52 +431,18 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * if cc.dopts.bs == nil { cc.dopts.bs = DefaultBackoffConfig } + cc.parsedTarget = parseTarget(cc.target) creds := cc.dopts.copts.TransportCredentials if creds != nil && creds.Info().ServerName != "" { cc.authority = creds.Info().ServerName } else if cc.dopts.insecure && cc.dopts.copts.Authority != "" { cc.authority = cc.dopts.copts.Authority } else { - cc.authority = target + // Use endpoint from "scheme://authority/endpoint" as the default + // authority for ClientConn. + cc.authority = cc.parsedTarget.Endpoint } - // TODO(bar) parse scheme and start resolver. 
- if cc.dopts.balancerBuilder != nil { - var credsClone credentials.TransportCredentials - if creds != nil { - credsClone = creds.Clone() - } - buildOpts := balancer.BuildOptions{ - DialCreds: credsClone, - Dialer: cc.dopts.copts.Dialer, - } - // Build should not take long time. So it's ok to not have a goroutine for it. - // TODO(bar) init balancer after first resolver result to support service config balancer. - cc.balancer = cc.dopts.balancerBuilder.Build(&ccBalancerWrapper{cc: cc}, buildOpts) - } else { - waitC := make(chan error, 1) - go func() { - defer close(waitC) - // No balancer, or no resolver within the balancer. Connect directly. - ac, err := cc.newAddrConn([]resolver.Address{{Addr: target}}) - if err != nil { - waitC <- err - return - } - if err := ac.connect(cc.dopts.block); err != nil { - waitC <- err - return - } - }() - select { - case <-ctx.Done(): - return nil, ctx.Err() - case err := <-waitC: - if err != nil { - return nil, err - } - } - } if cc.dopts.scChan != nil && !scSet { // Blocking wait for the initial service config. select { @@ -444,11 +458,25 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * go cc.scWatcher() } - if cc.balancer != nil { - // Unblock balancer initialization with a fake resolver update. - // The balancer wrapper will not read the addresses, so an empty list works. - // TODO(bar) remove this after the real resolver is started. - cc.balancer.HandleResolvedAddrs([]resolver.Address{}, nil) + var credsClone credentials.TransportCredentials + if creds := cc.dopts.copts.TransportCredentials; creds != nil { + credsClone = creds.Clone() + } + cc.balancerBuildOpts = balancer.BuildOptions{ + DialCreds: credsClone, + Dialer: cc.dopts.copts.Dialer, + } + + if cc.dopts.balancerBuilder != nil { + cc.customBalancer = true + // Build should not take long time. So it's ok to not have a goroutine for it. + cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts) + } + + // Build the resolver. + cc.resolverWrapper, err = newCCResolverWrapper(cc) + if err != nil { + return nil, fmt.Errorf("failed to build resolver: %v", err) } // A blocking dial blocks until the clientConn is ready. @@ -468,54 +496,6 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * return cc, nil } -// connectivityStateEvaluator gets updated by addrConns when their -// states transition, based on which it evaluates the state of -// ClientConn. -// Note: This code will eventually sit in the balancer in the new design. -type connectivityStateEvaluator struct { - csMgr *connectivityStateManager - mu sync.Mutex - numReady uint64 // Number of addrConns in ready state. - numConnecting uint64 // Number of addrConns in connecting state. - numTransientFailure uint64 // Number of addrConns in transientFailure. -} - -// recordTransition records state change happening in every addrConn and based on -// that it evaluates what state the ClientConn is in. -// It can only transition between connectivity.Ready, connectivity.Connecting and connectivity.TransientFailure. Other states, -// Idle and connectivity.Shutdown are transitioned into by ClientConn; in the begining of the connection -// before any addrConn is created ClientConn is in idle state. In the end when ClientConn -// closes it is in connectivity.Shutdown state. -// TODO Note that in later releases, a ClientConn with no activity will be put into an Idle state. 
-func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) { - cse.mu.Lock() - defer cse.mu.Unlock() - - // Update counters. - for idx, state := range []connectivity.State{oldState, newState} { - updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. - switch state { - case connectivity.Ready: - cse.numReady += updateVal - case connectivity.Connecting: - cse.numConnecting += updateVal - case connectivity.TransientFailure: - cse.numTransientFailure += updateVal - } - } - - // Evaluate. - if cse.numReady > 0 { - cse.csMgr.updateState(connectivity.Ready) - return - } - if cse.numConnecting > 0 { - cse.csMgr.updateState(connectivity.Connecting) - return - } - cse.csMgr.updateState(connectivity.TransientFailure) -} - // connectivityStateManager keeps the connectivity.State of ClientConn. // This struct will eventually be exported so the balancers can access it. type connectivityStateManager struct { @@ -564,23 +544,26 @@ type ClientConn struct { ctx context.Context cancel context.CancelFunc - target string - authority string - dopts dialOptions - csMgr *connectivityStateManager - csEvltr *connectivityStateEvaluator // This will eventually be part of balancer. + target string + parsedTarget resolver.Target + authority string + dopts dialOptions + csMgr *connectivityStateManager - balancer balancer.Balancer - - // TODO(bar) move the mutex and picker into a struct that does blocking pick(). - pmu sync.Mutex - picker balancer.Picker + customBalancer bool // If this is true, switching balancer will be disabled. + balancerBuildOpts balancer.BuildOptions + resolverWrapper *ccResolverWrapper + blockingpicker *pickerWrapper mu sync.RWMutex sc ServiceConfig + scRaw string conns map[*addrConn]struct{} // Keepalive parameter can be updated if a GoAway is received. - mkp keepalive.ClientParameters + mkp keepalive.ClientParameters + curBalancerName string + curAddresses []resolver.Address + balancerWrapper *ccBalancerWrapper } // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or @@ -616,6 +599,7 @@ func (cc *ClientConn) scWatcher() { // TODO: load balance policy runtime change is ignored. // We may revist this decision in the future. cc.sc = sc + cc.scRaw = "" cc.mu.Unlock() case <-cc.ctx.Done(): return @@ -623,6 +607,71 @@ func (cc *ClientConn) scWatcher() { } } +func (cc *ClientConn) handleResolvedAddrs(addrs []resolver.Address, err error) { + cc.mu.Lock() + defer cc.mu.Unlock() + if cc.conns == nil { + return + } + + // TODO(bar switching) when grpclb is submitted, check address type and start grpclb. + if !cc.customBalancer && cc.balancerWrapper == nil { + // No customBalancer was specified by DialOption, and this is the first + // time handling resolved addresses, create a pickfirst balancer. + builder := newPickfirstBuilder() + cc.curBalancerName = builder.Name() + cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts) + } + + // TODO(bar switching) compare addresses, if there's no update, don't notify balancer. + cc.curAddresses = addrs + cc.balancerWrapper.handleResolvedAddrs(addrs, nil) +} + +// switchBalancer starts the switching from current balancer to the balancer with name. 
+func (cc *ClientConn) switchBalancer(name string) { + cc.mu.Lock() + defer cc.mu.Unlock() + if cc.conns == nil { + return + } + grpclog.Infof("ClientConn switching balancer to %q", name) + + if cc.customBalancer { + grpclog.Infoln("ignoring service config balancer configuration: WithBalancer DialOption used instead") + return + } + + if cc.curBalancerName == name { + return + } + + // TODO(bar switching) change this to two steps: drain and close. + // Keep track of sc in wrapper. + cc.balancerWrapper.close() + + builder := balancer.Get(name) + if builder == nil { + grpclog.Infof("failed to get balancer builder for: %v (this should never happen...)", name) + builder = newPickfirstBuilder() + } + cc.curBalancerName = builder.Name() + cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts) + cc.balancerWrapper.handleResolvedAddrs(cc.curAddresses, nil) +} + +func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return + } + // TODO(bar switching) send updates to all balancer wrappers when balancer + // gracefully switching is supported. + cc.balancerWrapper.handleSubConnStateChange(sc, s) + cc.mu.Unlock() +} + // newAddrConn creates an addrConn for addrs and adds it to cc.conns. func (cc *ClientConn) newAddrConn(addrs []resolver.Address) (*addrConn, error) { ac := &addrConn{ @@ -631,7 +680,6 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address) (*addrConn, error) { dopts: cc.dopts, } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) - ac.csEvltr = cc.csEvltr // Track ac in cc. This needs to be done before any getTransport(...) is called. cc.mu.Lock() if cc.conns == nil { @@ -658,60 +706,35 @@ func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) { // connect starts to creating transport and also starts the transport monitor // goroutine for this ac. +// It does nothing if the ac is not IDLE. // TODO(bar) Move this to the addrConn section. // This was part of resetAddrConn, keep it here to make the diff look clean. -func (ac *addrConn) connect(block bool) error { +func (ac *addrConn) connect() error { ac.mu.Lock() if ac.state == connectivity.Shutdown { ac.mu.Unlock() return errConnClosing } + if ac.state != connectivity.Idle { + ac.mu.Unlock() + return nil + } + ac.state = connectivity.Connecting + ac.cc.handleSubConnStateChange(ac.acbw, ac.state) ac.mu.Unlock() - if EnableTracing { - ac.events = trace.NewEventLog("grpc.ClientConn", ac.addrs[0].Addr) - } - if !ac.dopts.insecure { - if ac.dopts.copts.TransportCredentials == nil { - return errNoTransportSecurity - } - } else { - if ac.dopts.copts.TransportCredentials != nil { - return errCredentialsConflict - } - for _, cd := range ac.dopts.copts.PerRPCCredentials { - if cd.RequireTransportSecurity() { - return errTransportCredentialsMissing - } - } - } - - if block { - if err := ac.resetTransport(false); err != nil { + // Start a goroutine connecting to the server asynchronously. + go func() { + if err := ac.resetTransport(); err != nil { + grpclog.Warningf("Failed to dial %s: %v; please retry.", ac.addrs[0].Addr, err) if err != errConnClosing { + // Keep this ac in cc.conns, to get the reason it's torn down. ac.tearDown(err) } - if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() { - return e.Origin() - } - return err + return } - // Start to monitor the error status of transport. - go ac.transportMonitor() - } else { - // Start a goroutine connecting to the server asynchronously. 
- go func() { - if err := ac.resetTransport(false); err != nil { - grpclog.Warningf("Failed to dial %s: %v; please retry.", ac.addrs[0].Addr, err) - if err != errConnClosing { - // Keep this ac in cc.conns, to get the reason it's torn down. - ac.tearDown(err) - } - return - } - ac.transportMonitor() - }() - } + ac.transportMonitor() + }() return nil } @@ -764,56 +787,26 @@ func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { return m } -func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) { - var ( - ac *addrConn - put func(balancer.DoneInfo) - ) - if cc.balancer == nil { - // If balancer is nil, there should be only one addrConn available. - cc.mu.RLock() - if cc.conns == nil { - cc.mu.RUnlock() - return nil, nil, toRPCErr(ErrClientConnClosing) - } - for ac = range cc.conns { - // Break after the first iteration to get the first addrConn. - break - } - cc.mu.RUnlock() - } else { - cc.pmu.Lock() - // TODO(bar) call pick on struct blockPicker instead of the real picker. - p := cc.picker - cc.pmu.Unlock() - - var ( - err error - sc balancer.SubConn - ) - sc, put, err = p.Pick(ctx, balancer.PickOptions{}) - if err != nil { - return nil, nil, toRPCErr(err) - } - if acbw, ok := sc.(*acBalancerWrapper); ok { - ac = acbw.getAddrConn() - } else if put != nil { - updateRPCInfoInContext(ctx, rpcInfo{bytesSent: false, bytesReceived: false}) - put(balancer.DoneInfo{Err: errors.New("SubConn returned by pick cannot be recognized")}) - } - } - if ac == nil { - return nil, nil, errConnClosing - } - t, err := ac.wait(ctx, cc.balancer != nil, !opts.BlockingWait) +func (cc *ClientConn) getTransport(ctx context.Context, failfast bool) (transport.ClientTransport, func(balancer.DoneInfo), error) { + t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{}) if err != nil { - if put != nil { - updateRPCInfoInContext(ctx, rpcInfo{bytesSent: false, bytesReceived: false}) - put(balancer.DoneInfo{Err: err}) - } - return nil, nil, err + return nil, nil, toRPCErr(err) } - return t, put, nil + return t, done, nil +} + +// handleServiceConfig parses the service config string in JSON format to Go native +// struct ServiceConfig, and store both the struct and the JSON string in ClientConn. +func (cc *ClientConn) handleServiceConfig(js string) error { + sc, err := parseServiceConfig(js) + if err != nil { + return err + } + cc.mu.Lock() + cc.scRaw = js + cc.sc = sc + cc.mu.Unlock() + return nil } // Close tears down the ClientConn and all underlying connections. @@ -828,9 +821,18 @@ func (cc *ClientConn) Close() error { conns := cc.conns cc.conns = nil cc.csMgr.updateState(connectivity.Shutdown) + + rWrapper := cc.resolverWrapper + cc.resolverWrapper = nil + bWrapper := cc.balancerWrapper + cc.balancerWrapper = nil cc.mu.Unlock() - if cc.balancer != nil { - cc.balancer.Close() + cc.blockingpicker.close() + if rWrapper != nil { + rWrapper.close() + } + if bWrapper != nil { + bWrapper.close() } for ac := range conns { ac.tearDown(ErrClientConnClosing) @@ -850,8 +852,6 @@ type addrConn struct { events trace.EventLog acbw balancer.SubConn - csEvltr *connectivityStateEvaluator - mu sync.Mutex state connectivity.State // ready is closed and becomes nil when a new transport is up or failed @@ -893,34 +893,23 @@ func (ac *addrConn) errorf(format string, a ...interface{}) { } } -// resetTransport recreates a transport to the address for ac. 
-// For the old transport: -// - if drain is true, it will be gracefully closed. -// - otherwise, it will be closed. +// resetTransport recreates a transport to the address for ac. The old +// transport will close itself on error or when the clientconn is closed. +// // TODO(bar) make sure all state transitions are valid. -func (ac *addrConn) resetTransport(drain bool) error { +func (ac *addrConn) resetTransport() error { ac.mu.Lock() if ac.state == connectivity.Shutdown { ac.mu.Unlock() return errConnClosing } - oldState := ac.state - ac.state = connectivity.Connecting - ac.csEvltr.recordTransition(oldState, ac.state) - if ac.cc.balancer != nil { - ac.cc.balancer.HandleSubConnStateChange(ac.acbw, ac.state) - } - // TODO(bar) don't call balancer functions to handle subconn state change if ac.acbw is nil. if ac.ready != nil { close(ac.ready) ac.ready = nil } - t := ac.transport ac.transport = nil + ac.curAddr = resolver.Address{} ac.mu.Unlock() - if t != nil && !drain { - t.Close() - } ac.cc.mu.RLock() ac.dopts.copts.KeepaliveParams = ac.cc.mkp ac.cc.mu.RUnlock() @@ -937,16 +926,14 @@ func (ac *addrConn) resetTransport(drain bool) error { return errConnClosing } ac.printf("connecting") - oldState := ac.state - ac.state = connectivity.Connecting - ac.csEvltr.recordTransition(oldState, ac.state) - // TODO(bar) remove condition once we always have a balancer. - if ac.cc.balancer != nil { - ac.cc.balancer.HandleSubConnStateChange(ac.acbw, ac.state) + if ac.state != connectivity.Connecting { + ac.state = connectivity.Connecting + ac.cc.handleSubConnStateChange(ac.acbw, ac.state) } // copy ac.addrs in case of race addrsIter := make([]resolver.Address, len(ac.addrs)) copy(addrsIter, ac.addrs) + copts := ac.dopts.copts ac.mu.Unlock() for _, addr := range addrsIter { ac.mu.Lock() @@ -956,17 +943,13 @@ func (ac *addrConn) resetTransport(drain bool) error { return errConnClosing } ac.mu.Unlock() - ctx, cancel := context.WithTimeout(ac.ctx, timeout) sinfo := transport.TargetInfo{ - Addr: addr.Addr, - Metadata: addr.Metadata, + Addr: addr.Addr, + Metadata: addr.Metadata, + Authority: ac.cc.authority, } - newTransport, err := transport.NewClientTransport(ctx, sinfo, ac.dopts.copts) - // Don't call cancel in success path due to a race in Go 1.6: - // https://github.com/golang/go/issues/15078. 
+ newTransport, err := transport.NewClientTransport(ac.cc.ctx, sinfo, copts, timeout) if err != nil { - cancel() - if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() { return err } @@ -988,13 +971,13 @@ func (ac *addrConn) resetTransport(drain bool) error { newTransport.Close() return errConnClosing } - oldState = ac.state ac.state = connectivity.Ready - ac.csEvltr.recordTransition(oldState, ac.state) - if ac.cc.balancer != nil { - ac.cc.balancer.HandleSubConnStateChange(ac.acbw, ac.state) - } + ac.cc.handleSubConnStateChange(ac.acbw, ac.state) + t := ac.transport ac.transport = newTransport + if t != nil { + t.Close() + } ac.curAddr = addr if ac.ready != nil { close(ac.ready) @@ -1004,12 +987,8 @@ func (ac *addrConn) resetTransport(drain bool) error { return nil } ac.mu.Lock() - oldState = ac.state ac.state = connectivity.TransientFailure - ac.csEvltr.recordTransition(oldState, ac.state) - if ac.cc.balancer != nil { - ac.cc.balancer.HandleSubConnStateChange(ac.acbw, ac.state) - } + ac.cc.handleSubConnStateChange(ac.acbw, ac.state) if ac.ready != nil { close(ac.ready) ac.ready = nil @@ -1033,58 +1012,35 @@ func (ac *addrConn) transportMonitor() { ac.mu.Lock() t := ac.transport ac.mu.Unlock() + // Block until we receive a goaway or an error occurs. + select { + case <-t.GoAway(): + case <-t.Error(): + } + // If a GoAway happened, regardless of error, adjust our keepalive + // parameters as appropriate. select { - // This is needed to detect the teardown when - // the addrConn is idle (i.e., no RPC in flight). - case <-ac.ctx.Done(): - select { - case <-t.Error(): - t.Close() - default: - } - return case <-t.GoAway(): ac.adjustParams(t.GetGoAwayReason()) - // If GoAway happens without any network I/O error, the underlying transport - // will be gracefully closed, and a new transport will be created. - // (The transport will be closed when all the pending RPCs finished or failed.) - // If GoAway and some network I/O error happen concurrently, the underlying transport - // will be closed, and a new transport will be created. - var drain bool - select { - case <-t.Error(): - default: - drain = true - } - if err := ac.resetTransport(drain); err != nil { - grpclog.Infof("get error from resetTransport %v, transportMonitor returning", err) - if err != errConnClosing { - // Keep this ac in cc.conns, to get the reason it's torn down. - ac.tearDown(err) - } - return - } - case <-t.Error(): - select { - case <-ac.ctx.Done(): - t.Close() - return - case <-t.GoAway(): - ac.adjustParams(t.GetGoAwayReason()) - default: - } - if err := ac.resetTransport(false); err != nil { - grpclog.Infof("get error from resetTransport %v, transportMonitor returning", err) - ac.mu.Lock() - ac.printf("transport exiting: %v", err) - ac.mu.Unlock() - grpclog.Warningf("grpc: addrConn.transportMonitor exits due to: %v", err) - if err != errConnClosing { - // Keep this ac in cc.conns, to get the reason it's torn down. - ac.tearDown(err) - } - return + default: + } + ac.mu.Lock() + // Set connectivity state to TransientFailure before calling + // resetTransport. Transition READY->CONNECTING is not valid. 
+ ac.state = connectivity.TransientFailure + ac.cc.handleSubConnStateChange(ac.acbw, ac.state) + ac.curAddr = resolver.Address{} + ac.mu.Unlock() + if err := ac.resetTransport(); err != nil { + ac.mu.Lock() + ac.printf("transport exiting: %v", err) + ac.mu.Unlock() + grpclog.Warningf("grpc: addrConn.transportMonitor exits due to: %v", err) + if err != errConnClosing { + // Keep this ac in cc.conns, to get the reason it's torn down. + ac.tearDown(err) } + return } } } @@ -1129,6 +1085,28 @@ func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (trans } } +// getReadyTransport returns the transport if ac's state is READY. +// Otherwise it returns nil, false. +// If ac's state is IDLE, it will trigger ac to connect. +func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) { + ac.mu.Lock() + if ac.state == connectivity.Ready { + t := ac.transport + ac.mu.Unlock() + return t, true + } + var idle bool + if ac.state == connectivity.Idle { + idle = true + } + ac.mu.Unlock() + // Trigger idle ac to connect. + if idle { + ac.connect() + } + return nil, false +} + // tearDown starts to tear down the addrConn. // TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in // some edge cases (e.g., the caller opens and closes many addrConn's in a @@ -1136,10 +1114,9 @@ func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (trans // tearDown doesn't remove ac from ac.cc.conns. func (ac *addrConn) tearDown(err error) { ac.cancel() - ac.mu.Lock() - ac.curAddr = resolver.Address{} defer ac.mu.Unlock() + ac.curAddr = resolver.Address{} if err == errConnDrain && ac.transport != nil { // GracefulClose(...) may be executed multiple times when // i) receiving multiple GoAway frames from the server; or @@ -1150,13 +1127,9 @@ func (ac *addrConn) tearDown(err error) { if ac.state == connectivity.Shutdown { return } - oldState := ac.state ac.state = connectivity.Shutdown ac.tearDownErr = err - ac.csEvltr.recordTransition(oldState, ac.state) - if ac.cc.balancer != nil { - ac.cc.balancer.HandleSubConnStateChange(ac.acbw, ac.state) - } + ac.cc.handleSubConnStateChange(ac.acbw, ac.state) if ac.events != nil { ac.events.Finish() ac.events = nil @@ -1165,9 +1138,6 @@ func (ac *addrConn) tearDown(err error) { close(ac.ready) ac.ready = nil } - if ac.transport != nil && err != errConnDrain { - ac.transport.Close() - } return } diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go index 905b048e2a..b452a4ae8d 100644 --- a/vendor/google.golang.org/grpc/codec.go +++ b/vendor/google.golang.org/grpc/codec.go @@ -92,13 +92,11 @@ func (protoCodec) String() string { return "proto" } -var ( - protoBufferPool = &sync.Pool{ - New: func() interface{} { - return &cachedProtoBuffer{ - Buffer: proto.Buffer{}, - lastMarshaledSize: 16, - } - }, - } -) +var protoBufferPool = &sync.Pool{ + New: func() interface{} { + return &cachedProtoBuffer{ + Buffer: proto.Buffer{}, + lastMarshaledSize: 16, + } + }, +} diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index 2475fe8322..90b6a61170 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -34,10 +34,8 @@ import ( "golang.org/x/net/context" ) -var ( - // alpnProtoStr are the specified application level protocols for gRPC. 
- alpnProtoStr = []string{"h2"} -) +// alpnProtoStr are the specified application level protocols for gRPC. +var alpnProtoStr = []string{"h2"} // PerRPCCredentials defines the common interface for the credentials which need to // attach security information to every RPC (e.g., oauth2). @@ -74,11 +72,9 @@ type AuthInfo interface { AuthType() string } -var ( - // ErrConnDispatched indicates that rawConn has been dispatched out of gRPC - // and the caller should not close rawConn. - ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC") -) +// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC +// and the caller should not close rawConn. +var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC") // TransportCredentials defines the common interface for all the live gRPC wire // protocols and supported transport security protocols (e.g., TLS, SSL). @@ -131,15 +127,15 @@ func (c tlsCreds) Info() ProtocolInfo { } } -func (c *tlsCreds) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { +func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { // use local cfg to avoid clobbering ServerName if using multiple endpoints cfg := cloneTLSConfig(c.config) if cfg.ServerName == "" { - colonPos := strings.LastIndex(addr, ":") + colonPos := strings.LastIndex(authority, ":") if colonPos == -1 { - colonPos = len(addr) + colonPos = len(authority) } - cfg.ServerName = addr[:colonPos] + cfg.ServerName = authority[:colonPos] } conn := tls.Client(rawConn, cfg) errChannel := make(chan error, 1) diff --git a/vendor/google.golang.org/grpc/go17.go b/vendor/google.golang.org/grpc/go17.go index a3421d99eb..de23098eb9 100644 --- a/vendor/google.golang.org/grpc/go17.go +++ b/vendor/google.golang.org/grpc/go17.go @@ -22,6 +22,7 @@ package grpc import ( "context" + "fmt" "io" "net" "net/http" @@ -41,7 +42,7 @@ func dialContext(ctx context.Context, network, address string) (net.Conn, error) func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { req = req.WithContext(ctx) if err := req.Write(conn); err != nil { - return err + return fmt.Errorf("failed to write the HTTP request: %v", err) } return nil } diff --git a/vendor/google.golang.org/grpc/grpclb.go b/vendor/google.golang.org/grpc/grpclb.go index ebda2a1e5d..db56ff3621 100644 --- a/vendor/google.golang.org/grpc/grpclb.go +++ b/vendor/google.golang.org/grpc/grpclb.go @@ -461,6 +461,7 @@ func (b *grpclbBalancer) Start(target string, config BalancerConfig) error { // WithDialer takes a different type of function, so we instead use a special DialOption here. dopts = append(dopts, func(o *dialOptions) { o.copts.Dialer = dialer }) } + dopts = append(dopts, WithBlock()) ccError = make(chan struct{}) ctx, cancel := context.WithTimeout(context.Background(), time.Second) cc, err = DialContext(ctx, rb.addr, dopts...) 
diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto index 2ed04551fa..42d99c109f 100644 --- a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto +++ b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto @@ -15,7 +15,7 @@ syntax = "proto3"; package grpc.lb.v1; -option go_package = "messages"; +option go_package = "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"; message Duration { // Signed seconds of the span of time. Must be from -315,576,000,000 diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go new file mode 100644 index 0000000000..9085dbc9c9 --- /dev/null +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -0,0 +1,141 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" + "google.golang.org/grpc/transport" +) + +// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick +// actions and unblock when there's a picker update. +type pickerWrapper struct { + mu sync.Mutex + done bool + blockingCh chan struct{} + picker balancer.Picker +} + +func newPickerWrapper() *pickerWrapper { + bp := &pickerWrapper{blockingCh: make(chan struct{})} + return bp +} + +// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. +func (bp *pickerWrapper) updatePicker(p balancer.Picker) { + bp.mu.Lock() + if bp.done { + bp.mu.Unlock() + return + } + bp.picker = p + // bp.blockingCh should never be nil. + close(bp.blockingCh) + bp.blockingCh = make(chan struct{}) + bp.mu.Unlock() +} + +// pick returns the transport that will be used for the RPC. +// It may block in the following cases: +// - there's no picker +// - the current picker returns ErrNoSubConnAvailable +// - the current picker returns other errors and failfast is false. +// - the subConn returned by the current picker is not READY +// When one of these situations happens, pick blocks until the picker gets updated. +func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.PickOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) { + var ( + p balancer.Picker + ch chan struct{} + ) + + for { + bp.mu.Lock() + if bp.done { + bp.mu.Unlock() + return nil, nil, ErrClientConnClosing + } + + if bp.picker == nil { + ch = bp.blockingCh + } + if ch == bp.blockingCh { + // This could happen when either: + // - bp.picker is nil (the previous if condition), or + // - has called pick on the current picker. 
+ bp.mu.Unlock() + select { + case <-ctx.Done(): + return nil, nil, ctx.Err() + case <-ch: + } + continue + } + + ch = bp.blockingCh + p = bp.picker + bp.mu.Unlock() + + subConn, put, err := p.Pick(ctx, opts) + + if err != nil { + switch err { + case balancer.ErrNoSubConnAvailable: + continue + case balancer.ErrTransientFailure: + if !failfast { + continue + } + return nil, nil, status.Errorf(codes.Unavailable, "%v", err) + default: + // err is some other error. + return nil, nil, toRPCErr(err) + } + } + + acw, ok := subConn.(*acBalancerWrapper) + if !ok { + grpclog.Infof("subconn returned from pick is not *acBalancerWrapper") + continue + } + if t, ok := acw.getAddrConn().getReadyTransport(); ok { + return t, put, nil + } + grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick") + // If ok == false, ac.state is not READY. + // A valid picker always returns READY subConn. This means the state of ac + // just changed, and picker will be updated shortly. + // continue back to the beginning of the for loop to repick. + } +} + +func (bp *pickerWrapper) close() { + bp.mu.Lock() + defer bp.mu.Unlock() + if bp.done { + return + } + bp.done = true + close(bp.blockingCh) +} diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go new file mode 100644 index 0000000000..e4597cb86c --- /dev/null +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -0,0 +1,105 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +func newPickfirstBuilder() balancer.Builder { + return &pickfirstBuilder{} +} + +type pickfirstBuilder struct{} + +func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + return &pickfirstBalancer{cc: cc} +} + +func (*pickfirstBuilder) Name() string { + return "pickfirst" +} + +type pickfirstBalancer struct { + cc balancer.ClientConn + sc balancer.SubConn +} + +func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { + if err != nil { + grpclog.Infof("pickfirstBalancer: HandleResolvedAddrs called with error %v", err) + return + } + if b.sc == nil { + b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) + return + } + b.cc.UpdateBalancerState(connectivity.Idle, &picker{sc: b.sc}) + b.sc.Connect() + } else { + b.sc.UpdateAddresses(addrs) + b.sc.Connect() + } +} + +func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s) + if b.sc != sc { + grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized") + return + } + if s == connectivity.Shutdown { + b.sc = nil + return + } + + switch s { + case connectivity.Ready, connectivity.Idle: + b.cc.UpdateBalancerState(s, &picker{sc: sc}) + case connectivity.Connecting: + b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrNoSubConnAvailable}) + case connectivity.TransientFailure: + b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrTransientFailure}) + } +} + +func (b *pickfirstBalancer) Close() { +} + +type picker struct { + err error + sc balancer.SubConn +} + +func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + if p.err != nil { + return nil, nil, p.err + } + return p.sc, nil, nil +} + +func init() { + balancer.Register(newPickfirstBuilder()) +} diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go new file mode 100644 index 0000000000..a543a709a6 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go @@ -0,0 +1,377 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package dns implements a dns resolver to be installed as the default resolver +// in grpc. 
+package dns + +import ( + "encoding/json" + "errors" + "fmt" + "math/rand" + "net" + "os" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +func init() { + resolver.Register(NewBuilder()) +} + +const ( + defaultPort = "443" + defaultFreq = time.Minute * 30 + golang = "GO" + // In DNS, service config is encoded in a TXT record via the mechanism + // described in RFC-1464 using the attribute name grpc_config. + txtAttribute = "grpc_config=" +) + +var errMissingAddr = errors.New("missing address") + +// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. +func NewBuilder() resolver.Builder { + return &dnsBuilder{freq: defaultFreq} +} + +type dnsBuilder struct { + // frequency of polling the DNS server. + freq time.Duration +} + +// Build creates and starts a DNS resolver that watches the name resolution of the target. +func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) { + host, port, err := parseTarget(target.Endpoint) + if err != nil { + return nil, err + } + + // IP address. + if net.ParseIP(host) != nil { + host, _ = formatIP(host) + addr := []resolver.Address{{Addr: host + ":" + port}} + i := &ipResolver{ + cc: cc, + ip: addr, + rn: make(chan struct{}, 1), + q: make(chan struct{}), + } + cc.NewAddress(addr) + go i.watcher() + return i, nil + } + + // DNS address (non-IP). + ctx, cancel := context.WithCancel(context.Background()) + d := &dnsResolver{ + freq: b.freq, + host: host, + port: port, + ctx: ctx, + cancel: cancel, + cc: cc, + t: time.NewTimer(0), + rn: make(chan struct{}, 1), + } + + d.wg.Add(1) + go d.watcher() + return d, nil +} + +// Scheme returns the naming scheme of this resolver builder, which is "dns". +func (b *dnsBuilder) Scheme() string { + return "dns" +} + +// ipResolver watches for the name resolution update for an IP address. +type ipResolver struct { + cc resolver.ClientConn + ip []resolver.Address + // rn channel is used by ResolveNow() to force an immediate resolution of the target. + rn chan struct{} + q chan struct{} +} + +// ResolveNow resend the address it stores, no resolution is needed. +func (i *ipResolver) ResolveNow(opt resolver.ResolveNowOption) { + select { + case i.rn <- struct{}{}: + default: + } +} + +// Close closes the ipResolver. +func (i *ipResolver) Close() { + close(i.q) +} + +func (i *ipResolver) watcher() { + for { + select { + case <-i.rn: + i.cc.NewAddress(i.ip) + case <-i.q: + return + } + } +} + +// dnsResolver watches for the name resolution update for a non-IP target. +type dnsResolver struct { + freq time.Duration + host string + port string + ctx context.Context + cancel context.CancelFunc + cc resolver.ClientConn + // rn channel is used by ResolveNow() to force an immediate resolution of the target. + rn chan struct{} + t *time.Timer + // wg is used to enforce Close() to return after the watcher() goroutine has finished. + // Otherwise, data race will be possible. [Race Example] in dns_resolver_test we + // replace the real lookup functions with mocked ones to facilitate testing. + // If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes + // will warns lookup (READ the lookup function pointers) inside watcher() goroutine + // has data race with replaceNetFunc (WRITE the lookup function pointers). 
+ wg sync.WaitGroup +} + +// ResolveNow invoke an immediate resolution of the target that this dnsResolver watches. +func (d *dnsResolver) ResolveNow(opt resolver.ResolveNowOption) { + select { + case d.rn <- struct{}{}: + default: + } +} + +// Close closes the dnsResolver. +func (d *dnsResolver) Close() { + d.cancel() + d.wg.Wait() + d.t.Stop() +} + +func (d *dnsResolver) watcher() { + defer d.wg.Done() + for { + select { + case <-d.ctx.Done(): + return + case <-d.t.C: + case <-d.rn: + } + result, sc := d.lookup() + // Next lookup should happen after an interval defined by d.freq. + d.t.Reset(d.freq) + d.cc.NewServiceConfig(string(sc)) + d.cc.NewAddress(result) + } +} + +func (d *dnsResolver) lookupSRV() []resolver.Address { + var newAddrs []resolver.Address + _, srvs, err := lookupSRV(d.ctx, "grpclb", "tcp", d.host) + if err != nil { + grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err) + return nil + } + for _, s := range srvs { + lbAddrs, err := lookupHost(d.ctx, s.Target) + if err != nil { + grpclog.Warningf("grpc: failed load banlacer address dns lookup due to %v.\n", err) + continue + } + for _, a := range lbAddrs { + a, ok := formatIP(a) + if !ok { + grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) + continue + } + addr := a + ":" + strconv.Itoa(int(s.Port)) + newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target}) + } + } + return newAddrs +} + +func (d *dnsResolver) lookupTXT() string { + ss, err := lookupTXT(d.ctx, d.host) + if err != nil { + grpclog.Warningf("grpc: failed dns TXT record lookup due to %v.\n", err) + return "" + } + var res string + for _, s := range ss { + res += s + } + + // TXT record must have "grpc_config=" attribute in order to be used as service config. + if !strings.HasPrefix(res, txtAttribute) { + grpclog.Warningf("grpc: TXT record %v missing %v attribute", res, txtAttribute) + return "" + } + return strings.TrimPrefix(res, txtAttribute) +} + +func (d *dnsResolver) lookupHost() []resolver.Address { + var newAddrs []resolver.Address + addrs, err := lookupHost(d.ctx, d.host) + if err != nil { + grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err) + return nil + } + for _, a := range addrs { + a, ok := formatIP(a) + if !ok { + grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) + continue + } + addr := a + ":" + d.port + newAddrs = append(newAddrs, resolver.Address{Addr: addr}) + } + return newAddrs +} + +func (d *dnsResolver) lookup() ([]resolver.Address, string) { + var newAddrs []resolver.Address + newAddrs = d.lookupSRV() + // Support fallback to non-balancer address. + newAddrs = append(newAddrs, d.lookupHost()...) + sc := d.lookupTXT() + return newAddrs, canaryingSC(sc) +} + +// formatIP returns ok = false if addr is not a valid textual representation of an IP address. +// If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. +func formatIP(addr string) (addrIP string, ok bool) { + ip := net.ParseIP(addr) + if ip == nil { + return "", false + } + if ip.To4() != nil { + return addr, true + } + return "[" + addr + "]", true +} + +// parseTarget takes the user input target string, returns formatted host and port info. +// If target doesn't specify a port, set the port to be the defaultPort. +// If target is in IPv6 format and host-name is enclosed in sqarue brackets, brackets +// are strippd when setting the host. 
+// examples: +// target: "www.google.com" returns host: "www.google.com", port: "443" +// target: "ipv4-host:80" returns host: "ipv4-host", port: "80" +// target: "[ipv6-host]" returns host: "ipv6-host", port: "443" +// target: ":80" returns host: "localhost", port: "80" +// target: ":" returns host: "localhost", port: "443" +func parseTarget(target string) (host, port string, err error) { + if target == "" { + return "", "", errMissingAddr + } + if ip := net.ParseIP(target); ip != nil { + // target is an IPv4 or IPv6(without brackets) address + return target, defaultPort, nil + } + if host, port, err = net.SplitHostPort(target); err == nil { + // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port + if host == "" { + // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. + host = "localhost" + } + if port == "" { + // If the port field is empty(target ends with colon), e.g. "[::1]:", defaultPort is used. + port = defaultPort + } + return host, port, nil + } + if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil { + // target doesn't have port + return host, port, nil + } + return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err) +} + +type rawChoice struct { + ClientLanguage *[]string `json:"clientLanguage,omitempty"` + Percentage *int `json:"percentage,omitempty"` + ClientHostName *[]string `json:"clientHostName,omitempty"` + ServiceConfig *json.RawMessage `json:"serviceConfig,omitempty"` +} + +func containsString(a *[]string, b string) bool { + if a == nil { + return true + } + for _, c := range *a { + if c == b { + return true + } + } + return false +} + +func chosenByPercentage(a *int) bool { + if a == nil { + return true + } + s := rand.NewSource(time.Now().UnixNano()) + r := rand.New(s) + if r.Intn(100)+1 > *a { + return false + } + return true +} + +func canaryingSC(js string) string { + if js == "" { + return "" + } + var rcs []rawChoice + err := json.Unmarshal([]byte(js), &rcs) + if err != nil { + grpclog.Warningf("grpc: failed to parse service config json string due to %v.\n", err) + return "" + } + cliHostname, err := os.Hostname() + if err != nil { + grpclog.Warningf("grpc: failed to get client hostname due to %v.\n", err) + return "" + } + var sc string + for _, c := range rcs { + if !containsString(c.ClientLanguage, golang) || + !chosenByPercentage(c.Percentage) || + !containsString(c.ClientHostName, cliHostname) || + c.ServiceConfig == nil { + continue + } + sc = string(*c.ServiceConfig) + break + } + return sc +} diff --git a/vendor/google.golang.org/grpc/resolver/dns/go17.go b/vendor/google.golang.org/grpc/resolver/dns/go17.go new file mode 100644 index 0000000000..b466bc8f6d --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/dns/go17.go @@ -0,0 +1,35 @@ +// +build go1.6, !go1.8 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package dns + +import ( + "net" + + "golang.org/x/net/context" +) + +var ( + lookupHost = func(ctx context.Context, host string) ([]string, error) { return net.LookupHost(host) } + lookupSRV = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) { + return net.LookupSRV(service, proto, name) + } + lookupTXT = func(ctx context.Context, name string) ([]string, error) { return net.LookupTXT(name) } +) diff --git a/vendor/google.golang.org/grpc/resolver/dns/go18.go b/vendor/google.golang.org/grpc/resolver/dns/go18.go new file mode 100644 index 0000000000..fa34f14cad --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/dns/go18.go @@ -0,0 +1,29 @@ +// +build go1.8 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package dns + +import "net" + +var ( + lookupHost = net.DefaultResolver.LookupHost + lookupSRV = net.DefaultResolver.LookupSRV + lookupTXT = net.DefaultResolver.LookupTXT +) diff --git a/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go new file mode 100644 index 0000000000..b76010d74d --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go @@ -0,0 +1,57 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package passthrough implements a pass-through resolver. It sends the target +// name without scheme back to gRPC as resolved address. 
+package passthrough + +import "google.golang.org/grpc/resolver" + +const scheme = "passthrough" + +type passthroughBuilder struct{} + +func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) { + r := &passthroughResolver{ + target: target, + cc: cc, + } + r.start() + return r, nil +} + +func (*passthroughBuilder) Scheme() string { + return scheme +} + +type passthroughResolver struct { + target resolver.Target + cc resolver.ClientConn +} + +func (r *passthroughResolver) start() { + r.cc.NewAddress([]resolver.Address{{Addr: r.target.Endpoint}}) +} + +func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOption) {} + +func (*passthroughResolver) Close() {} + +func init() { + resolver.Register(&passthroughBuilder{}) +} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index 918a6c7dbd..0dd887fa54 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -24,7 +24,7 @@ var ( // m is a map from scheme to resolver builder. m = make(map[string]Builder) // defaultScheme is the default scheme to use. - defaultScheme string + defaultScheme = "passthrough" ) // TODO(bar) install dns resolver in init(){}. @@ -102,13 +102,21 @@ type ClientConn interface { NewServiceConfig(serviceConfig string) } +// Target represents a target for gRPC, as specified in: +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +type Target struct { + Scheme string + Authority string + Endpoint string +} + // Builder creates a resolver that will be used to watch name resolution updates. type Builder interface { // Build creates a new resolver for the given target. // // gRPC dial calls Build synchronously, and fails if the returned error is // not nil. - Build(target string, cc ClientConn, opts BuildOption) (Resolver, error) + Build(target Target, cc ClientConn, opts BuildOption) (Resolver, error) // Scheme returns the scheme supported by this resolver. // Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md. Scheme() string @@ -126,3 +134,10 @@ type Resolver interface { // Close closes the resolver. Close() } + +// UnregisterForTesting removes the resolver builder with the given scheme from the +// resolver map. +// This function is for testing only. +func UnregisterForTesting(scheme string) { + delete(m, scheme) +} diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go new file mode 100644 index 0000000000..c07e174a84 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -0,0 +1,144 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "fmt" + "strings" + + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +// ccResolverWrapper is a wrapper on top of cc for resolvers. 
+// It implements resolver.ClientConnection interface. +type ccResolverWrapper struct { + cc *ClientConn + resolver resolver.Resolver + addrCh chan []resolver.Address + scCh chan string + done chan struct{} +} + +// split2 returns the values from strings.SplitN(s, sep, 2). +// If sep is not found, it returns ("", s, false) instead. +func split2(s, sep string) (string, string, bool) { + spl := strings.SplitN(s, sep, 2) + if len(spl) < 2 { + return "", "", false + } + return spl[0], spl[1], true +} + +// parseTarget splits target into a struct containing scheme, authority and +// endpoint. +func parseTarget(target string) (ret resolver.Target) { + var ok bool + ret.Scheme, ret.Endpoint, ok = split2(target, "://") + if !ok { + return resolver.Target{Endpoint: target} + } + ret.Authority, ret.Endpoint, _ = split2(ret.Endpoint, "/") + return ret +} + +// newCCResolverWrapper parses cc.target for scheme and gets the resolver +// builder for this scheme. It then builds the resolver and starts the +// monitoring goroutine for it. +func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) { + grpclog.Infof("dialing to target with scheme: %q", cc.parsedTarget.Scheme) + + rb := resolver.Get(cc.parsedTarget.Scheme) + if rb == nil { + return nil, fmt.Errorf("could not get resolver for scheme: %q", cc.parsedTarget.Scheme) + } + + ccr := &ccResolverWrapper{ + cc: cc, + addrCh: make(chan []resolver.Address, 1), + scCh: make(chan string, 1), + done: make(chan struct{}), + } + + var err error + ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, resolver.BuildOption{}) + if err != nil { + return nil, err + } + go ccr.watcher() + return ccr, nil +} + +// watcher processes address updates and service config updates sequencially. +// Otherwise, we need to resolve possible races between address and service +// config (e.g. they specify different balancer types). +func (ccr *ccResolverWrapper) watcher() { + for { + select { + case <-ccr.done: + return + default: + } + + select { + case addrs := <-ccr.addrCh: + select { + case <-ccr.done: + return + default: + } + grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs) + ccr.cc.handleResolvedAddrs(addrs, nil) + case sc := <-ccr.scCh: + select { + case <-ccr.done: + return + default: + } + grpclog.Infof("ccResolverWrapper: got new service config: %v", sc) + ccr.cc.handleServiceConfig(sc) + case <-ccr.done: + return + } + } +} + +func (ccr *ccResolverWrapper) close() { + ccr.resolver.Close() + close(ccr.done) +} + +// NewAddress is called by the resolver implemenetion to send addresses to gRPC. +func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { + select { + case <-ccr.addrCh: + default: + } + ccr.addrCh <- addrs +} + +// NewServiceConfig is called by the resolver implemenetion to send service +// configs to gPRC. +func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { + select { + case <-ccr.scCh: + default: + } + ccr.scCh <- sc +} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 423d7d3cf4..9c8d88190e 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -193,12 +193,11 @@ func Peer(peer *peer.Peer) CallOption { } // FailFast configures the action to take when an RPC is attempted on broken -// connections or unreachable servers. If failfast is true, the RPC will fail +// connections or unreachable servers. If failFast is true, the RPC will fail // immediately. 
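The split2/parseTarget helpers above are small enough to exercise standalone. The sketch below uses local copies of the two functions (nothing beyond the standard library is assumed) to show how a few representative targets are decomposed. Note that split2's doc comment above says the not-found case returns ("", s, false) while the code returns ("", "", false); parseTarget only relies on the boolean, so the behaviour is unaffected.

package main

import (
	"fmt"
	"strings"
)

// split2 and parseTarget mirror the helpers added in resolver_conn_wrapper.go.
func split2(s, sep string) (string, string, bool) {
	spl := strings.SplitN(s, sep, 2)
	if len(spl) < 2 {
		return "", "", false
	}
	return spl[0], spl[1], true
}

type target struct{ Scheme, Authority, Endpoint string }

func parseTarget(t string) (ret target) {
	var ok bool
	ret.Scheme, ret.Endpoint, ok = split2(t, "://")
	if !ok {
		return target{Endpoint: t}
	}
	ret.Authority, ret.Endpoint, _ = split2(ret.Endpoint, "/")
	return ret
}

func main() {
	for _, s := range []string{
		"dns://8.8.8.8/foo.example.com:443", // scheme, authority and endpoint
		"passthrough:///localhost:50051",    // empty authority
		"localhost:50051",                   // no scheme: everything becomes the endpoint
	} {
		fmt.Printf("%-36s -> %+v\n", s, parseTarget(s))
	}
}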
Otherwise, the RPC client will block the call until a -// connection is available (or the call is canceled or times out) and will retry -// the call if it fails due to a transient error. Please refer to -// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. -// Note: failFast is default to true. +// connection is available (or the call is canceled or times out). Please refer +// to https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. +// The default behavior of RPCs is to fail fast. func FailFast(failFast bool) CallOption { return beforeCall(func(c *callInfo) error { c.failFast = failFast @@ -275,7 +274,10 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt if length == 0 { return pf, nil, nil } - if length > uint32(maxReceiveMessageSize) { + if int64(length) > int64(maxInt) { + return 0, nil, Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt) + } + if int(length) > maxReceiveMessageSize { return 0, nil, Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) } // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead @@ -440,80 +442,21 @@ func Errorf(c codes.Code, format string, a ...interface{}) error { return status.Errorf(c, format, a...) } -// MethodConfig defines the configuration recommended by the service providers for a -// particular method. -// This is EXPERIMENTAL and subject to change. -type MethodConfig struct { - // WaitForReady indicates whether RPCs sent to this method should wait until - // the connection is ready by default (!failfast). The value specified via the - // gRPC client API will override the value set here. - WaitForReady *bool - // Timeout is the default timeout for RPCs sent to this method. The actual - // deadline used will be the minimum of the value specified here and the value - // set by the application via the gRPC client API. If either one is not set, - // then the other will be used. If neither is set, then the RPC has no deadline. - Timeout *time.Duration - // MaxReqSize is the maximum allowed payload size for an individual request in a - // stream (client->server) in bytes. The size which is measured is the serialized - // payload after per-message compression (but before stream compression) in bytes. - // The actual value used is the minumum of the value specified here and the value set - // by the application via the gRPC client API. If either one is not set, then the other - // will be used. If neither is set, then the built-in default is used. - MaxReqSize *int - // MaxRespSize is the maximum allowed payload size for an individual response in a - // stream (server->client) in bytes. - MaxRespSize *int -} - -// ServiceConfig is provided by the service provider and contains parameters for how -// clients that connect to the service should behave. -// This is EXPERIMENTAL and subject to change. -type ServiceConfig struct { - // LB is the load balancer the service providers recommends. The balancer specified - // via grpc.WithBalancer will override this. - LB Balancer - // Methods contains a map for the methods in this service. - // If there is an exact match for a method (i.e. /service/method) in the map, use the corresponding MethodConfig. - // If there's no exact match, look for the default config for the service (/service/) and use the corresponding MethodConfig if it exists. 
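The new recvMsg guard above checks the received length twice: first against the platform's largest int (so the conversion cannot overflow on 32-bit builds), then against the configured maxReceiveMessageSize. A standalone sketch of just that logic, with plain errors instead of status errors:

package main

import "fmt"

const maxInt = int(^uint(0) >> 1) // same definition as the constant added in service_config.go

// checkLength mirrors the two-step guard in parser.recvMsg: make sure the
// 32-bit wire length even fits in this platform's int, then compare it against
// the configured per-message limit.
func checkLength(length uint32, maxReceiveMessageSize int) error {
	if int64(length) > int64(maxInt) {
		return fmt.Errorf("received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt)
	}
	if int(length) > maxReceiveMessageSize {
		return fmt.Errorf("received message larger than max (%d vs. %d)", length, maxReceiveMessageSize)
	}
	return nil
}

func main() {
	fmt.Println(checkLength(4096, 4*1024*1024)) // <nil>
	fmt.Println(checkLength(1<<31-1, 4096))     // larger than the 4 KiB limit
}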
- // Otherwise, the method has no MethodConfig to use. - Methods map[string]MethodConfig -} - -func min(a, b *int) *int { - if *a < *b { - return a - } - return b -} - -func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { - if mcMax == nil && doptMax == nil { - return &defaultVal - } - if mcMax != nil && doptMax != nil { - return min(mcMax, doptMax) - } - if mcMax != nil { - return mcMax - } - return doptMax -} - -// SupportPackageIsVersion3 is referenced from generated protocol buffer files. -// The latest support package version is 4. -// SupportPackageIsVersion3 is kept for compability. It will be removed in the -// next support package version update. -const SupportPackageIsVersion3 = true - -// SupportPackageIsVersion4 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the grpc package. +// The SupportPackageIsVersion variables are referenced from generated protocol +// buffer files to ensure compatibility with the gRPC version used. The latest +// support package version is 5. // -// This constant may be renamed in the future if a change in the generated code -// requires a synchronised update of grpc-go and protoc-gen-go. This constant -// should not be referenced from any other code. -const SupportPackageIsVersion4 = true +// Older versions are kept for compatibility. They may be removed if +// compatibility cannot be maintained. +// +// These constants should not be referenced from any other code. +const ( + SupportPackageIsVersion3 = true + SupportPackageIsVersion4 = true + SupportPackageIsVersion5 = true +) // Version is the current grpc version. -const Version = "1.7.0-dev" +const Version = "1.8.0-dev" const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 83fd168c7c..def301a19a 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -96,6 +96,11 @@ type Server struct { cv *sync.Cond m map[string]*service // service name -> service info events trace.EventLog + + quit chan struct{} + done chan struct{} + quitOnce sync.Once + doneOnce sync.Once } type options struct { @@ -116,6 +121,8 @@ type options struct { keepalivePolicy keepalive.EnforcementPolicy initialWindowSize int32 initialConnWindowSize int32 + writeBufferSize int + readBufferSize int } var defaultServerOptions = options{ @@ -126,6 +133,22 @@ var defaultServerOptions = options{ // A ServerOption sets options such as credentials, codec and keepalive parameters, etc. type ServerOption func(*options) +// WriteBufferSize lets you set the size of write buffer, this determines how much data can be batched +// before doing a write on the wire. +func WriteBufferSize(s int) ServerOption { + return func(o *options) { + o.writeBufferSize = s + } +} + +// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most +// for one read syscall. +func ReadBufferSize(s int) ServerOption { + return func(o *options) { + o.readBufferSize = s + } +} + // InitialWindowSize returns a ServerOption that sets window size for stream. // The lower bound for window size is 64K and any value smaller than that will be ignored. func InitialWindowSize(s int32) ServerOption { @@ -260,7 +283,7 @@ func StatsHandler(h stats.Handler) ServerOption { // handler that will be invoked instead of returning the "unimplemented" gRPC // error whenever a request is received for an unregistered service or method. 
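The two new server options above (WriteBufferSize and ReadBufferSize) are plain functional options that flow into the transport's ServerConfig. A short usage sketch; the 64 KiB values and the listener address are arbitrary examples, not recommendations:

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	// Batch up to 64 KiB before each write, and read up to 64 KiB per syscall.
	s := grpc.NewServer(
		grpc.WriteBufferSize(64*1024),
		grpc.ReadBufferSize(64*1024),
	)
	// Register services here, then serve.
	log.Fatal(s.Serve(lis))
}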
// The handling function has full access to the Context of the request and the -// stream, and the invocation passes through interceptors. +// stream, and the invocation bypasses interceptors. func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { return func(o *options) { o.unknownStreamDesc = &StreamDesc{ @@ -289,6 +312,8 @@ func NewServer(opt ...ServerOption) *Server { opts: opts, conns: make(map[io.Closer]bool), m: make(map[string]*service), + quit: make(chan struct{}), + done: make(chan struct{}), } s.cv = sync.NewCond(&s.mu) s.ctx, s.cancel = context.WithCancel(context.Background()) @@ -400,11 +425,9 @@ func (s *Server) GetServiceInfo() map[string]ServiceInfo { return ret } -var ( - // ErrServerStopped indicates that the operation is now illegal because of - // the server being stopped. - ErrServerStopped = errors.New("grpc: the server has been stopped") -) +// ErrServerStopped indicates that the operation is now illegal because of +// the server being stopped. +var ErrServerStopped = errors.New("grpc: the server has been stopped") func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { if s.opts.creds == nil { @@ -469,6 +492,14 @@ func (s *Server) Serve(lis net.Listener) error { s.mu.Lock() s.printf("done serving; Accept = %v", err) s.mu.Unlock() + + // If Stop or GracefulStop is called, block until they are done and return nil + select { + case <-s.quit: + <-s.done + return nil + default: + } return err } tempDelay = 0 @@ -524,6 +555,8 @@ func (s *Server) serveHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) KeepalivePolicy: s.opts.keepalivePolicy, InitialWindowSize: s.opts.initialWindowSize, InitialConnWindowSize: s.opts.initialConnWindowSize, + WriteBufferSize: s.opts.writeBufferSize, + ReadBufferSize: s.opts.readBufferSize, } st, err := transport.NewServerTransport("http2", c, config) if err != nil { @@ -1034,6 +1067,16 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str // pending RPCs on the client side will get notified by connection // errors. func (s *Server) Stop() { + s.quitOnce.Do(func() { + close(s.quit) + }) + + defer func() { + s.doneOnce.Do(func() { + close(s.done) + }) + }() + s.mu.Lock() listeners := s.lis s.lis = nil @@ -1063,6 +1106,16 @@ func (s *Server) Stop() { // accepting new connections and RPCs and blocks until all the pending RPCs are // finished. func (s *Server) GracefulStop() { + s.quitOnce.Do(func() { + close(s.quit) + }) + + defer func() { + s.doneOnce.Do(func() { + close(s.done) + }) + }() + s.mu.Lock() defer s.mu.Unlock() if s.conns == nil { diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go new file mode 100644 index 0000000000..cde6483348 --- /dev/null +++ b/vendor/google.golang.org/grpc/service_config.go @@ -0,0 +1,199 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
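The quit/done plumbing added to Server above is a reusable shutdown pattern: Stop and GracefulStop may both be called, in any order and more than once, so each channel is closed through its own sync.Once, and Serve distinguishes "the listener failed" from "we were asked to stop" by selecting on quit and then waiting on done. A condensed sketch of the same idea outside of gRPC; the server type and its methods here are hypothetical:

package main

import (
	"errors"
	"fmt"
	"sync"
	"time"
)

type server struct {
	quit     chan struct{}
	done     chan struct{}
	quitOnce sync.Once
	doneOnce sync.Once
}

func newServer() *server {
	return &server{quit: make(chan struct{}), done: make(chan struct{})}
}

// serve pretends its accept loop failed, then checks whether the failure was
// caused by stop; if so it reports a clean shutdown instead of the error.
func (s *server) serve() error {
	<-s.quit // stand-in for "Accept returned an error because the listener closed"
	err := errors.New("use of closed network connection")
	select {
	case <-s.quit:
		<-s.done // block until stop has finished its cleanup
		return nil
	default:
	}
	return err
}

func (s *server) stop() {
	s.quitOnce.Do(func() { close(s.quit) })
	defer s.doneOnce.Do(func() { close(s.done) })
	// ... close listeners and connections here ...
}

func main() {
	s := newServer()
	go func() {
		time.Sleep(10 * time.Millisecond)
		s.stop()
	}()
	fmt.Println("serve returned:", s.serve()) // serve returned: <nil>
}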
+ * + */ + +package grpc + +import ( + "encoding/json" + "time" + + "google.golang.org/grpc/grpclog" +) + +const maxInt = int(^uint(0) >> 1) + +// MethodConfig defines the configuration recommended by the service providers for a +// particular method. +// DEPRECATED: Users should not use this struct. Service config should be received +// through name resolver, as specified here +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +type MethodConfig struct { + // WaitForReady indicates whether RPCs sent to this method should wait until + // the connection is ready by default (!failfast). The value specified via the + // gRPC client API will override the value set here. + WaitForReady *bool + // Timeout is the default timeout for RPCs sent to this method. The actual + // deadline used will be the minimum of the value specified here and the value + // set by the application via the gRPC client API. If either one is not set, + // then the other will be used. If neither is set, then the RPC has no deadline. + Timeout *time.Duration + // MaxReqSize is the maximum allowed payload size for an individual request in a + // stream (client->server) in bytes. The size which is measured is the serialized + // payload after per-message compression (but before stream compression) in bytes. + // The actual value used is the minimum of the value specified here and the value set + // by the application via the gRPC client API. If either one is not set, then the other + // will be used. If neither is set, then the built-in default is used. + MaxReqSize *int + // MaxRespSize is the maximum allowed payload size for an individual response in a + // stream (server->client) in bytes. + MaxRespSize *int +} + +// ServiceConfig is provided by the service provider and contains parameters for how +// clients that connect to the service should behave. +// DEPRECATED: Users should not use this struct. Service config should be received +// through name resolver, as specified here +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +type ServiceConfig struct { + // LB is the load balancer the service providers recommends. The balancer specified + // via grpc.WithBalancer will override this. + LB *string + // Methods contains a map for the methods in this service. + // If there is an exact match for a method (i.e. /service/method) in the map, use the corresponding MethodConfig. + // If there's no exact match, look for the default config for the service (/service/) and use the corresponding MethodConfig if it exists. + // Otherwise, the method has no MethodConfig to use. + Methods map[string]MethodConfig +} + +func parseTimeout(t *string) (*time.Duration, error) { + if t == nil { + return nil, nil + } + d, err := time.ParseDuration(*t) + return &d, err +} + +type jsonName struct { + Service *string + Method *string +} + +func (j jsonName) generatePath() (string, bool) { + if j.Service == nil { + return "", false + } + res := "/" + *j.Service + "/" + if j.Method != nil { + res += *j.Method + } + return res, true +} + +// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. +type jsonMC struct { + Name *[]jsonName + WaitForReady *bool + Timeout *string + MaxRequestMessageBytes *int64 + MaxResponseMessageBytes *int64 +} + +// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. 
+type jsonSC struct { + LoadBalancingPolicy *string + MethodConfig *[]jsonMC +} + +func parseServiceConfig(js string) (ServiceConfig, error) { + var rsc jsonSC + err := json.Unmarshal([]byte(js), &rsc) + if err != nil { + grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + return ServiceConfig{}, err + } + sc := ServiceConfig{ + LB: rsc.LoadBalancingPolicy, + Methods: make(map[string]MethodConfig), + } + if rsc.MethodConfig == nil { + return sc, nil + } + + for _, m := range *rsc.MethodConfig { + if m.Name == nil { + continue + } + d, err := parseTimeout(m.Timeout) + if err != nil { + grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + return ServiceConfig{}, err + } + + mc := MethodConfig{ + WaitForReady: m.WaitForReady, + Timeout: d, + } + if m.MaxRequestMessageBytes != nil { + if *m.MaxRequestMessageBytes > int64(maxInt) { + mc.MaxReqSize = newInt(maxInt) + } else { + mc.MaxReqSize = newInt(int(*m.MaxRequestMessageBytes)) + } + } + if m.MaxResponseMessageBytes != nil { + if *m.MaxResponseMessageBytes > int64(maxInt) { + mc.MaxRespSize = newInt(maxInt) + } else { + mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes)) + } + } + for _, n := range *m.Name { + if path, valid := n.generatePath(); valid { + sc.Methods[path] = mc + } + } + } + + return sc, nil +} + +func min(a, b *int) *int { + if *a < *b { + return a + } + return b +} + +func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { + if mcMax == nil && doptMax == nil { + return &defaultVal + } + if mcMax != nil && doptMax != nil { + return min(mcMax, doptMax) + } + if mcMax != nil { + return mcMax + } + return doptMax +} + +func newBool(b bool) *bool { + return &b +} + +func newInt(b int) *int { + return &b +} + +func newDuration(b time.Duration) *time.Duration { + return &b +} + +func newString(b string) *string { + return &b +} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index e7a14d9226..d5aa2f793b 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -135,8 +135,6 @@ func (s *OutPayload) isRPCStats() {} type OutHeader struct { // Client is true if this OutHeader is from client side. Client bool - // WireLength is the wire length of header. - WireLength int // The following fields are valid only if Client is true. // FullMethod is the full RPC method string, i.e., /package.service/method. @@ -220,7 +218,7 @@ type outgoingTagsKey struct{} // the outgoing RPC with the header grpc-tags-bin. Subsequent calls to // SetTags will overwrite the values from earlier calls. // -// NOTE: this is provided only for backward compatibilty with existing clients +// NOTE: this is provided only for backward compatibility with existing clients // and will likely be removed in an upcoming release. New uses should transmit // this type of data using metadata with a different, non-reserved (i.e. does // not begin with "grpc-") header name. @@ -230,7 +228,7 @@ func SetTags(ctx context.Context, b []byte) context.Context { // Tags returns the tags from the context for the inbound RPC. // -// NOTE: this is provided only for backward compatibilty with existing clients +// NOTE: this is provided only for backward compatibility with existing clients // and will likely be removed in an upcoming release. New uses should transmit // this type of data using metadata with a different, non-reserved (i.e. does // not begin with "grpc-") header name. 
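parseServiceConfig above consumes the JSON form of a service config delivered by the name resolver. Since that parser is unexported, the sketch below simply mirrors the jsonSC/jsonMC shapes locally and unmarshals a hypothetical config (the policy, service, method and limits are invented) to show how a method entry maps onto the "/service/method" key produced by generatePath:

package main

import (
	"encoding/json"
	"fmt"
)

// Local copies of the JSON shapes used by parseServiceConfig in this diff.
type jsonName struct {
	Service *string
	Method  *string
}

type jsonMC struct {
	Name                    *[]jsonName
	WaitForReady            *bool
	Timeout                 *string
	MaxRequestMessageBytes  *int64
	MaxResponseMessageBytes *int64
}

type jsonSC struct {
	LoadBalancingPolicy *string
	MethodConfig        *[]jsonMC
}

func main() {
	js := `{
	  "loadBalancingPolicy": "round_robin",
	  "methodConfig": [{
	    "name": [{"service": "helloworld.Greeter", "method": "SayHello"}],
	    "waitForReady": true,
	    "timeout": "2s",
	    "maxRequestMessageBytes": 1048576
	  }]
	}`
	var sc jsonSC
	if err := json.Unmarshal([]byte(js), &sc); err != nil {
		panic(err)
	}
	m := (*sc.MethodConfig)[0]
	n := (*m.Name)[0]
	// parseServiceConfig keys its Methods map by "/<service>/<method>".
	fmt.Printf("path=/%s/%s waitForReady=%v timeout=%s\n", *n.Service, *n.Method, *m.WaitForReady, *m.Timeout)
}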
@@ -262,7 +260,7 @@ type outgoingTraceKey struct{} // the outgoing RPC with the header grpc-trace-bin. Subsequent calls to // SetTrace will overwrite the values from earlier calls. // -// NOTE: this is provided only for backward compatibilty with existing clients +// NOTE: this is provided only for backward compatibility with existing clients // and will likely be removed in an upcoming release. New uses should transmit // this type of data using metadata with a different, non-reserved (i.e. does // not begin with "grpc-") header name. @@ -272,7 +270,7 @@ func SetTrace(ctx context.Context, b []byte) context.Context { // Trace returns the trace from the context for the inbound RPC. // -// NOTE: this is provided only for backward compatibilty with existing clients +// NOTE: this is provided only for backward compatibility with existing clients // and will likely be removed in an upcoming release. New uses should transmit // this type of data using metadata with a different, non-reserved (i.e. does // not begin with "grpc-") header name. diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 9a1965a47e..b58f7f8d1d 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -94,20 +94,28 @@ type ClientStream interface { Stream } -// NewClientStream creates a new Stream for the client side. This is called -// by generated code. -func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { +// NewStream creates a new Stream for the client side. This is typically +// called by generated code. +func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { if cc.dopts.streamInt != nil { return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...) } return newClientStream(ctx, desc, cc, method, opts...) } +// NewClientStream creates a new Stream for the client side. This is typically +// called by generated code. +// +// DEPRECATED: Use ClientConn.NewStream instead. +func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { + return cc.NewStream(ctx, desc, method, opts...) +} + func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { var ( t transport.ClientTransport s *transport.Stream - put func(balancer.DoneInfo) + done func(balancer.DoneInfo) cancel context.CancelFunc ) c := defaultCallInfo() @@ -189,11 +197,8 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } }() } - gopts := BalancerGetOptions{ - BlockingWait: !c.failFast, - } for { - t, put, err = cc.getTransport(ctx, gopts) + t, done, err = cc.getTransport(ctx, c.failFast) if err != nil { // TODO(zhaoq): Probably revisit the error handling. if _, ok := status.FromError(err); ok { @@ -211,15 +216,15 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth s, err = t.NewStream(ctx, callHdr) if err != nil { - if _, ok := err.(transport.ConnectionError); ok && put != nil { + if _, ok := err.(transport.ConnectionError); ok && done != nil { // If error is connection error, transport was sending data on wire, // and we are not sure if anything has been sent on wire. // If error is not connection error, we are sure nothing has been sent. 
updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false}) } - if put != nil { - put(balancer.DoneInfo{Err: err}) - put = nil + if done != nil { + done(balancer.DoneInfo{Err: err}) + done = nil } if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast { continue @@ -241,10 +246,10 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth dc: cc.dopts.dc, cancel: cancel, - put: put, - t: t, - s: s, - p: &parser{r: s}, + done: done, + t: t, + s: s, + p: &parser{r: s}, tracing: EnableTracing, trInfo: trInfo, @@ -294,7 +299,7 @@ type clientStream struct { tracing bool // set to EnableTracing when the clientStream is created. mu sync.Mutex - put func(balancer.DoneInfo) + done func(balancer.DoneInfo) closed bool finished bool // trInfo.tr is set when the clientStream is created (if EnableTracing is true), @@ -488,13 +493,13 @@ func (cs *clientStream) finish(err error) { for _, o := range cs.opts { o.after(cs.c) } - if cs.put != nil { + if cs.done != nil { updateRPCInfoInContext(cs.s.Context(), rpcInfo{ bytesSent: cs.s.BytesSent(), bytesReceived: cs.s.BytesReceived(), }) - cs.put(balancer.DoneInfo{Err: err}) - cs.put = nil + cs.done(balancer.DoneInfo{Err: err}) + cs.done = nil } if cs.statsHandler != nil { end := &stats.End{ @@ -658,3 +663,13 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { } return nil } + +// MethodFromServerStream returns the method string for the input stream. +// The returned string is in the format of "/service/method". +func MethodFromServerStream(stream ServerStream) (string, bool) { + s, ok := transport.StreamFromContext(stream.Context()) + if !ok { + return "", ok + } + return s.Method(), ok +} diff --git a/vendor/google.golang.org/grpc/transport/bdp_estimator.go b/vendor/google.golang.org/grpc/transport/bdp_estimator.go index 667edb80b2..63cd2627c8 100644 --- a/vendor/google.golang.org/grpc/transport/bdp_estimator.go +++ b/vendor/google.golang.org/grpc/transport/bdp_estimator.go @@ -41,12 +41,9 @@ const ( gamma = 2 ) -var ( - // Adding arbitrary data to ping so that its ack can be - // identified. - // Easter-egg: what does the ping message say? - bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}} -) +// Adding arbitrary data to ping so that its ack can be identified. +// Easter-egg: what does the ping message say? +var bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}} type bdpEstimator struct { // sentAt is the time when the ping was sent. @@ -59,7 +56,7 @@ type bdpEstimator struct { sample uint32 // bwMax is the maximum bandwidth noted so far (bytes/sec). bwMax float64 - // bool to keep track of the begining of a new measurement cycle. + // bool to keep track of the beginning of a new measurement cycle. isSent bool // Callback to update the window sizes. updateFlowControl func(n uint32) @@ -70,7 +67,7 @@ type bdpEstimator struct { } // timesnap registers the time bdp ping was sent out so that -// network rtt can be calculated when its ack is recieved. +// network rtt can be calculated when its ack is received. // It is called (by controller) when the bdpPing is // being written on the wire. 
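The new MethodFromServerStream helper above pairs naturally with the UnknownServiceHandler option touched earlier in this patch: a catch-all StreamHandler receives no *StreamServerInfo, so the "/service/method" string has to be recovered from the stream's context. A sketch of such a handler; the proxy-style behaviour and messages are hypothetical:

package main

import (
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// Catch-all handler for unregistered services. MethodFromServerStream (added
	// in this diff) recovers the full method name, which is otherwise unavailable
	// because no *StreamServerInfo is passed to this handler.
	unknown := func(srv interface{}, stream grpc.ServerStream) error {
		method, ok := grpc.MethodFromServerStream(stream)
		if !ok {
			return status.Errorf(codes.Internal, "no method in stream context")
		}
		fmt.Println("received call for unknown method:", method)
		return status.Errorf(codes.Unimplemented, "%s is not served here", method)
	}

	s := grpc.NewServer(grpc.UnknownServiceHandler(unknown))
	_ = s // register known services and call s.Serve(lis) as usual
}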
func (b *bdpEstimator) timesnap(d [8]byte) { @@ -119,7 +116,7 @@ func (b *bdpEstimator) calculate(d [8]byte) { b.rtt += (rttSample - b.rtt) * float64(alpha) } b.isSent = false - // The number of bytes accumalated so far in the sample is smaller + // The number of bytes accumulated so far in the sample is smaller // than or equal to 1.5 times the real BDP on a saturated connection. bwCurrent := float64(b.sample) / (b.rtt * float64(1.5)) if bwCurrent > b.bwMax { diff --git a/vendor/google.golang.org/grpc/transport/control.go b/vendor/google.golang.org/grpc/transport/control.go index 77914de18b..dd1a8d42e7 100644 --- a/vendor/google.golang.org/grpc/transport/control.go +++ b/vendor/google.golang.org/grpc/transport/control.go @@ -26,6 +26,7 @@ import ( "time" "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" ) const ( @@ -56,7 +57,9 @@ const ( // control tasks, e.g., flow control, settings, streaming resetting, etc. type headerFrame struct { - p http2.HeadersFrameParam + streamID uint32 + hf []hpack.HeaderField + endStream bool } func (*headerFrame) item() {} diff --git a/vendor/google.golang.org/grpc/transport/handler_server.go b/vendor/google.golang.org/grpc/transport/handler_server.go index 0489fada52..f1f6caf89c 100644 --- a/vendor/google.golang.org/grpc/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/transport/handler_server.go @@ -173,7 +173,6 @@ func (ht *serverHandlerTransport) do(fn func()) error { case <-ht.closedCh: return ErrConnClosing } - } } @@ -183,6 +182,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro ht.mu.Unlock() return nil } + ht.streamDone = true ht.mu.Unlock() err := ht.do(func() { ht.writeCommonHeaders(s) @@ -223,9 +223,6 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } }) close(ht.writes) - ht.mu.Lock() - ht.streamDone = true - ht.mu.Unlock() return err } diff --git a/vendor/google.golang.org/grpc/transport/http2_client.go b/vendor/google.golang.org/grpc/transport/http2_client.go index 4d811a8b98..90cb590180 100644 --- a/vendor/google.golang.org/grpc/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/transport/http2_client.go @@ -43,7 +43,7 @@ import ( // http2Client implements the ClientTransport interface with HTTP2. type http2Client struct { ctx context.Context - target string // server name/addr + cancel context.CancelFunc userAgent string md interface{} conn net.Conn // underlying communication channel @@ -52,13 +52,6 @@ type http2Client struct { authInfo credentials.AuthInfo // auth info about the connection nextID uint32 // the next stream ID to be used - // shutdownChan is closed when Close is called. - // Blocking operations should select on shutdownChan to avoid - // blocking forever after Close. - // TODO(zhaoq): Maybe have a channel context? - shutdownChan chan struct{} - // errorChan is closed to notify the I/O error to the caller. - errorChan chan struct{} // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) // that the server sent GoAway on this transport. goAway chan struct{} @@ -149,9 +142,20 @@ func isTemporary(err error) bool { // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. 
-func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) (_ ClientTransport, err error) { +func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions, timeout time.Duration) (_ ClientTransport, err error) { scheme := "http" - conn, err := dial(ctx, opts.Dialer, addr.Addr) + ctx, cancel := context.WithCancel(ctx) + connectCtx, connectCancel := context.WithTimeout(ctx, timeout) + defer func() { + if err != nil { + cancel() + // Don't call connectCancel in success path due to a race in Go 1.6: + // https://github.com/golang/go/issues/15078. + connectCancel() + } + }() + + conn, err := dial(connectCtx, opts.Dialer, addr.Addr) if err != nil { if opts.FailOnNonTempDialError { return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) @@ -170,7 +174,7 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) ( ) if creds := opts.TransportCredentials; creds != nil { scheme = "https" - conn, authInfo, err = creds.ClientHandshake(ctx, addr.Addr, conn) + conn, authInfo, err = creds.ClientHandshake(connectCtx, addr.Authority, conn) if err != nil { // Credentials handshake errors are typically considered permanent // to avoid retrying on e.g. bad certificates. @@ -193,9 +197,18 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) ( icwz = opts.InitialConnWindowSize dynamicWindow = false } + var buf bytes.Buffer + writeBufSize := defaultWriteBufSize + if opts.WriteBufferSize > 0 { + writeBufSize = opts.WriteBufferSize + } + readBufSize := defaultReadBufSize + if opts.ReadBufferSize > 0 { + readBufSize = opts.ReadBufferSize + } t := &http2Client{ ctx: ctx, - target: addr.Addr, + cancel: cancel, userAgent: opts.UserAgent, md: addr.Metadata, conn: conn, @@ -204,11 +217,11 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) ( authInfo: authInfo, // The client initiated stream id is odd starting from 1. nextID: 1, - shutdownChan: make(chan struct{}), - errorChan: make(chan struct{}), goAway: make(chan struct{}), awakenKeepalive: make(chan struct{}, 1), - framer: newFramer(conn), + hBuf: &buf, + hEnc: hpack.NewEncoder(&buf), + framer: newFramer(conn, writeBufSize, readBufSize), controlBuf: newControlBuffer(), fc: &inFlow{limit: uint32(icwz)}, sendQuotaPool: newQuotaPool(defaultWindowSize), @@ -281,7 +294,10 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) ( } } t.framer.writer.Flush() - go loopyWriter(t.controlBuf, t.shutdownChan, t.itemHandler) + go func() { + loopyWriter(t.ctx, t.controlBuf, t.itemHandler) + t.Close() + }() if t.kp.Time != infinity { go t.keepalive() } @@ -361,13 +377,13 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea authData[k] = v } } - callAuthData := make(map[string]string) + callAuthData := map[string]string{} // Check if credentials.PerRPCCredentials were provided via call options. // Note: if these credentials are provided both via dial options and call // options, then both sets of credentials will be applied. 
if callCreds := callHdr.Creds; callCreds != nil { if !t.isSecure && callCreds.RequireTransportSecurity() { - return nil, streamErrorf(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure conneciton") + return nil, streamErrorf(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") } data, err := callCreds.GetRequestMetadata(ctx, audience) if err != nil { @@ -393,7 +409,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea return nil, ErrConnClosing } t.mu.Unlock() - sq, err := wait(ctx, nil, nil, t.shutdownChan, t.streamsQuota.acquire()) + sq, err := wait(ctx, t.ctx, nil, nil, t.streamsQuota.acquire()) if err != nil { return nil, err } @@ -401,40 +417,40 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea if sq > 1 { t.streamsQuota.add(sq - 1) } - // HPACK encodes various headers. - hBuf := bytes.NewBuffer([]byte{}) - hEnc := hpack.NewEncoder(hBuf) - hEnc.WriteField(hpack.HeaderField{Name: ":method", Value: "POST"}) - hEnc.WriteField(hpack.HeaderField{Name: ":scheme", Value: t.scheme}) - hEnc.WriteField(hpack.HeaderField{Name: ":path", Value: callHdr.Method}) - hEnc.WriteField(hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) - hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) - hEnc.WriteField(hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) - hEnc.WriteField(hpack.HeaderField{Name: "te", Value: "trailers"}) + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + // Make the slice of certain predictable size to reduce allocations made by append. + hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te + hfLen += len(authData) + len(callAuthData) + headerFields := make([]hpack.HeaderField, 0, hfLen) + headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) + headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"}) if callHdr.SendCompress != "" { - hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) } if dl, ok := ctx.Deadline(); ok { // Send out timeout regardless its value. The server can detect timeout context by itself. + // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire. 
timeout := dl.Sub(time.Now()) - hEnc.WriteField(hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)}) } - for k, v := range authData { - hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } for k, v := range callAuthData { - hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } - var ( - endHeaders bool - ) if b := stats.OutgoingTags(ctx); b != nil { - hEnc.WriteField(hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)}) } if b := stats.OutgoingTrace(ctx); b != nil { - hEnc.WriteField(hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) } if md, ok := metadata.FromOutgoingContext(ctx); ok { for k, vv := range md { @@ -443,7 +459,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea continue } for _, v := range vv { - hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } } } @@ -453,7 +469,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea continue } for _, v := range vv { - hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } } } @@ -482,34 +498,11 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea default: } } - first := true - bufLen := hBuf.Len() - // Sends the headers in a single batch even when they span multiple frames. - for !endHeaders { - size := hBuf.Len() - if size > http2MaxFrameLen { - size = http2MaxFrameLen - } else { - endHeaders = true - } - if first { - // Sends a HeadersFrame to server to start a new stream. - p := http2.HeadersFrameParam{ - StreamID: s.id, - BlockFragment: hBuf.Next(size), - EndStream: false, - EndHeaders: endHeaders, - } - // Do a force flush for the buffered frames iff it is the last headers frame - // and there is header metadata to be sent. Otherwise, there is flushing until - // the corresponding data frame is written. - t.controlBuf.put(&headerFrame{p}) - first = false - } else { - // Sends Continuation frames for the leftover headers. 
- t.controlBuf.put(&continuationFrame{streamID: s.id, endHeaders: endHeaders, headerBlockFragment: hBuf.Next(size)}) - } - } + t.controlBuf.put(&headerFrame{ + streamID: s.id, + hf: headerFields, + endStream: false, + }) t.mu.Unlock() s.mu.Lock() @@ -519,7 +512,6 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea if t.statsHandler != nil { outHeader := &stats.OutHeader{ Client: true, - WireLength: bufLen, FullMethod: callHdr.Method, RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, @@ -596,12 +588,9 @@ func (t *http2Client) Close() (err error) { t.mu.Unlock() return } - if t.state == reachable || t.state == draining { - close(t.errorChan) - } t.state = closing t.mu.Unlock() - close(t.shutdownChan) + t.cancel() err = t.conn.Close() t.mu.Lock() streams := t.activeStreams @@ -623,23 +612,18 @@ func (t *http2Client) Close() (err error) { } t.statsHandler.HandleConn(t.ctx, connEnd) } - return + return err } +// GracefulClose sets the state to draining, which prevents new streams from +// being created and causes the transport to be closed when the last active +// stream is closed. If there are no active streams, the transport is closed +// immediately. This does nothing if the transport is already draining or +// closing. func (t *http2Client) GracefulClose() error { t.mu.Lock() switch t.state { - case unreachable: - // The server may close the connection concurrently. t is not available for - // any streams. Close it now. - t.mu.Unlock() - t.Close() - return nil - case closing: - t.mu.Unlock() - return nil - } - if t.state == draining { + case closing, draining: t.mu.Unlock() return nil } @@ -658,7 +642,7 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) e select { case <-s.ctx.Done(): return ContextErr(s.ctx.Err()) - case <-t.shutdownChan: + case <-t.ctx.Done(): return ErrConnClosing default: } @@ -675,44 +659,51 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) e } hdr = append(hdr, data[:emptyLen]...) data = data[emptyLen:] + var ( + streamQuota int + streamQuotaVer uint32 + localSendQuota int + err error + sqChan <-chan int + ) for idx, r := range [][]byte{hdr, data} { for len(r) > 0 { size := http2MaxFrameLen - // Wait until the stream has some quota to send the data. - quotaChan, quotaVer := s.sendQuotaPool.acquireWithVersion() - sq, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, quotaChan) - if err != nil { - return err + if size > len(r) { + size = len(r) } + if streamQuota == 0 { // Used up all the locally cached stream quota. + sqChan, streamQuotaVer = s.sendQuotaPool.acquireWithVersion() + // Wait until the stream has some quota to send the data. + streamQuota, err = wait(s.ctx, t.ctx, s.done, s.goAway, sqChan) + if err != nil { + return err + } + } + if localSendQuota <= 0 { // Being a soft limit, it can go negative. + // Acquire local send quota to be able to write to the controlBuf. + localSendQuota, err = wait(s.ctx, t.ctx, s.done, s.goAway, s.localSendQuota.acquire()) + if err != nil { + return err + } + } + if size > streamQuota { + size = streamQuota + } // No need to do that for localSendQuota since that's only a soft limit. // Wait until the transport has some quota to send the data. 
- tq, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, t.sendQuotaPool.acquire()) + tq, err := wait(s.ctx, t.ctx, s.done, s.goAway, t.sendQuotaPool.acquire()) if err != nil { return err } - if sq < size { - size = sq - } if tq < size { size = tq } - if size > len(r) { - size = len(r) + if tq > size { // Overbooked transport quota. Return it back. + t.sendQuotaPool.add(tq - size) } + streamQuota -= size + localSendQuota -= size p := r[:size] - ps := len(p) - if ps < tq { - // Overbooked transport quota. Return it back. - t.sendQuotaPool.add(tq - ps) - } - // Acquire local send quota to be able to write to the controlBuf. - ltq, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, s.localSendQuota.acquire()) - if err != nil { - if _, ok := err.(ConnectionError); !ok { - t.sendQuotaPool.add(ps) - } - return err - } - s.localSendQuota.add(ltq - ps) // It's ok if we make it negative. var endStream bool // See if this is the last frame to be written. if opts.Last { @@ -727,21 +718,28 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) e } } success := func() { - t.controlBuf.put(&dataFrame{streamID: s.id, endStream: endStream, d: p, f: func() { s.localSendQuota.add(ps) }}) - if ps < sq { - s.sendQuotaPool.lockedAdd(sq - ps) - } - r = r[ps:] + sz := size + t.controlBuf.put(&dataFrame{streamID: s.id, endStream: endStream, d: p, f: func() { s.localSendQuota.add(sz) }}) + r = r[size:] } - failure := func() { - s.sendQuotaPool.lockedAdd(sq) + failure := func() { // The stream quota version must have changed. + // Our streamQuota cache is invalidated now, so give it back. + s.sendQuotaPool.lockedAdd(streamQuota + size) } - if !s.sendQuotaPool.compareAndExecute(quotaVer, success, failure) { - t.sendQuotaPool.add(ps) - s.localSendQuota.add(ps) + if !s.sendQuotaPool.compareAndExecute(streamQuotaVer, success, failure) { + // Couldn't send this chunk out. + t.sendQuotaPool.add(size) + localSendQuota += size + streamQuota = 0 } } } + if streamQuota > 0 { // Add the left over quota back to stream. + s.sendQuotaPool.add(streamQuota) + } + if localSendQuota > 0 { + s.localSendQuota.add(localSendQuota) + } if !opts.Last { return nil } @@ -770,7 +768,7 @@ func (t *http2Client) adjustWindow(s *Stream, n uint32) { return } if w := s.fc.maybeAdjust(n); w > 0 { - // Piggyback conneciton's window update along. + // Piggyback connection's window update along. if cw := t.fc.resetPendingUpdate(); cw > 0 { t.controlBuf.put(&windowUpdate{0, cw}) } @@ -841,7 +839,7 @@ func (t *http2Client) handleData(f *http2.DataFrame) { t.controlBuf.put(bdpPing) } else { if err := t.fc.onData(uint32(size)); err != nil { - t.notifyError(connectionErrorf(true, err, "%v", err)) + t.Close() return } if w := t.fc.onRead(uint32(size)); w > 0 { @@ -920,15 +918,28 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { s.write(recvMsg{err: io.EOF}) } -func (t *http2Client) handleSettings(f *http2.SettingsFrame) { +func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { if f.IsAck() { return } var ss []http2.Setting + isMaxConcurrentStreamsMissing := true f.ForeachSetting(func(s http2.Setting) error { + if s.ID == http2.SettingMaxConcurrentStreams { + isMaxConcurrentStreamsMissing = false + } ss = append(ss, s) return nil }) + if isFirst && isMaxConcurrentStreamsMissing { + // This means server is imposing no limits on + // maximum number of concurrent streams initiated by client. + // So we must remove our self-imposed limit. 
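The rewritten Write loop above caches the stream quota (streamQuota), a soft per-stream local send quota, and the transport-level quota, and sizes each frame as the minimum of the remaining payload, those quotas, and http2MaxFrameLen. A much-simplified, single-goroutine sketch of just the sizing arithmetic, with plain ints instead of quotaPools, no blocking, and invented numbers:

package main

import "fmt"

const http2MaxFrameLen = 16384 // the per-frame limit used by the transport package (16 KB)

// nextChunk mirrors the sizing logic in the rewritten Write loops: a chunk is
// bounded by the frame size, the remaining payload, the stream's send quota and
// the transport's send quota. The real code acquires these from quota pools and
// blocks until quota is available; here they are hypothetical plain ints.
func nextChunk(remaining, streamQuota, transportQuota int) int {
	size := http2MaxFrameLen
	if remaining < size {
		size = remaining
	}
	if streamQuota < size {
		size = streamQuota
	}
	if transportQuota < size {
		size = transportQuota
	}
	return size
}

func main() {
	remaining, streamQuota, transportQuota := 40000, 65535, 20000
	for remaining > 0 {
		size := nextChunk(remaining, streamQuota, transportQuota)
		fmt.Println("write frame of", size, "bytes")
		remaining -= size
		streamQuota -= size
		transportQuota -= size
		if transportQuota == 0 {
			fmt.Println("transport quota exhausted; the real code would block on sendQuotaPool")
			break
		}
	}
}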
+ ss = append(ss, http2.Setting{ + ID: http2.SettingMaxConcurrentStreams, + Val: math.MaxUint32, + }) + } // The settings will be applied once the ack is sent. t.controlBuf.put(&settings{ack: true, ss: ss}) } @@ -958,10 +969,10 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { id := f.LastStreamID if id > 0 && id%2 != 1 { t.mu.Unlock() - t.notifyError(connectionErrorf(true, nil, "received illegal http2 GOAWAY frame: stream ID %d is even", f.LastStreamID)) + t.Close() return } - // A client can recieve multiple GoAways from server (look at https://github.com/grpc/grpc-go/issues/1387). + // A client can receive multiple GoAways from server (look at https://github.com/grpc/grpc-go/issues/1387). // The idea is that the first GoAway will be sent with an ID of MaxInt32 and the second GoAway will be sent after an RTT delay // with the ID of the last stream the server will process. // Therefore, when we get the first GoAway we don't really close any streams. While in case of second GoAway we @@ -972,7 +983,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { // If there are multiple GoAways the first one should always have an ID greater than the following ones. if id > t.prevGoAwayID { t.mu.Unlock() - t.notifyError(connectionErrorf(true, nil, "received illegal http2 GOAWAY frame: previously recv GOAWAY frame with LastStramID %d, currently recv %d", id, f.LastStreamID)) + t.Close() return } default: @@ -1118,16 +1129,16 @@ func (t *http2Client) reader() { // Check the validity of server preface. frame, err := t.framer.fr.ReadFrame() if err != nil { - t.notifyError(err) + t.Close() return } atomic.CompareAndSwapUint32(&t.activity, 0, 1) sf, ok := frame.(*http2.SettingsFrame) if !ok { - t.notifyError(err) + t.Close() return } - t.handleSettings(sf) + t.handleSettings(sf, true) // loop to keep reading incoming messages on this transport. for { @@ -1148,7 +1159,7 @@ func (t *http2Client) reader() { continue } else { // Transport error. - t.notifyError(err) + t.Close() return } } @@ -1160,7 +1171,7 @@ func (t *http2Client) reader() { case *http2.RSTStreamFrame: t.handleRSTStream(frame) case *http2.SettingsFrame: - t.handleSettings(frame) + t.handleSettings(frame, false) case *http2.PingFrame: t.handlePing(frame) case *http2.GoAwayFrame: @@ -1200,13 +1211,11 @@ func (t *http2Client) applySettings(ss []http2.Setting) { } } +// TODO(mmukhi): A lot of this code(and code in other places in the tranpsort layer) +// is duplicated between the client and the server. +// The transport layer needs to be refactored to take care of this. 
func (t *http2Client) itemHandler(i item) error { var err error - defer func() { - if err != nil { - t.notifyError(err) - } - }() switch i := i.(type) { case *dataFrame: err = t.framer.fr.WriteData(i.streamID, i.endStream, i.d) @@ -1214,9 +1223,38 @@ func (t *http2Client) itemHandler(i item) error { i.f() } case *headerFrame: - err = t.framer.fr.WriteHeaders(i.p) - case *continuationFrame: - err = t.framer.fr.WriteContinuation(i.streamID, i.endHeaders, i.headerBlockFragment) + t.hBuf.Reset() + for _, f := range i.hf { + t.hEnc.WriteField(f) + } + endHeaders := false + first := true + for !endHeaders { + size := t.hBuf.Len() + if size > http2MaxFrameLen { + size = http2MaxFrameLen + } else { + endHeaders = true + } + if first { + first = false + err = t.framer.fr.WriteHeaders(http2.HeadersFrameParam{ + StreamID: i.streamID, + BlockFragment: t.hBuf.Next(size), + EndStream: i.endStream, + EndHeaders: endHeaders, + }) + } else { + err = t.framer.fr.WriteContinuation( + i.streamID, + endHeaders, + t.hBuf.Next(size), + ) + } + if err != nil { + return err + } + } case *windowUpdate: err = t.framer.fr.WriteWindowUpdate(i.streamID, i.increment) case *settings: @@ -1242,7 +1280,7 @@ func (t *http2Client) itemHandler(i item) error { } err = t.framer.fr.WritePing(i.ack, i.data) default: - errorf("transport: http2Client.controller got unexpected item type %v\n", i) + errorf("transport: http2Client.controller got unexpected item type %v", i) } return err } @@ -1268,7 +1306,7 @@ func (t *http2Client) keepalive() { case <-t.awakenKeepalive: // If the control gets here a ping has been sent // need to reset the timer with keepalive.Timeout. - case <-t.shutdownChan: + case <-t.ctx.Done(): return } } else { @@ -1287,13 +1325,13 @@ func (t *http2Client) keepalive() { } t.Close() return - case <-t.shutdownChan: + case <-t.ctx.Done(): if !timer.Stop() { <-timer.C } return } - case <-t.shutdownChan: + case <-t.ctx.Done(): if !timer.Stop() { <-timer.C } @@ -1303,25 +1341,9 @@ func (t *http2Client) keepalive() { } func (t *http2Client) Error() <-chan struct{} { - return t.errorChan + return t.ctx.Done() } func (t *http2Client) GoAway() <-chan struct{} { return t.goAway } - -func (t *http2Client) notifyError(err error) { - t.mu.Lock() - // make sure t.errorChan is closed only once. - if t.state == draining { - t.mu.Unlock() - t.Close() - return - } - if t.state == reachable { - t.state = unreachable - close(t.errorChan) - infof("transport: http2Client.notifyError got notified that the client transport was broken %v.", err) - } - t.mu.Unlock() -} diff --git a/vendor/google.golang.org/grpc/transport/http2_server.go b/vendor/google.golang.org/grpc/transport/http2_server.go index 80ddd5fb5e..6582024040 100644 --- a/vendor/google.golang.org/grpc/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/transport/http2_server.go @@ -52,17 +52,16 @@ var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHe // http2Server implements the ServerTransport interface with HTTP2. type http2Server struct { ctx context.Context + cancel context.CancelFunc conn net.Conn remoteAddr net.Addr localAddr net.Addr maxStreamID uint32 // max stream ID ever seen authInfo credentials.AuthInfo // auth info about the connection inTapHandle tap.ServerInHandle - // shutdownChan is closed when Close is called. - // Blocking operations should select on shutdownChan to avoid - // blocking forever after Close. 
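With the continuationFrame item gone, itemHandler above now owns the HPACK buffer and splits the encoded header block across one HEADERS frame plus CONTINUATION frames itself, setting END_HEADERS only on the last fragment. The sketch below reproduces just that splitting loop over a dummy 40 KB block, printing frame sizes instead of driving a real framer:

package main

import (
	"bytes"
	"fmt"
)

const http2MaxFrameLen = 16384

// splitHeaderBlock mirrors the loop in itemHandler: the first fragment goes out
// in a HEADERS frame, the rest in CONTINUATION frames, and only the last
// fragment carries END_HEADERS.
func splitHeaderBlock(buf *bytes.Buffer) {
	first := true
	for endHeaders := false; !endHeaders; {
		size := buf.Len()
		if size > http2MaxFrameLen {
			size = http2MaxFrameLen
		} else {
			endHeaders = true
		}
		frag := buf.Next(size)
		if first {
			fmt.Printf("HEADERS      %5d bytes, END_HEADERS=%v\n", len(frag), endHeaders)
			first = false
		} else {
			fmt.Printf("CONTINUATION %5d bytes, END_HEADERS=%v\n", len(frag), endHeaders)
		}
	}
}

func main() {
	splitHeaderBlock(bytes.NewBuffer(make([]byte, 40000))) // a 40 KB encoded header block
}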
- shutdownChan chan struct{} - framer *framer + framer *framer + hBuf *bytes.Buffer // the buffer for HPACK encoding + hEnc *hpack.Encoder // HPACK encoder // The max number of concurrent streams. maxStreams uint32 // controlBuf delivers all the control related tasks (e.g., window @@ -105,7 +104,7 @@ type http2Server struct { // the per-stream outbound flow control window size set by the peer. streamSendQuota uint32 // idle is the time instant when the connection went idle. - // This is either the begining of the connection or when the number of + // This is either the beginning of the connection or when the number of // RPCs go down to 0. // When the connection is busy, this value is set to 0. idle time.Time @@ -114,7 +113,15 @@ type http2Server struct { // newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is // returned if something goes wrong. func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { - framer := newFramer(conn) + writeBufSize := defaultWriteBufSize + if config.WriteBufferSize > 0 { + writeBufSize = config.WriteBufferSize + } + readBufSize := defaultReadBufSize + if config.ReadBufferSize > 0 { + readBufSize = config.ReadBufferSize + } + framer := newFramer(conn, writeBufSize, readBufSize) // Send initial settings as connection preface to client. var isettings []http2.Setting // TODO(zhaoq): Have a better way to signal "no limit" because 0 is @@ -175,20 +182,24 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err if kep.MinTime == 0 { kep.MinTime = defaultKeepalivePolicyMinTime } + var buf bytes.Buffer + ctx, cancel := context.WithCancel(context.Background()) t := &http2Server{ - ctx: context.Background(), + ctx: ctx, + cancel: cancel, conn: conn, remoteAddr: conn.RemoteAddr(), localAddr: conn.LocalAddr(), authInfo: config.AuthInfo, framer: framer, + hBuf: &buf, + hEnc: hpack.NewEncoder(&buf), maxStreams: maxStreams, inTapHandle: config.InTapHandle, controlBuf: newControlBuffer(), fc: &inFlow{limit: uint32(icwz)}, sendQuotaPool: newQuotaPool(defaultWindowSize), state: reachable, - shutdownChan: make(chan struct{}), activeStreams: make(map[uint32]*Stream), streamSendQuota: defaultWindowSize, stats: config.StatsHandler, @@ -212,7 +223,10 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err t.stats.HandleConn(t.ctx, connBegin) } t.framer.writer.Flush() - go loopyWriter(t.controlBuf, t.shutdownChan, t.itemHandler) + go func() { + loopyWriter(t.ctx, t.controlBuf, t.itemHandler) + t.Close() + }() go t.keepalive() return t, nil } @@ -639,7 +653,7 @@ func (t *http2Server) handlePing(f *http2.PingFrame) { t.mu.Unlock() if ns < 1 && !t.kep.PermitWithoutStream { // Keepalive shouldn't be active thus, this new ping should - // have come after atleast defaultPingTimeout. + // have come after at least defaultPingTimeout. if t.lastPingAt.Add(defaultPingTimeout).After(now) { t.pingStrikes++ } @@ -669,40 +683,12 @@ func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) { } } -func (t *http2Server) writeHeaders(s *Stream, b *bytes.Buffer, endStream bool) error { - first := true - endHeaders := false - // Sends the headers in a single batch. 
- for !endHeaders { - size := b.Len() - if size > http2MaxFrameLen { - size = http2MaxFrameLen - } else { - endHeaders = true - } - if first { - p := http2.HeadersFrameParam{ - StreamID: s.id, - BlockFragment: b.Next(size), - EndStream: endStream, - EndHeaders: endHeaders, - } - t.controlBuf.put(&headerFrame{p}) - first = false - } else { - t.controlBuf.put(&continuationFrame{streamID: s.id, endHeaders: endHeaders, headerBlockFragment: b.Next(size)}) - } - } - atomic.StoreUint32(&t.resetPingStrikes, 1) - return nil -} - // WriteHeader sends the header metedata md back to the client. func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { select { case <-s.ctx.Done(): return ContextErr(s.ctx.Err()) - case <-t.shutdownChan: + case <-t.ctx.Done(): return ErrConnClosing default: } @@ -722,13 +708,13 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { } md = s.header s.mu.Unlock() - - hBuf := bytes.NewBuffer([]byte{}) // TODO(mmukhi): Try and re-use this memory later. - hEnc := hpack.NewEncoder(hBuf) - hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else. + headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) if s.sendCompress != "" { - hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) } for k, vv := range md { if isReservedHeader(k) { @@ -736,16 +722,17 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { continue } for _, v := range vv { - hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } } - bufLen := hBuf.Len() - if err := t.writeHeaders(s, hBuf, false); err != nil { - return err - } + t.controlBuf.put(&headerFrame{ + streamID: s.id, + hf: headerFields, + endStream: false, + }) if t.stats != nil { outHeader := &stats.OutHeader{ - WireLength: bufLen, + //WireLength: // TODO(mmukhi): Revisit this later, if needed. } t.stats.HandleRPC(s.Context(), outHeader) } @@ -758,7 +745,7 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { // OK is adopted. func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { select { - case <-t.shutdownChan: + case <-t.ctx.Done(): return ErrConnClosing default: } @@ -782,18 +769,15 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { headersSent = true } - hBuf := bytes.NewBuffer([]byte{}) // TODO(mmukhi): Try and re-use this memory. - hEnc := hpack.NewEncoder(hBuf) + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else. 
if !headersSent { - hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) } - hEnc.WriteField( - hpack.HeaderField{ - Name: "grpc-status", - Value: strconv.Itoa(int(st.Code())), - }) - hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) if p := st.Proto(); p != nil && len(p.Details) > 0 { stBytes, err := proto.Marshal(p) @@ -802,7 +786,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { panic(err) } - hEnc.WriteField(hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) } // Attach the trailer metadata. @@ -812,19 +796,16 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { continue } for _, v := range vv { - hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } } - bufLen := hBuf.Len() - if err := t.writeHeaders(s, hBuf, true); err != nil { - t.Close() - return err - } + t.controlBuf.put(&headerFrame{ + streamID: s.id, + hf: headerFields, + endStream: true, + }) if t.stats != nil { - outTrailer := &stats.OutTrailer{ - WireLength: bufLen, - } - t.stats.HandleRPC(s.Context(), outTrailer) + t.stats.HandleRPC(s.Context(), &stats.OutTrailer{}) } t.closeStream(s) return nil @@ -832,11 +813,11 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { // Write converts the data into HTTP2 data frame and sends it out. Non-nil error // is returns if it fails (e.g., framing error, transport error). -func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) (err error) { +func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { select { case <-s.ctx.Done(): return ContextErr(s.ctx.Err()) - case <-t.shutdownChan: + case <-t.ctx.Done(): return ErrConnClosing default: } @@ -861,67 +842,79 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) ( } hdr = append(hdr, data[:emptyLen]...) data = data[emptyLen:] + var ( + streamQuota int + streamQuotaVer uint32 + localSendQuota int + err error + sqChan <-chan int + ) for _, r := range [][]byte{hdr, data} { for len(r) > 0 { size := http2MaxFrameLen - // Wait until the stream has some quota to send the data. - quotaChan, quotaVer := s.sendQuotaPool.acquireWithVersion() - sq, err := wait(s.ctx, nil, nil, t.shutdownChan, quotaChan) - if err != nil { - return err + if size > len(r) { + size = len(r) } + if streamQuota == 0 { // Used up all the locally cached stream quota. + sqChan, streamQuotaVer = s.sendQuotaPool.acquireWithVersion() + // Wait until the stream has some quota to send the data. 
+				streamQuota, err = wait(s.ctx, t.ctx, nil, nil, sqChan)
+				if err != nil {
+					return err
+				}
+			}
+			if localSendQuota <= 0 {
+				localSendQuota, err = wait(s.ctx, t.ctx, nil, nil, s.localSendQuota.acquire())
+				if err != nil {
+					return err
+				}
+			}
+			if size > streamQuota {
+				size = streamQuota
+			} // No need to do that for localSendQuota since that's only a soft limit.
 			// Wait until the transport has some quota to send the data.
-			tq, err := wait(s.ctx, nil, nil, t.shutdownChan, t.sendQuotaPool.acquire())
+			tq, err := wait(s.ctx, t.ctx, nil, nil, t.sendQuotaPool.acquire())
 			if err != nil {
 				return err
 			}
-			if sq < size {
-				size = sq
-			}
 			if tq < size {
 				size = tq
 			}
-			if size > len(r) {
-				size = len(r)
+			if tq > size {
+				t.sendQuotaPool.add(tq - size)
 			}
+			streamQuota -= size
+			localSendQuota -= size
 			p := r[:size]
-			ps := len(p)
-			if ps < tq {
-				// Overbooked transport quota. Return it back.
-				t.sendQuotaPool.add(tq - ps)
-			}
-			// Acquire local send quota to be able to write to the controlBuf.
-			ltq, err := wait(s.ctx, nil, nil, t.shutdownChan, s.localSendQuota.acquire())
-			if err != nil {
-				if _, ok := err.(ConnectionError); !ok {
-					t.sendQuotaPool.add(ps)
-				}
-				return err
-			}
-			s.localSendQuota.add(ltq - ps) // It's ok we make this negative.
 			// Reset ping strikes when sending data since this might cause
 			// the peer to send ping.
 			atomic.StoreUint32(&t.resetPingStrikes, 1)
 			success := func() {
+				sz := size
 				t.controlBuf.put(&dataFrame{streamID: s.id, endStream: false, d: p, f: func() {
-					//fmt.Println("Adding quota back to localEendQuota", ps)
-					s.localSendQuota.add(ps)
+					s.localSendQuota.add(sz)
 				}})
-				if ps < sq {
-					// Overbooked stream quota. Return it back.
-					s.sendQuotaPool.lockedAdd(sq - ps)
-				}
-				r = r[ps:]
+				r = r[size:]
 			}
-			failure := func() {
-				s.sendQuotaPool.lockedAdd(sq)
+			failure := func() { // The stream quota version must have changed.
+				// Our streamQuota cache is invalidated now, so give it back.
+				s.sendQuotaPool.lockedAdd(streamQuota + size)
 			}
-			if !s.sendQuotaPool.compareAndExecute(quotaVer, success, failure) {
-				t.sendQuotaPool.add(ps)
-				s.localSendQuota.add(ps)
+			if !s.sendQuotaPool.compareAndExecute(streamQuotaVer, success, failure) {
+				// Couldn't send this chunk out.
+				t.sendQuotaPool.add(size)
+				localSendQuota += size
+				streamQuota = 0
 			}
 		}
 	}
+	if streamQuota > 0 {
+		// Add the leftover quota back to the stream.
+		s.sendQuotaPool.add(streamQuota)
+	}
+	if localSendQuota > 0 {
+		s.localSendQuota.add(localSendQuota)
+	}
 	return nil
 }
@@ -938,7 +931,7 @@ func (t *http2Server) keepalive() {
 	maxAge := time.NewTimer(t.kp.MaxConnectionAge)
 	keepalive := time.NewTimer(t.kp.Time)
 	// NOTE: All exit paths of this function should reset their
-	// respecitve timers. A failure to do so will cause the
+	// respective timers. A failure to do so will cause the
 	// following clean-up to deadlock and eventually leak.
 	defer func() {
 		if !maxIdle.Stop() {
@@ -981,7 +974,7 @@ func (t *http2Server) keepalive() {
 				t.Close()
 				// Reseting the timer so that the clean-up doesn't deadlock.
 				maxAge.Reset(infinity)
-			case <-t.shutdownChan:
+			case <-t.ctx.Done():
 			}
 			return
 		case <-keepalive.C:
@@ -999,7 +992,7 @@ func (t *http2Server) keepalive() {
 			pingSent = true
 			t.controlBuf.put(p)
 			keepalive.Reset(t.kp.Timeout)
-		case <-t.shutdownChan:
+		case <-t.ctx.Done():
 			return
 		}
 	}
@@ -1007,35 +1000,63 @@ func (t *http2Server) keepalive() {
 var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}}
+// TODO(mmukhi): A lot of this code (and code in other places in the transport layer)
+// is duplicated between the client and the server.
+// The transport layer needs to be refactored to take care of this.
 func (t *http2Server) itemHandler(i item) error {
-	var err error
-	defer func() {
-		if err != nil {
-			t.Close()
-			errorf("transport: Error while writing: %v", err)
-		}
-	}()
 	switch i := i.(type) {
 	case *dataFrame:
-		err = t.framer.fr.WriteData(i.streamID, i.endStream, i.d)
-		if err == nil {
-			i.f()
+		if err := t.framer.fr.WriteData(i.streamID, i.endStream, i.d); err != nil {
+			return err
 		}
+		i.f()
+		return nil
 	case *headerFrame:
-		err = t.framer.fr.WriteHeaders(i.p)
-	case *continuationFrame:
-		err = t.framer.fr.WriteContinuation(i.streamID, i.endHeaders, i.headerBlockFragment)
+		t.hBuf.Reset()
+		for _, f := range i.hf {
+			t.hEnc.WriteField(f)
+		}
+		first := true
+		endHeaders := false
+		for !endHeaders {
+			size := t.hBuf.Len()
+			if size > http2MaxFrameLen {
+				size = http2MaxFrameLen
+			} else {
+				endHeaders = true
+			}
+			var err error
+			if first {
+				first = false
+				err = t.framer.fr.WriteHeaders(http2.HeadersFrameParam{
+					StreamID: i.streamID,
+					BlockFragment: t.hBuf.Next(size),
+					EndStream: i.endStream,
+					EndHeaders: endHeaders,
+				})
+			} else {
+				err = t.framer.fr.WriteContinuation(
+					i.streamID,
+					endHeaders,
+					t.hBuf.Next(size),
+				)
+			}
+			if err != nil {
+				return err
+			}
+		}
+		atomic.StoreUint32(&t.resetPingStrikes, 1)
+		return nil
 	case *windowUpdate:
-		err = t.framer.fr.WriteWindowUpdate(i.streamID, i.increment)
+		return t.framer.fr.WriteWindowUpdate(i.streamID, i.increment)
 	case *settings:
 		if i.ack {
 			t.applySettings(i.ss)
-			err = t.framer.fr.WriteSettingsAck()
-		} else {
-			err = t.framer.fr.WriteSettings(i.ss...)
+			return t.framer.fr.WriteSettingsAck()
 		}
+		return t.framer.fr.WriteSettings(i.ss...)
 	case *resetStream:
-		err = t.framer.fr.WriteRSTStream(i.streamID, i.code)
+		return t.framer.fr.WriteRSTStream(i.streamID, i.code)
 	case *goAway:
 		t.mu.Lock()
 		if t.state == closing {
@@ -1048,15 +1069,13 @@ func (t *http2Server) itemHandler(i item) error {
 		// Stop accepting more streams now.
 		t.state = draining
 		t.mu.Unlock()
-		err = t.framer.fr.WriteGoAway(sid, i.code, i.debugData)
-		if err != nil {
+		if err := t.framer.fr.WriteGoAway(sid, i.code, i.debugData); err != nil {
 			return err
 		}
 		if i.closeConn {
-			// Abruptly close the connection following the GoAway.
-			// But flush out what's inside the buffer first.
+			// Abruptly close the connection following the GoAway (via
+			// loopywriter). But flush out what's inside the buffer first.
 			t.framer.writer.Flush()
-			t.Close()
 			return fmt.Errorf("transport: Connection closing")
 		}
 		return nil
@@ -1068,36 +1087,42 @@ func (t *http2Server) itemHandler(i item) error {
 		// originated before the GoAway reaches the client.
 		// After getting the ack or timer expiration send out another GoAway this
 		// time with an ID of the max stream server intends to process.
- err = t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}) - err = t.framer.fr.WritePing(false, goAwayPing.data) + if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil { + return err + } + if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { + return err + } go func() { timer := time.NewTimer(time.Minute) defer timer.Stop() select { case <-t.drainChan: case <-timer.C: - case <-t.shutdownChan: + case <-t.ctx.Done(): return } t.controlBuf.put(&goAway{code: i.code, debugData: i.debugData}) }() + return nil case *flushIO: - err = t.framer.writer.Flush() + return t.framer.writer.Flush() case *ping: if !i.ack { t.bdpEst.timesnap(i.data) } - err = t.framer.fr.WritePing(i.ack, i.data) + return t.framer.fr.WritePing(i.ack, i.data) default: - errorf("transport: http2Server.controller got unexpected item type %v\n", i) + err := status.Errorf(codes.Internal, "transport: http2Server.controller got unexpected item type %t", i) + errorf("%v", err) + return err } - return err } // Close starts shutting down the http2Server transport. // TODO(zhaoq): Now the destruction is not blocked on any pending streams. This // could cause some resource issue. Revisit this later. -func (t *http2Server) Close() (err error) { +func (t *http2Server) Close() error { t.mu.Lock() if t.state == closing { t.mu.Unlock() @@ -1107,8 +1132,8 @@ func (t *http2Server) Close() (err error) { streams := t.activeStreams t.activeStreams = nil t.mu.Unlock() - close(t.shutdownChan) - err = t.conn.Close() + t.cancel() + err := t.conn.Close() // Cancel all active streams. for _, s := range streams { s.cancel() @@ -1117,7 +1142,7 @@ func (t *http2Server) Close() (err error) { connEnd := &stats.ConnEnd{} t.stats.HandleConn(t.ctx, connEnd) } - return + return err } // closeStream clears the footprint of a stream when the stream is not needed diff --git a/vendor/google.golang.org/grpc/transport/http_util.go b/vendor/google.golang.org/grpc/transport/http_util.go index 831813fda7..39f878cfd5 100644 --- a/vendor/google.golang.org/grpc/transport/http_util.go +++ b/vendor/google.golang.org/grpc/transport/http_util.go @@ -44,7 +44,8 @@ const ( // http://http2.github.io/http2-spec/#SettingValues http2InitHeaderTableSize = 4096 // http2IOBufSize specifies the buffer size for sending frames. - http2IOBufSize = 32 * 1024 + defaultWriteBufSize = 32 * 1024 + defaultReadBufSize = 32 * 1024 ) var ( @@ -474,10 +475,10 @@ type framer struct { fr *http2.Framer } -func newFramer(conn net.Conn) *framer { +func newFramer(conn net.Conn, writeBufferSize, readBufferSize int) *framer { f := &framer{ - reader: bufio.NewReaderSize(conn, http2IOBufSize), - writer: bufio.NewWriterSize(conn, http2IOBufSize), + reader: bufio.NewReaderSize(conn, readBufferSize), + writer: bufio.NewWriterSize(conn, writeBufferSize), } f.fr = http2.NewFramer(f.writer, f.reader) // Opt-in to Frame reuse API on framer to reduce garbage. 
diff --git a/vendor/google.golang.org/grpc/transport/transport.go b/vendor/google.golang.org/grpc/transport/transport.go index 6aabbd3ff8..e0651c4d2b 100644 --- a/vendor/google.golang.org/grpc/transport/transport.go +++ b/vendor/google.golang.org/grpc/transport/transport.go @@ -25,6 +25,7 @@ import ( "io" "net" "sync" + "time" "golang.org/x/net/context" "golang.org/x/net/http2" @@ -67,20 +68,20 @@ func newRecvBuffer() *recvBuffer { func (b *recvBuffer) put(r recvMsg) { b.mu.Lock() - defer b.mu.Unlock() if len(b.backlog) == 0 { select { case b.c <- r: + b.mu.Unlock() return default: } } b.backlog = append(b.backlog, r) + b.mu.Unlock() } func (b *recvBuffer) load() { b.mu.Lock() - defer b.mu.Unlock() if len(b.backlog) > 0 { select { case b.c <- b.backlog[0]: @@ -89,6 +90,7 @@ func (b *recvBuffer) load() { default: } } + b.mu.Unlock() } // get returns the channel that receives a recvMsg in the buffer. @@ -164,20 +166,20 @@ func newControlBuffer() *controlBuffer { func (b *controlBuffer) put(r item) { b.mu.Lock() - defer b.mu.Unlock() if len(b.backlog) == 0 { select { case b.c <- r: + b.mu.Unlock() return default: } } b.backlog = append(b.backlog, r) + b.mu.Unlock() } func (b *controlBuffer) load() { b.mu.Lock() - defer b.mu.Unlock() if len(b.backlog) > 0 { select { case b.c <- b.backlog[0]: @@ -186,6 +188,7 @@ func (b *controlBuffer) load() { default: } } + b.mu.Unlock() } // get returns the channel that receives an item in the buffer. @@ -314,8 +317,9 @@ func (s *Stream) Header() (metadata.MD, error) { // side only. func (s *Stream) Trailer() metadata.MD { s.mu.RLock() - defer s.mu.RUnlock() - return s.trailer.Copy() + c := s.trailer.Copy() + s.mu.RUnlock() + return c } // ServerTransport returns the underlying ServerTransport for the stream. @@ -343,14 +347,16 @@ func (s *Stream) Status() *status.Status { // Server side only. func (s *Stream) SetHeader(md metadata.MD) error { s.mu.Lock() - defer s.mu.Unlock() if s.headerOk || s.state == streamDone { + s.mu.Unlock() return ErrIllegalHeaderWrite } if md.Len() == 0 { + s.mu.Unlock() return nil } s.header = metadata.Join(s.header, md) + s.mu.Unlock() return nil } @@ -361,8 +367,8 @@ func (s *Stream) SetTrailer(md metadata.MD) error { return nil } s.mu.Lock() - defer s.mu.Unlock() s.trailer = metadata.Join(s.trailer, md) + s.mu.Unlock() return nil } @@ -413,15 +419,17 @@ func (s *Stream) finish(st *status.Status) { // BytesSent indicates whether any bytes have been sent on this stream. func (s *Stream) BytesSent() bool { s.mu.Lock() - defer s.mu.Unlock() - return s.bytesSent + bs := s.bytesSent + s.mu.Unlock() + return bs } // BytesReceived indicates whether any bytes have been received on this stream. func (s *Stream) BytesReceived() bool { s.mu.Lock() - defer s.mu.Unlock() - return s.bytesReceived + br := s.bytesReceived + s.mu.Unlock() + return br } // GoString is implemented by Stream so context.String() won't @@ -450,7 +458,6 @@ type transportState int const ( reachable transportState = iota - unreachable closing draining ) @@ -465,6 +472,8 @@ type ServerConfig struct { KeepalivePolicy keepalive.EnforcementPolicy InitialWindowSize int32 InitialConnWindowSize int32 + WriteBufferSize int + ReadBufferSize int } // NewServerTransport creates a ServerTransport with conn or non-nil error @@ -492,22 +501,27 @@ type ConnectOptions struct { KeepaliveParams keepalive.ClientParameters // StatsHandler stores the handler for stats. StatsHandler stats.Handler - // InitialWindowSize sets the intial window size for a stream. 
+ // InitialWindowSize sets the initial window size for a stream. InitialWindowSize int32 - // InitialConnWindowSize sets the intial window size for a connection. + // InitialConnWindowSize sets the initial window size for a connection. InitialConnWindowSize int32 + // WriteBufferSize sets the size of write buffer which in turn determines how much data can be batched before it's written on the wire. + WriteBufferSize int + // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. + ReadBufferSize int } // TargetInfo contains the information of the target such as network address and metadata. type TargetInfo struct { - Addr string - Metadata interface{} + Addr string + Metadata interface{} + Authority string } // NewClientTransport establishes the transport with the required ConnectOptions // and returns it to the caller. -func NewClientTransport(ctx context.Context, target TargetInfo, opts ConnectOptions) (ClientTransport, error) { - return newHTTP2Client(ctx, target, opts) +func NewClientTransport(ctx context.Context, target TargetInfo, opts ConnectOptions, timeout time.Duration) (ClientTransport, error) { + return newHTTP2Client(ctx, target, opts, timeout) } // Options provides additional hints and information for message @@ -519,7 +533,7 @@ type Options struct { // Delay is a hint to the transport implementation for whether // the data could be buffered for a batching write. The - // Transport implementation may ignore the hint. + // transport implementation may ignore the hint. Delay bool } @@ -689,14 +703,13 @@ func (e StreamError) Error() string { return fmt.Sprintf("stream error: code = %s desc = %q", e.Code, e.Desc) } -// wait blocks until it can receive from ctx.Done, closing, or proceed. -// If it receives from ctx.Done, it returns 0, the StreamError for ctx.Err. -// If it receives from done, it returns 0, io.EOF if ctx is not done; otherwise -// it return the StreamError for ctx.Err. -// If it receives from goAway, it returns 0, ErrStreamDrain. -// If it receives from closing, it returns 0, ErrConnClosing. -// If it receives from proceed, it returns the received integer, nil. -func wait(ctx context.Context, done, goAway, closing <-chan struct{}, proceed <-chan int) (int, error) { +// wait blocks until it can receive from one of the provided contexts or +// channels. ctx is the context of the RPC, tctx is the context of the +// transport, done is a channel closed to indicate the end of the RPC, goAway +// is a channel closed to indicate a GOAWAY was received, and proceed is a +// quota channel, whose received value is returned from this function if none +// of the other signals occur first. +func wait(ctx, tctx context.Context, done, goAway <-chan struct{}, proceed <-chan int) (int, error) { select { case <-ctx.Done(): return 0, ContextErr(ctx.Err()) @@ -704,7 +717,7 @@ func wait(ctx context.Context, done, goAway, closing <-chan struct{}, proceed <- return 0, io.EOF case <-goAway: return 0, ErrStreamDrain - case <-closing: + case <-tctx.Done(): return 0, ErrConnClosing case i := <-proceed: return i, nil @@ -720,13 +733,13 @@ const ( // NoReason is the default value when GoAway frame is received. NoReason GoAwayReason = 1 // TooManyPings indicates that a GoAway frame with ErrCodeEnhanceYourCalm - // was recieved and that the debug data said "too_many_pings". + // was received and that the debug data said "too_many_pings". TooManyPings GoAwayReason = 2 ) // loopyWriter is run in a separate go routine. 
It is the single code path that will // write data on wire. -func loopyWriter(cbuf *controlBuffer, done chan struct{}, handler func(item) error) { +func loopyWriter(ctx context.Context, cbuf *controlBuffer, handler func(item) error) { for { select { case i := <-cbuf.get(): @@ -734,7 +747,7 @@ func loopyWriter(cbuf *controlBuffer, done chan struct{}, handler func(item) err if err := handler(i); err != nil { return } - case <-done: + case <-ctx.Done(): return } hasData: @@ -745,7 +758,7 @@ func loopyWriter(cbuf *controlBuffer, done chan struct{}, handler func(item) err if err := handler(i); err != nil { return } - case <-done: + case <-ctx.Done(): return default: if err := handler(&flushIO{}); err != nil { diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index 5cdde60996..72ef3290ad 100755 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -14,6 +14,8 @@ if git status --porcelain | read; then die "Uncommitted or untracked files found; commit changes first" fi +PATH="$GOPATH/bin:$GOROOT/bin:$PATH" + # Check proto in manual runs or cron runs. if [[ "$TRAVIS" != "true" || "$TRAVIS_EVENT_TYPE" = "cron" ]]; then check_proto="true" @@ -62,13 +64,12 @@ trap cleanup EXIT git ls-files "*.go" | xargs sed -i 's:"golang.org/x/net/context":"context":' set +o pipefail # TODO: Stop filtering pb.go files once golang/protobuf#214 is fixed. -# TODO: Remove clientconn exception once go1.6 support is removed. -go tool vet -all . 2>&1 | grep -vE 'clientconn.go:.*cancel' | grep -vF '.pb.go:' | tee /dev/stderr | (! read) +go tool vet -all . 2>&1 | grep -vF '.pb.go:' | tee /dev/stderr | (! read) set -o pipefail git reset --hard HEAD if [[ "$check_proto" = "true" ]]; then - PATH=/home/travis/bin:$PATH make proto && \ + PATH="/home/travis/bin:$PATH" make proto && \ git status --porcelain 2>&1 | (! 
read) || \ (git status; git --no-pager diff; exit 1) fi diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/BUILD b/vendor/k8s.io/apimachinery/pkg/api/errors/BUILD index 50f9237a47..80e205320a 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/errors/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -11,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["errors_test.go"], + importpath = "k8s.io/apimachinery/pkg/api/errors", library = ":go_default_library", - tags = ["automanaged"], deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", @@ -27,7 +25,7 @@ go_library( "doc.go", "errors.go", ], - tags = ["automanaged"], + importpath = "k8s.io/apimachinery/pkg/api/errors", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", @@ -35,3 +33,16 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS index e664b2015b..af32c1fdf7 100755 --- a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS @@ -14,13 +14,12 @@ reviewers: - erictune - saad-ali - janetkuo -- timstclair +- tallclair - eparis - timothysc - dims - hongchaodeng - krousey -- satnam6502 - cjcullen - david-mcmahon - goltermann diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go index 560c889b9c..d5503fac5d 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go @@ -28,14 +28,10 @@ import ( "k8s.io/apimachinery/pkg/util/validation/field" ) -// HTTP Status codes not in the golang http package. const ( - StatusUnprocessableEntity = 422 - StatusTooManyRequests = 429 - // StatusServerTimeout is an indication that a transient server error has - // occurred and the client *should* retry, with an optional Retry-After - // header to specify the back off window. - StatusServerTimeout = 504 + // StatusTooManyRequests means the server experienced too many requests within a + // given window and that the client must wait to perform the action again. 
+	StatusTooManyRequests = 429
 )
 // StatusError is an error intended for consumption by a REST API server; it can also be
@@ -138,6 +134,14 @@ func NewUnauthorized(reason string) *StatusError {
 // NewForbidden returns an error indicating the requested action was forbidden
 func NewForbidden(qualifiedResource schema.GroupResource, name string, err error) *StatusError {
+	var message string
+	if qualifiedResource.Empty() {
+		message = fmt.Sprintf("forbidden: %v", err)
+	} else if name == "" {
+		message = fmt.Sprintf("%s is forbidden: %v", qualifiedResource.String(), err)
+	} else {
+		message = fmt.Sprintf("%s %q is forbidden: %v", qualifiedResource.String(), name, err)
+	}
 	return &StatusError{metav1.Status{
 		Status: metav1.StatusFailure,
 		Code: http.StatusForbidden,
@@ -147,7 +151,7 @@ func NewForbidden(qualifiedResource schema.GroupResource, name string, err error
 			Kind: qualifiedResource.Resource,
 			Name: name,
 		},
-		Message: fmt.Sprintf("%s %q is forbidden: %v", qualifiedResource.String(), name, err),
+		Message: message,
 	}}
 }
@@ -176,6 +180,17 @@ func NewGone(message string) *StatusError {
 	}}
 }
+// NewResourceExpired creates an error that indicates that the requested resource content has expired from
+// the server (usually due to a resourceVersion that is too old).
+func NewResourceExpired(message string) *StatusError {
+	return &StatusError{metav1.Status{
+		Status: metav1.StatusFailure,
+		Code: http.StatusGone,
+		Reason: metav1.StatusReasonExpired,
+		Message: message,
+	}}
+}
+
 // NewInvalid returns an error indicating the item is invalid and cannot be processed.
 func NewInvalid(qualifiedKind schema.GroupKind, name string, errs field.ErrorList) *StatusError {
 	causes := make([]metav1.StatusCause, 0, len(errs))
@@ -189,7 +204,7 @@ func NewInvalid(qualifiedKind schema.GroupKind, name string, errs field.ErrorLis
 	}
 	return &StatusError{metav1.Status{
 		Status: metav1.StatusFailure,
-		Code: StatusUnprocessableEntity, // RFC 4918: StatusUnprocessableEntity
+		Code: http.StatusUnprocessableEntity,
 		Reason: metav1.StatusReasonInvalid,
 		Details: &metav1.StatusDetails{
 			Group: qualifiedKind.Group,
@@ -211,6 +226,21 @@ func NewBadRequest(reason string) *StatusError {
 	}}
 }
+// NewTooManyRequests creates an error that indicates that the client must try again later because
+// the specified endpoint is not accepting requests. More specific details should be provided
+// if the client should know why the failure was limited.
+func NewTooManyRequests(message string, retryAfterSeconds int) *StatusError {
+	return &StatusError{metav1.Status{
+		Status: metav1.StatusFailure,
+		Code: http.StatusTooManyRequests,
+		Reason: metav1.StatusReasonTooManyRequests,
+		Message: message,
+		Details: &metav1.StatusDetails{
+			RetryAfterSeconds: int32(retryAfterSeconds),
+		},
+	}}
+}
+
 // NewServiceUnavailable creates an error that indicates that the requested service is unavailable.
func NewServiceUnavailable(reason string) *StatusError { return &StatusError{metav1.Status{ @@ -276,7 +306,7 @@ func NewInternalError(err error) *StatusError { func NewTimeoutError(message string, retryAfterSeconds int) *StatusError { return &StatusError{metav1.Status{ Status: metav1.StatusFailure, - Code: StatusServerTimeout, + Code: http.StatusGatewayTimeout, Reason: metav1.StatusReasonTimeout, Message: fmt.Sprintf("Timeout: %s", message), Details: &metav1.StatusDetails{ @@ -285,6 +315,18 @@ func NewTimeoutError(message string, retryAfterSeconds int) *StatusError { }} } +// NewTooManyRequestsError returns an error indicating that the request was rejected because +// the server has received too many requests. Client should wait and retry. But if the request +// is perishable, then the client should not retry the request. +func NewTooManyRequestsError(message string) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: StatusTooManyRequests, + Reason: metav1.StatusReasonTooManyRequests, + Message: fmt.Sprintf("Too many requests: %s", message), + }} +} + // NewGenericServerResponse returns a new error for server responses that are not in a recognizable form. func NewGenericServerResponse(code int, verb string, qualifiedResource schema.GroupResource, name, serverMessage string, retryAfterSeconds int, isUnexpectedResponse bool) *StatusError { reason := metav1.StatusReasonUnknown @@ -313,14 +355,14 @@ func NewGenericServerResponse(code int, verb string, qualifiedResource schema.Gr case http.StatusMethodNotAllowed: reason = metav1.StatusReasonMethodNotAllowed message = "the server does not allow this method on the requested resource" - case StatusUnprocessableEntity: + case http.StatusUnprocessableEntity: reason = metav1.StatusReasonInvalid message = "the server rejected our request due to an error in our request" - case StatusServerTimeout: - reason = metav1.StatusReasonServerTimeout - message = "the server cannot complete the requested operation at this time, try again later" - case StatusTooManyRequests: + case http.StatusGatewayTimeout: reason = metav1.StatusReasonTimeout + message = "the server was unable to return a response in the time allotted, but may still be processing the request" + case http.StatusTooManyRequests: + reason = metav1.StatusReasonTooManyRequests message = "the server has received too many requests and has asked us to try again later" default: if code >= 500 { @@ -381,12 +423,28 @@ func IsInvalid(err error) bool { return reasonForError(err) == metav1.StatusReasonInvalid } +// IsGone is true if the error indicates the requested resource is no longer available. +func IsGone(err error) bool { + return reasonForError(err) == metav1.StatusReasonGone +} + +// IsResourceExpired is true if the error indicates the resource has expired and the current action is +// no longer possible. +func IsResourceExpired(err error) bool { + return reasonForError(err) == metav1.StatusReasonExpired +} + // IsMethodNotSupported determines if the err is an error which indicates the provided action could not // be performed because it is not supported by the server. func IsMethodNotSupported(err error) bool { return reasonForError(err) == metav1.StatusReasonMethodNotAllowed } +// IsServiceUnavailable is true if the error indicates the underlying service is no longer available. 
+func IsServiceUnavailable(err error) bool { + return reasonForError(err) == metav1.StatusReasonServiceUnavailable +} + // IsBadRequest determines if err is an error which indicates that the request is invalid. func IsBadRequest(err error) bool { return reasonForError(err) == metav1.StatusReasonBadRequest @@ -423,11 +481,13 @@ func IsInternalError(err error) bool { // IsTooManyRequests determines if err is an error which indicates that there are too many requests // that the server cannot handle. -// TODO: update IsTooManyRequests() when the TooManyRequests(429) error returned from the API server has a non-empty Reason field func IsTooManyRequests(err error) bool { + if reasonForError(err) == metav1.StatusReasonTooManyRequests { + return true + } switch t := err.(type) { case APIStatus: - return t.Status().Code == StatusTooManyRequests + return t.Status().Code == http.StatusTooManyRequests } return false } @@ -455,13 +515,20 @@ func IsUnexpectedObjectError(err error) bool { } // SuggestsClientDelay returns true if this error suggests a client delay as well as the -// suggested seconds to wait, or false if the error does not imply a wait. +// suggested seconds to wait, or false if the error does not imply a wait. It does not +// address whether the error *should* be retried, since some errors (like a 3xx) may +// request delay without retry. func SuggestsClientDelay(err error) (int, bool) { switch t := err.(type) { case APIStatus: if t.Status().Details != nil { switch t.Status().Reason { - case metav1.StatusReasonServerTimeout, metav1.StatusReasonTimeout: + // this StatusReason explicitly requests the caller to delay the action + case metav1.StatusReasonServerTimeout: + return int(t.Status().Details.RetryAfterSeconds), true + } + // If the client requests that we retry after a certain number of seconds + if t.Status().Details.RetryAfterSeconds > 0 { return int(t.Status().Details.RetryAfterSeconds), true } } diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/BUILD b/vendor/k8s.io/apimachinery/pkg/api/resource/BUILD index 571a6b1e91..5b88754630 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -17,8 +15,8 @@ go_test( "quantity_test.go", "scale_int_test.go", ], + importpath = "k8s.io/apimachinery/pkg/api/resource", library = ":go_default_library", - tags = ["automanaged"], deps = [ "//vendor/github.com/google/gofuzz:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", @@ -36,20 +34,41 @@ go_library( "quantity_proto.go", "scale_int.go", "suffix.go", + "zz_generated.deepcopy.go", ], - tags = ["automanaged"], + importpath = "k8s.io/apimachinery/pkg/api/resource", deps = [ "//vendor/github.com/go-openapi/spec:go_default_library", "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/gopkg.in/inf.v0:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/openapi:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", + "//vendor/k8s.io/kube-openapi/pkg/common:go_default_library", ], ) go_test( name = "go_default_xtest", srcs = ["quantity_example_test.go"], - tags = ["automanaged"], + importpath = "k8s.io/apimachinery/pkg/api/resource_test", deps = ["//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library"], ) + +filegroup( + name = 
"package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) + +filegroup( + name = "go_default_library_protos", + srcs = ["generated.proto"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS index b905e57f0f..342ff29145 100755 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS @@ -7,7 +7,7 @@ reviewers: - mikedanese - saad-ali - janetkuo -- timstclair +- tallclair - eparis - timothysc - jbeda diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto index 608299da4d..091d11bdba 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto @@ -87,6 +87,7 @@ option go_package = "resource"; // +protobuf.embed=string // +protobuf.options.marshal=false // +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:deepcopy-gen=true // +k8s:openapi-gen=true message Quantity { optional string string = 1; diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go index 3a95608821..682ee9aa64 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -29,7 +29,7 @@ import ( "github.com/go-openapi/spec" inf "gopkg.in/inf.v0" - "k8s.io/apimachinery/pkg/openapi" + openapi "k8s.io/kube-openapi/pkg/common" ) // Quantity is a fixed-point representation of a number. @@ -93,6 +93,7 @@ import ( // +protobuf.embed=string // +protobuf.options.marshal=false // +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:deepcopy-gen=true // +k8s:openapi-gen=true type Quantity struct { // i is the quantity in int64 scaled form, if d.Dec == nil @@ -415,7 +416,7 @@ func (_ Quantity) OpenAPIDefinition() openapi.OpenAPIDefinition { // Note about BinarySI: // * If q.Format is set to BinarySI and q.Amount represents a non-zero value between // -1 and +1, it will be emitted as if q.Format were DecimalSI. -// * Otherwise, if q.Format is set to BinarySI, frational parts of q.Amount will be +// * Otherwise, if q.Format is set to BinarySI, fractional parts of q.Amount will be // rounded up. (1.1i becomes 2i.) func (q *Quantity) CanonicalizeBytes(out []byte) (result, suffix []byte) { if q.IsZero() { diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go new file mode 100644 index 0000000000..118dfca07e --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go @@ -0,0 +1,44 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package resource + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + reflect "reflect" +) + +// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. +// +// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. +func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { + return []conversion.GeneratedDeepCopyFunc{ + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*Quantity).DeepCopyInto(out.(*Quantity)) + return nil + }, InType: reflect.TypeOf(&Quantity{})}, + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Quantity) DeepCopyInto(out *Quantity) { + *out = in.DeepCopy() + return +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD index 2ce980fa75..7354f5e2fa 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -11,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = [ + "controller_ref_test.go", "duration_test.go", "group_version_test.go", "helpers_test.go", @@ -19,18 +18,20 @@ go_test( "time_test.go", "types_test.go", ], + importpath = "k8s.io/apimachinery/pkg/apis/meta/v1", library = ":go_default_library", - tags = ["automanaged"], deps = [ "//vendor/github.com/ghodss/yaml:go_default_library", - "//vendor/github.com/ugorji/go/codec:go_default_library", + "//vendor/github.com/json-iterator/go:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], ) go_library( name = "go_default_library", srcs = [ + "controller_ref.go", "conversion.go", "doc.go", "duration.go", @@ -50,7 +51,7 @@ go_library( "zz_generated.deepcopy.go", "zz_generated.defaults.go", ], - tags = ["automanaged"], + importpath = "k8s.io/apimachinery/pkg/apis/meta/v1", deps = [ "//vendor/github.com/go-openapi/spec:go_default_library", "//vendor/github.com/gogo/protobuf/proto:go_default_library", @@ -60,12 +61,35 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/openapi:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/selection:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + "//vendor/k8s.io/kube-openapi/pkg/common:go_default_library", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:all-srcs", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/validation:all-srcs", + ], + tags = ["automanaged"], +) + +filegroup( + name = "go_default_library_protos", + 
srcs = ["generated.proto"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS index 381a525094..7f5eb58602 100755 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS @@ -12,7 +12,6 @@ reviewers: - davidopp - sttts - quinton-hoole -- kargakis - luxas - janetkuo - justinsb diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go new file mode 100644 index 0000000000..042cd5b9c5 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go @@ -0,0 +1,54 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// IsControlledBy checks if the object has a controllerRef set to the given owner +func IsControlledBy(obj Object, owner Object) bool { + ref := GetControllerOf(obj) + if ref == nil { + return false + } + return ref.UID == owner.GetUID() +} + +// GetControllerOf returns a pointer to a copy of the controllerRef if controllee has a controller +func GetControllerOf(controllee Object) *OwnerReference { + for _, ref := range controllee.GetOwnerReferences() { + if ref.Controller != nil && *ref.Controller { + return &ref + } + } + return nil +} + +// NewControllerRef creates an OwnerReference pointing to the given owner. 
+func NewControllerRef(owner Object, gvk schema.GroupVersionKind) *OwnerReference { + blockOwnerDeletion := true + isController := true + return &OwnerReference{ + APIVersion: gvk.GroupVersion().String(), + Kind: gvk.Kind, + Name: owner.GetName(), + UID: owner.GetUID(), + BlockOwnerDeletion: &blockOwnerDeletion, + Controller: &isController, + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go index b9a8fd02d5..7049c9a33b 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go @@ -39,6 +39,9 @@ func AddConversionFuncs(scheme *runtime.Scheme) error { Convert_unversioned_Time_To_unversioned_Time, + Convert_Pointer_v1_Duration_To_v1_Duration, + Convert_v1_Duration_To_Pointer_v1_Duration, + Convert_Slice_string_To_unversioned_Time, Convert_resource_Quantity_To_resource_Quantity, @@ -181,6 +184,21 @@ func Convert_unversioned_Time_To_unversioned_Time(in *Time, out *Time, s convers return nil } +func Convert_Pointer_v1_Duration_To_v1_Duration(in **Duration, out *Duration, s conversion.Scope) error { + if *in == nil { + *out = Duration{} // zero duration + return nil + } + *out = **in // copy + return nil +} + +func Convert_v1_Duration_To_Pointer_v1_Duration(in *Duration, out **Duration, s conversion.Scope) error { + temp := *in //copy + *out = &temp + return nil +} + // Convert_Slice_string_To_unversioned_Time allows converting a URL query parameter value func Convert_Slice_string_To_unversioned_Time(input *[]string, out *Time, s conversion.Scope) error { str := "" diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go index 52273240f0..61f201cdf5 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go @@ -19,4 +19,4 @@ limitations under the License. // +k8s:defaulter-gen=TypeMeta // +groupName=meta.k8s.io -package v1 +package v1 // import "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go index 9e2b289da1..653b30237b 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go @@ -44,6 +44,7 @@ limitations under the License. 
Initializers LabelSelector LabelSelectorRequirement + List ListMeta ListOptions MicroTime @@ -67,6 +68,8 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" +import k8s_io_apimachinery_pkg_runtime "k8s.io/apimachinery/pkg/runtime" + import time "time" import k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" @@ -169,71 +172,75 @@ func (*LabelSelectorRequirement) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (m *List) Reset() { *m = List{} } +func (*List) ProtoMessage() {} +func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } + func (m *ListMeta) Reset() { *m = ListMeta{} } func (*ListMeta) ProtoMessage() {} -func (*ListMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +func (*ListMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } func (m *ListOptions) Reset() { *m = ListOptions{} } func (*ListOptions) ProtoMessage() {} -func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } +func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } func (m *MicroTime) Reset() { *m = MicroTime{} } func (*MicroTime) ProtoMessage() {} -func (*MicroTime) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +func (*MicroTime) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } func (*ObjectMeta) ProtoMessage() {} -func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } func (m *OwnerReference) Reset() { *m = OwnerReference{} } func (*OwnerReference) ProtoMessage() {} -func (*OwnerReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +func (*OwnerReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) ProtoMessage() {} -func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } func (m *RootPaths) Reset() { *m = RootPaths{} } func (*RootPaths) ProtoMessage() {} -func (*RootPaths) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (*RootPaths) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} } func (*ServerAddressByClientCIDR) ProtoMessage() {} func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{26} + return fileDescriptorGenerated, []int{27} } func (m *Status) Reset() { *m = Status{} } func (*Status) ProtoMessage() {} -func (*Status) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } +func (*Status) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } func (m *StatusCause) Reset() { *m = StatusCause{} } func (*StatusCause) ProtoMessage() {} -func (*StatusCause) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +func (*StatusCause) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } func (m *StatusDetails) Reset() { *m = StatusDetails{} } func (*StatusDetails) ProtoMessage() {} -func 
(*StatusDetails) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +func (*StatusDetails) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } func (m *Time) Reset() { *m = Time{} } func (*Time) ProtoMessage() {} -func (*Time) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } +func (*Time) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } func (m *Timestamp) Reset() { *m = Timestamp{} } func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } +func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } func (m *TypeMeta) Reset() { *m = TypeMeta{} } func (*TypeMeta) ProtoMessage() {} -func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } +func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } func (m *Verbs) Reset() { *m = Verbs{} } func (*Verbs) ProtoMessage() {} -func (*Verbs) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } +func (*Verbs) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } func (m *WatchEvent) Reset() { *m = WatchEvent{} } func (*WatchEvent) ProtoMessage() {} -func (*WatchEvent) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } +func (*WatchEvent) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } func init() { proto.RegisterType((*APIGroup)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIGroup") @@ -255,6 +262,7 @@ func init() { proto.RegisterType((*Initializers)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Initializers") proto.RegisterType((*LabelSelector)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector") proto.RegisterType((*LabelSelectorRequirement)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement") + proto.RegisterType((*List)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.List") proto.RegisterType((*ListMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta") proto.RegisterType((*ListOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ListOptions") proto.RegisterType((*MicroTime)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime") @@ -431,6 +439,14 @@ func (m *APIResource) MarshalTo(dAtA []byte) (int, error) { i += copy(dAtA[i:], s) } } + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i += copy(dAtA[i:], m.Group) + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) return i, nil } @@ -966,6 +982,44 @@ func (m *LabelSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *List) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *List) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n5, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + func (m *ListMeta) Marshal() (dAtA []byte, err error) { size := m.Size() 
dAtA = make([]byte, size) @@ -989,6 +1043,10 @@ func (m *ListMeta) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) i += copy(dAtA[i:], m.ResourceVersion) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue))) + i += copy(dAtA[i:], m.Continue) return i, nil } @@ -1040,6 +1098,13 @@ func (m *ListOptions) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0 } i++ + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Limit)) + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue))) + i += copy(dAtA[i:], m.Continue) return i, nil } @@ -1088,20 +1153,20 @@ func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x42 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.CreationTimestamp.Size())) - n5, err := m.CreationTimestamp.MarshalTo(dAtA[i:]) + n6, err := m.CreationTimestamp.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n5 + i += n6 if m.DeletionTimestamp != nil { dAtA[i] = 0x4a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DeletionTimestamp.Size())) - n6, err := m.DeletionTimestamp.MarshalTo(dAtA[i:]) + n7, err := m.DeletionTimestamp.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 + i += n7 } if m.DeletionGracePeriodSeconds != nil { dAtA[i] = 0x50 @@ -1189,11 +1254,11 @@ func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Initializers.Size())) - n7, err := m.Initializers.MarshalTo(dAtA[i:]) + n8, err := m.Initializers.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 + i += n8 } return i, nil } @@ -1353,11 +1418,11 @@ func (m *Status) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n8, err := m.ListMeta.MarshalTo(dAtA[i:]) + n9, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n8 + i += n9 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) @@ -1374,11 +1439,11 @@ func (m *Status) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Details.Size())) - n9, err := m.Details.MarshalTo(dAtA[i:]) + n10, err := m.Details.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n10 } dAtA[i] = 0x30 i++ @@ -1570,11 +1635,11 @@ func (m *WatchEvent) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n10, err := m.Object.MarshalTo(dAtA[i:]) + n11, err := m.Object.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n11 return i, nil } @@ -1665,6 +1730,10 @@ func (m *APIResource) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -1868,6 +1937,20 @@ func (m *LabelSelectorRequirement) Size() (n int) { return n } +func (m *List) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *ListMeta) Size() (n int) { var l int _ = l @@ -1875,6 +1958,8 @@ func (m *ListMeta) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.ResourceVersion) n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Continue) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -1892,6 +1977,9 @@ func 
(m *ListOptions) Size() (n int) { n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) } n += 2 + n += 1 + sovGenerated(uint64(m.Limit)) + l = len(m.Continue) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -2149,6 +2237,8 @@ func (this *APIResource) String() string { `ShortNames:` + fmt.Sprintf("%v", this.ShortNames) + `,`, `SingularName:` + fmt.Sprintf("%v", this.SingularName) + `,`, `Categories:` + fmt.Sprintf("%v", this.Categories) + `,`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, `}`, }, "") return s @@ -2274,6 +2364,17 @@ func (this *LabelSelectorRequirement) String() string { }, "") return s } +func (this *List) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&List{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *ListMeta) String() string { if this == nil { return "nil" @@ -2281,6 +2382,7 @@ func (this *ListMeta) String() string { s := strings.Join([]string{`&ListMeta{`, `SelfLink:` + fmt.Sprintf("%v", this.SelfLink) + `,`, `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `Continue:` + fmt.Sprintf("%v", this.Continue) + `,`, `}`, }, "") return s @@ -2296,6 +2398,8 @@ func (this *ListOptions) String() string { `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, `IncludeUninitialized:` + fmt.Sprintf("%v", this.IncludeUninitialized) + `,`, + `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, + `Continue:` + fmt.Sprintf("%v", this.Continue) + `,`, `}`, }, "") return s @@ -2953,6 +3057,64 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } m.Categories = append(m.Categories, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -4839,6 +5001,117 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { } return nil } +func (m *List) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx 
:= 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: List: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: List: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, k8s_io_apimachinery_pkg_runtime.RawExtension{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ListMeta) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -4926,6 +5199,35 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { } m.ResourceVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Continue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Continue = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -5123,6 +5425,54 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } } m.IncludeUninitialized = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + 
} + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Continue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Continue = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -7365,152 +7715,157 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 2351 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x59, 0xcd, 0x6f, 0x1b, 0xc7, - 0x15, 0xd7, 0x52, 0x22, 0x45, 0x3e, 0x8a, 0xfa, 0x98, 0xc8, 0x2d, 0x23, 0xb4, 0xa4, 0xb2, 0x29, - 0x02, 0xa5, 0x75, 0xc8, 0x4a, 0x69, 0x03, 0xd7, 0x6d, 0xdd, 0x6a, 0x45, 0xd9, 0x10, 0x62, 0xd9, - 0xc4, 0x28, 0x76, 0x51, 0xd7, 0x28, 0xba, 0x5a, 0x8e, 0xa8, 0xad, 0x96, 0xbb, 0x9b, 0x99, 0xa1, - 0x6c, 0x35, 0x87, 0xe6, 0xd0, 0x02, 0x3d, 0x14, 0x85, 0x8f, 0x3d, 0x15, 0x31, 0xda, 0xbf, 0xa0, - 0x7f, 0x40, 0x4f, 0x05, 0xea, 0x63, 0x80, 0x5e, 0x72, 0x28, 0x88, 0x98, 0x39, 0xf4, 0x14, 0xf4, - 0x2e, 0xa0, 0x40, 0x31, 0xb3, 0xb3, 0x5f, 0xa4, 0x18, 0x2d, 0xe3, 0xa0, 0xc8, 0x49, 0xdc, 0xf7, - 0xf1, 0x7b, 0x6f, 0x66, 0xde, 0xbc, 0xf7, 0xe6, 0x09, 0xf6, 0x4f, 0xae, 0xb1, 0x86, 0xed, 0x35, - 0x4f, 0xfa, 0x87, 0x84, 0xba, 0x84, 0x13, 0xd6, 0x3c, 0x25, 0x6e, 0xc7, 0xa3, 0x4d, 0xc5, 0x30, - 0x7d, 0xbb, 0x67, 0x5a, 0xc7, 0xb6, 0x4b, 0xe8, 0x59, 0xd3, 0x3f, 0xe9, 0x0a, 0x02, 0x6b, 0xf6, - 0x08, 0x37, 0x9b, 0xa7, 0x9b, 0xcd, 0x2e, 0x71, 0x09, 0x35, 0x39, 0xe9, 0x34, 0x7c, 0xea, 0x71, - 0x0f, 0x7d, 0x23, 0xd0, 0x6a, 0x24, 0xb5, 0x1a, 0xfe, 0x49, 0x57, 0x10, 0x58, 0x43, 0x68, 0x35, - 0x4e, 0x37, 0xd7, 0xde, 0xe8, 0xda, 0xfc, 0xb8, 0x7f, 0xd8, 0xb0, 0xbc, 0x5e, 0xb3, 0xeb, 0x75, - 0xbd, 0xa6, 0x54, 0x3e, 0xec, 0x1f, 0xc9, 0x2f, 0xf9, 0x21, 0x7f, 0x05, 0xa0, 0x6b, 0x13, 0x5d, - 0xa1, 0x7d, 0x97, 0xdb, 0x3d, 0x32, 0xea, 0xc5, 0xda, 0x5b, 0x97, 0x29, 0x30, 0xeb, 0x98, 0xf4, - 0xcc, 0x31, 0xbd, 0x37, 0x27, 0xe9, 0xf5, 0xb9, 0xed, 0x34, 0x6d, 0x97, 0x33, 0x4e, 0x47, 0x95, - 0xf4, 0x7f, 0xcc, 0x42, 0x71, 0xbb, 0xbd, 0x77, 0x8b, 0x7a, 0x7d, 0x1f, 0xad, 0xc3, 0x9c, 0x6b, - 0xf6, 0x48, 0x55, 0x5b, 0xd7, 0x36, 0x4a, 0xc6, 0xc2, 0xb3, 0x41, 0x7d, 0x66, 0x38, 0xa8, 0xcf, - 0xdd, 0x31, 0x7b, 0x04, 0x4b, 0x0e, 0x72, 0xa0, 0x78, 0x4a, 0x28, 0xb3, 0x3d, 0x97, 0x55, 0x73, - 0xeb, 0xb3, 0x1b, 0xe5, 0xad, 0x1b, 0x8d, 0x2c, 0x9b, 0xd6, 0x90, 0x06, 0xee, 0x07, 0xaa, 0x37, - 0x3d, 0xda, 0xb2, 0x99, 0xe5, 0x9d, 0x12, 0x7a, 0x66, 0x2c, 0x2b, 0x2b, 0x45, 0xc5, 0x64, 0x38, - 0xb2, 0x80, 0x7e, 0xa3, 0xc1, 0xb2, 0x4f, 0xc9, 0x11, 0xa1, 0x94, 0x74, 0x14, 0xbf, 0x3a, 0xbb, - 0xae, 0x7d, 0x01, 0x66, 0xab, 0xca, 0xec, 0x72, 0x7b, 0x04, 0x1f, 0x8f, 0x59, 0x44, 0x7f, 0xd6, - 0x60, 0x8d, 0x11, 0x7a, 0x4a, 0xe8, 0x76, 0xa7, 0x43, 0x09, 0x63, 0xc6, 0xd9, 0x8e, 0x63, 0x13, - 0x97, 0xef, 0xec, 0xb5, 0x30, 0xab, 0xce, 0xc9, 0x7d, 0xf8, 0x51, 0x36, 0x87, 0x0e, 0x26, 0xe1, - 0x18, 0xba, 0xf2, 0x68, 0x6d, 0xa2, 0x08, 0xc3, 0x9f, 0xe1, 0x86, 0x7e, 0x04, 0x0b, 0xe1, 0x41, - 0xde, 0xb6, 0x19, 0x47, 0xf7, 0xa1, 0xd0, 0x15, 0x1f, 0xac, 0xaa, 
0x49, 0x07, 0x1b, 0xd9, 0x1c, - 0x0c, 0x31, 0x8c, 0x45, 0xe5, 0x4f, 0x41, 0x7e, 0x32, 0xac, 0xd0, 0xf4, 0x4f, 0x73, 0x50, 0xde, - 0x6e, 0xef, 0x61, 0xc2, 0xbc, 0x3e, 0xb5, 0x48, 0x86, 0xa0, 0xd9, 0x02, 0x10, 0x7f, 0x99, 0x6f, - 0x5a, 0xa4, 0x53, 0xcd, 0xad, 0x6b, 0x1b, 0x45, 0x03, 0x29, 0x39, 0xb8, 0x13, 0x71, 0x70, 0x42, - 0x4a, 0xa0, 0x9e, 0xd8, 0x6e, 0x47, 0x9e, 0x76, 0x02, 0xf5, 0x6d, 0xdb, 0xed, 0x60, 0xc9, 0x41, - 0xb7, 0x21, 0x7f, 0x4a, 0xe8, 0xa1, 0xd8, 0x7f, 0x11, 0x10, 0xdf, 0xca, 0xb6, 0xbc, 0xfb, 0x42, - 0xc5, 0x28, 0x0d, 0x07, 0xf5, 0xbc, 0xfc, 0x89, 0x03, 0x10, 0xd4, 0x00, 0x60, 0xc7, 0x1e, 0xe5, - 0xd2, 0x9d, 0x6a, 0x7e, 0x7d, 0x76, 0xa3, 0x64, 0x2c, 0x0a, 0xff, 0x0e, 0x22, 0x2a, 0x4e, 0x48, - 0xa0, 0x6b, 0xb0, 0xc0, 0x6c, 0xb7, 0xdb, 0x77, 0x4c, 0x2a, 0x08, 0xd5, 0x82, 0xf4, 0x73, 0x55, - 0xf9, 0xb9, 0x70, 0x90, 0xe0, 0xe1, 0x94, 0xa4, 0xb0, 0x64, 0x99, 0x9c, 0x74, 0x3d, 0x6a, 0x13, - 0x56, 0x9d, 0x8f, 0x2d, 0xed, 0x44, 0x54, 0x9c, 0x90, 0xd0, 0xff, 0xaa, 0xc1, 0x52, 0x62, 0xbf, - 0xe5, 0xd9, 0x5e, 0x83, 0x85, 0x6e, 0x22, 0xb2, 0xd5, 0xde, 0x47, 0xd6, 0x93, 0x51, 0x8f, 0x53, - 0x92, 0x88, 0x40, 0x89, 0x2a, 0xa4, 0xf0, 0x06, 0x6f, 0x66, 0x0e, 0x8c, 0xd0, 0x87, 0xd8, 0x52, - 0x82, 0xc8, 0x70, 0x8c, 0xac, 0xff, 0x5b, 0x93, 0x41, 0x12, 0xde, 0x69, 0xb4, 0x91, 0xc8, 0x1b, - 0x9a, 0x5c, 0xf2, 0xc2, 0x84, 0x3b, 0x7f, 0xc9, 0x65, 0xcb, 0x7d, 0x29, 0x2e, 0xdb, 0xf5, 0xe2, - 0x1f, 0x3f, 0xa8, 0xcf, 0xbc, 0xff, 0xaf, 0xf5, 0x19, 0xfd, 0x93, 0x1c, 0x54, 0x5a, 0xc4, 0x21, - 0x9c, 0xdc, 0xf5, 0xb9, 0x5c, 0xc1, 0x4d, 0x40, 0x5d, 0x6a, 0x5a, 0xa4, 0x4d, 0xa8, 0xed, 0x75, - 0x0e, 0x88, 0xe5, 0xb9, 0x1d, 0x26, 0x8f, 0x68, 0xd6, 0xf8, 0xca, 0x70, 0x50, 0x47, 0xb7, 0xc6, - 0xb8, 0xf8, 0x02, 0x0d, 0xe4, 0x40, 0xc5, 0xa7, 0xf2, 0xb7, 0xcd, 0x55, 0xc2, 0x15, 0x81, 0xfe, - 0x66, 0xb6, 0xb5, 0xb7, 0x93, 0xaa, 0xc6, 0xca, 0x70, 0x50, 0xaf, 0xa4, 0x48, 0x38, 0x0d, 0x8e, - 0x7e, 0x0c, 0xcb, 0x1e, 0xf5, 0x8f, 0x4d, 0xb7, 0x45, 0x7c, 0xe2, 0x76, 0x88, 0xcb, 0x99, 0xbc, - 0x7c, 0x45, 0x63, 0x55, 0xa4, 0xc9, 0xbb, 0x23, 0x3c, 0x3c, 0x26, 0x8d, 0x1e, 0xc0, 0x8a, 0x4f, - 0x3d, 0xdf, 0xec, 0x9a, 0x02, 0xb1, 0xed, 0x39, 0xb6, 0x75, 0x26, 0x2f, 0x67, 0xc9, 0xb8, 0x3a, - 0x1c, 0xd4, 0x57, 0xda, 0xa3, 0xcc, 0xf3, 0x41, 0xfd, 0x25, 0xb9, 0x75, 0x82, 0x12, 0x33, 0xf1, - 0x38, 0x8c, 0xbe, 0x07, 0xc5, 0x56, 0x9f, 0x4a, 0x0a, 0xfa, 0x21, 0x14, 0x3b, 0xea, 0xb7, 0xda, - 0xd5, 0x57, 0xc2, 0x1a, 0x12, 0xca, 0x9c, 0x0f, 0xea, 0x15, 0x51, 0x2a, 0x1b, 0x21, 0x01, 0x47, - 0x2a, 0xfa, 0x43, 0xa8, 0xec, 0x3e, 0xf6, 0x3d, 0xca, 0xc3, 0xf3, 0x7a, 0x0d, 0x0a, 0x44, 0x12, - 0x24, 0x5a, 0x31, 0x4e, 0x7c, 0x81, 0x18, 0x56, 0x5c, 0xf4, 0x2a, 0xe4, 0xc9, 0x63, 0xd3, 0xe2, - 0x2a, 0x83, 0x55, 0x94, 0x58, 0x7e, 0x57, 0x10, 0x71, 0xc0, 0xd3, 0x9f, 0x6a, 0x00, 0xb7, 0x48, - 0x84, 0xbd, 0x0d, 0x4b, 0xe1, 0xa5, 0x48, 0xdf, 0xd5, 0xaf, 0x2a, 0xed, 0x25, 0x9c, 0x66, 0xe3, - 0x51, 0x79, 0xd4, 0x86, 0x55, 0xdb, 0xb5, 0x9c, 0x7e, 0x87, 0xdc, 0x73, 0x6d, 0xd7, 0xe6, 0xb6, - 0xe9, 0xd8, 0xbf, 0x8a, 0xf2, 0xe8, 0xd7, 0x14, 0xce, 0xea, 0xde, 0x05, 0x32, 0xf8, 0x42, 0x4d, - 0xfd, 0x21, 0x94, 0x64, 0x86, 0x10, 0xc9, 0x54, 0xac, 0x4a, 0x26, 0x08, 0xe5, 0x57, 0xb4, 0x2a, - 0x29, 0x81, 0x03, 0x5e, 0x94, 0x8d, 0x73, 0x93, 0xb2, 0x71, 0xe2, 0x42, 0x38, 0x50, 0x09, 0x74, - 0xc3, 0x02, 0x91, 0xc9, 0xc2, 0x55, 0x28, 0x86, 0x0b, 0x57, 0x56, 0xa2, 0xc6, 0x20, 0x04, 0xc2, - 0x91, 0x44, 0xc2, 0xda, 0x31, 0xa4, 0xb2, 0x5d, 0x36, 0x63, 0xaf, 0xc3, 0xbc, 0xca, 0x37, 0xca, - 0xd6, 0x92, 0x12, 0x9b, 0x0f, 0x4f, 0x21, 0xe4, 0x27, 0x2c, 0xfd, 0x1a, 0xaa, 0x93, 0xba, 
0x89, - 0x17, 0xc8, 0xc7, 0xd9, 0x5d, 0xd1, 0xff, 0xa0, 0xc1, 0x72, 0x12, 0x29, 0xfb, 0xf1, 0x65, 0x37, - 0x72, 0x79, 0xdd, 0x4d, 0xec, 0xc8, 0x9f, 0x34, 0x58, 0x4d, 0x2d, 0x6d, 0xaa, 0x13, 0x9f, 0xc2, - 0xa9, 0x64, 0x70, 0xcc, 0x4e, 0x11, 0x1c, 0x4d, 0x28, 0xef, 0x45, 0x71, 0x4f, 0x2f, 0xef, 0x54, - 0xf4, 0xbf, 0x69, 0xb0, 0x90, 0xd0, 0x60, 0xe8, 0x21, 0xcc, 0x8b, 0xfc, 0x66, 0xbb, 0x5d, 0xd5, - 0x45, 0x65, 0x2c, 0x96, 0x09, 0x90, 0x78, 0x5d, 0xed, 0x00, 0x09, 0x87, 0x90, 0xa8, 0x0d, 0x05, - 0x4a, 0x58, 0xdf, 0xe1, 0x2a, 0xb5, 0x5f, 0xcd, 0x58, 0xd6, 0xb8, 0xc9, 0xfb, 0xcc, 0x00, 0x91, - 0xa3, 0xb0, 0xd4, 0xc7, 0x0a, 0x47, 0xff, 0x67, 0x0e, 0x2a, 0xb7, 0xcd, 0x43, 0xe2, 0x1c, 0x10, - 0x87, 0x58, 0xdc, 0xa3, 0xe8, 0x3d, 0x28, 0xf7, 0x4c, 0x6e, 0x1d, 0x4b, 0x6a, 0xd8, 0x0b, 0xb6, - 0xb2, 0x19, 0x4a, 0x21, 0x35, 0xf6, 0x63, 0x98, 0x5d, 0x97, 0xd3, 0x33, 0xe3, 0x25, 0xb5, 0xb0, - 0x72, 0x82, 0x83, 0x93, 0xd6, 0x64, 0x03, 0x2f, 0xbf, 0x77, 0x1f, 0xfb, 0xa2, 0x88, 0x4e, 0xff, - 0x6e, 0x48, 0xb9, 0x80, 0xc9, 0xbb, 0x7d, 0x9b, 0x92, 0x1e, 0x71, 0x79, 0xdc, 0xc0, 0xef, 0x8f, - 0xe0, 0xe3, 0x31, 0x8b, 0x6b, 0x37, 0x60, 0x79, 0xd4, 0x79, 0xb4, 0x0c, 0xb3, 0x27, 0xe4, 0x2c, - 0x88, 0x05, 0x2c, 0x7e, 0xa2, 0x55, 0xc8, 0x9f, 0x9a, 0x4e, 0x5f, 0xe5, 0x1f, 0x1c, 0x7c, 0x5c, - 0xcf, 0x5d, 0xd3, 0xf4, 0xbf, 0x68, 0x50, 0x9d, 0xe4, 0x08, 0xfa, 0x7a, 0x02, 0xc8, 0x28, 0x2b, - 0xaf, 0x66, 0xdf, 0x26, 0x67, 0x01, 0xea, 0x2e, 0x14, 0x3d, 0x5f, 0x3c, 0xb9, 0x3c, 0xaa, 0xe2, - 0xfc, 0xf5, 0x30, 0x76, 0xef, 0x2a, 0xfa, 0xf9, 0xa0, 0x7e, 0x25, 0x05, 0x1f, 0x32, 0x70, 0xa4, - 0x8a, 0x74, 0x28, 0x48, 0x7f, 0x44, 0x51, 0x16, 0xed, 0x93, 0x3c, 0xfc, 0xfb, 0x92, 0x82, 0x15, - 0x47, 0x7f, 0x0f, 0x8a, 0xa2, 0x3b, 0xdc, 0x27, 0xdc, 0x14, 0x57, 0x86, 0x11, 0xe7, 0xe8, 0xb6, - 0xed, 0x9e, 0x28, 0xd7, 0xa2, 0x2b, 0x73, 0xa0, 0xe8, 0x38, 0x92, 0xb8, 0xa8, 0x4c, 0xe5, 0xa6, - 0x2b, 0x53, 0xfa, 0x7f, 0x73, 0x50, 0x16, 0xd6, 0xc3, 0xca, 0xf7, 0x7d, 0xa8, 0x38, 0xc9, 0x35, - 0x29, 0x2f, 0xae, 0x28, 0xc0, 0x74, 0x94, 0xe2, 0xb4, 0xac, 0x50, 0x3e, 0xb2, 0x89, 0xd3, 0x89, - 0x94, 0x73, 0x69, 0xe5, 0x9b, 0x49, 0x26, 0x4e, 0xcb, 0x8a, 0xec, 0xf3, 0x48, 0x9c, 0xb6, 0x6a, - 0x5f, 0xa2, 0xec, 0xf3, 0x13, 0x41, 0xc4, 0x01, 0xef, 0xa2, 0x15, 0xcf, 0x4d, 0x59, 0x98, 0xaf, - 0xc3, 0xa2, 0xe8, 0x31, 0xbc, 0x3e, 0x0f, 0x7b, 0xbc, 0xbc, 0xec, 0x46, 0xd0, 0x70, 0x50, 0x5f, - 0x7c, 0x27, 0xc5, 0xc1, 0x23, 0x92, 0x13, 0x8b, 0x7a, 0xe1, 0x73, 0x17, 0xf5, 0x77, 0xa1, 0xb4, - 0x6f, 0x5b, 0xd4, 0x13, 0x86, 0x45, 0x6e, 0x65, 0xa9, 0xbe, 0x33, 0xca, 0x41, 0xa1, 0x43, 0x21, - 0x5f, 0xec, 0x96, 0x6b, 0xba, 0x5e, 0xd0, 0x5d, 0xe6, 0xe3, 0xdd, 0xba, 0x23, 0x88, 0x38, 0xe0, - 0x5d, 0x5f, 0x15, 0x29, 0xf5, 0x77, 0x4f, 0xeb, 0x33, 0x4f, 0x9e, 0xd6, 0x67, 0x3e, 0x78, 0xaa, - 0xd2, 0xeb, 0xa7, 0x00, 0x70, 0xf7, 0xf0, 0x97, 0xc4, 0x0a, 0x42, 0xee, 0xf2, 0x87, 0xa0, 0x28, - 0x93, 0x6a, 0xfe, 0x20, 0x1f, 0x4d, 0xb9, 0x91, 0x32, 0x99, 0xe0, 0xe1, 0x94, 0x24, 0x6a, 0x42, - 0x29, 0x7a, 0x1c, 0xaa, 0x12, 0xb0, 0xa2, 0xd4, 0x4a, 0xd1, 0x0b, 0x12, 0xc7, 0x32, 0xa9, 0xf8, - 0x9f, 0xbb, 0x34, 0xfe, 0x0d, 0x98, 0xed, 0xdb, 0x1d, 0x79, 0x7e, 0x25, 0xe3, 0xdb, 0xe1, 0x1d, - 0xbe, 0xb7, 0xd7, 0x3a, 0x1f, 0xd4, 0x5f, 0x99, 0x34, 0x56, 0xe1, 0x67, 0x3e, 0x61, 0x8d, 0x7b, - 0x7b, 0x2d, 0x2c, 0x94, 0x2f, 0x8a, 0xa8, 0xc2, 0x94, 0x11, 0xb5, 0x05, 0xa0, 0x56, 0x2d, 0xb4, - 0xe7, 0x83, 0x68, 0x0a, 0x1f, 0xca, 0xb7, 0x22, 0x0e, 0x4e, 0x48, 0x21, 0x06, 0x2b, 0x16, 0x25, - 0xf2, 0xb7, 0x38, 0x7a, 0xc6, 0xcd, 0x9e, 0x5f, 0x2d, 0xca, 0x72, 0xf2, 0xcd, 0x6c, 0x29, 0x56, - 0xa8, 0x19, 0x2f, 
0x2b, 0x33, 0x2b, 0x3b, 0xa3, 0x60, 0x78, 0x1c, 0x1f, 0x79, 0xb0, 0xd2, 0x51, - 0x8d, 0x7b, 0x6c, 0xb4, 0x34, 0xb5, 0xd1, 0x2b, 0xc2, 0x60, 0x6b, 0x14, 0x08, 0x8f, 0x63, 0xa3, - 0x9f, 0xc3, 0x5a, 0x48, 0x1c, 0x7f, 0x3d, 0x55, 0x41, 0xee, 0x54, 0x4d, 0xbc, 0xe7, 0x5a, 0x13, - 0xa5, 0xf0, 0x67, 0x20, 0xa0, 0x0e, 0x14, 0x9c, 0xa0, 0x40, 0x96, 0x65, 0x75, 0xfa, 0x41, 0xb6, - 0x55, 0xc4, 0xd1, 0xdf, 0x48, 0x16, 0xc6, 0xe8, 0x05, 0xa1, 0x6a, 0xa2, 0xc2, 0x46, 0x8f, 0xa1, - 0x6c, 0xba, 0xae, 0xc7, 0xcd, 0xe0, 0x3d, 0xb7, 0x20, 0x4d, 0x6d, 0x4f, 0x6d, 0x6a, 0x3b, 0xc6, - 0x18, 0x29, 0xc4, 0x09, 0x0e, 0x4e, 0x9a, 0x42, 0x8f, 0x60, 0xc9, 0x7b, 0xe4, 0x12, 0x8a, 0xc9, - 0x11, 0xa1, 0xc4, 0x15, 0x8f, 0xff, 0x8a, 0xb4, 0xfe, 0x9d, 0x8c, 0xd6, 0x53, 0xca, 0x71, 0x48, - 0xa7, 0xe9, 0x0c, 0x8f, 0x5a, 0x41, 0x0d, 0x80, 0x23, 0xdb, 0x55, 0xed, 0x54, 0x75, 0x31, 0x9e, - 0x76, 0xdc, 0x8c, 0xa8, 0x38, 0x21, 0x81, 0xbe, 0x0b, 0x65, 0xcb, 0xe9, 0x33, 0x4e, 0x82, 0xb1, - 0xca, 0x92, 0xbc, 0x41, 0xd1, 0xfa, 0x76, 0x62, 0x16, 0x4e, 0xca, 0xa1, 0x63, 0x58, 0xb0, 0x13, - 0x7d, 0x5b, 0x75, 0x59, 0xc6, 0xe2, 0xd6, 0xd4, 0xcd, 0x1a, 0x33, 0x96, 0x45, 0x26, 0x4a, 0x52, - 0x70, 0x0a, 0x79, 0xed, 0x7b, 0x50, 0xfe, 0x9c, 0x6d, 0x84, 0x68, 0x43, 0x46, 0x8f, 0x6e, 0xaa, - 0x36, 0xe4, 0xef, 0x39, 0x58, 0x4c, 0x6f, 0x78, 0xd4, 0xae, 0x6b, 0x13, 0xc7, 0x64, 0x61, 0x56, - 0x9e, 0x9d, 0x98, 0x95, 0x55, 0xf2, 0x9b, 0x7b, 0x91, 0xe4, 0xb7, 0x05, 0x60, 0xfa, 0x76, 0x98, - 0xf7, 0x82, 0x3c, 0x1a, 0x65, 0xae, 0x78, 0x10, 0x84, 0x13, 0x52, 0x72, 0x10, 0xe6, 0xb9, 0x9c, - 0x7a, 0x8e, 0x43, 0xa8, 0xaa, 0x7c, 0xc1, 0x20, 0x2c, 0xa2, 0xe2, 0x84, 0x04, 0xba, 0x09, 0xe8, - 0xd0, 0xf1, 0xac, 0x13, 0xb9, 0x05, 0xe1, 0x3d, 0x97, 0x59, 0xb2, 0x18, 0xcc, 0x55, 0x8c, 0x31, - 0x2e, 0xbe, 0x40, 0x43, 0xbf, 0x0b, 0xe9, 0x49, 0x08, 0xba, 0x11, 0x6c, 0x80, 0x16, 0x8d, 0x2a, - 0xa6, 0x5b, 0xbc, 0x7e, 0x15, 0x4a, 0xd8, 0xf3, 0x78, 0xdb, 0xe4, 0xc7, 0x0c, 0xd5, 0x21, 0xef, - 0x8b, 0x1f, 0x6a, 0xcc, 0x25, 0x27, 0x8d, 0x92, 0x83, 0x03, 0xba, 0xfe, 0x7b, 0x0d, 0x5e, 0x9e, - 0x38, 0x75, 0x12, 0x1b, 0x69, 0x45, 0x5f, 0xca, 0xa5, 0x68, 0x23, 0x63, 0x39, 0x9c, 0x90, 0x12, - 0xdd, 0x52, 0x6a, 0x54, 0x35, 0xda, 0x2d, 0xa5, 0xac, 0xe1, 0xb4, 0xac, 0xfe, 0x9f, 0x1c, 0x14, - 0x82, 0x07, 0x05, 0x7a, 0x08, 0x45, 0x71, 0x25, 0x3a, 0x26, 0x37, 0xa5, 0xe5, 0xcc, 0x33, 0xe3, - 0xb0, 0xeb, 0x8c, 0x6b, 0x6c, 0x48, 0xc1, 0x11, 0x22, 0x7a, 0x0d, 0x0a, 0x4c, 0xda, 0x51, 0xee, - 0x45, 0x49, 0x32, 0xb0, 0x8e, 0x15, 0x57, 0xf4, 0x2e, 0x3d, 0xc2, 0x98, 0xd9, 0x0d, 0x63, 0x36, - 0xea, 0x5d, 0xf6, 0x03, 0x32, 0x0e, 0xf9, 0xe8, 0x2d, 0xf1, 0x7e, 0x32, 0x59, 0xd4, 0xbb, 0xd5, - 0x42, 0x48, 0x2c, 0xa9, 0xe7, 0x83, 0xfa, 0x82, 0x02, 0x97, 0xdf, 0x58, 0x49, 0xa3, 0x07, 0x30, - 0xdf, 0x21, 0xdc, 0xb4, 0x9d, 0xa0, 0x65, 0xcb, 0x3c, 0x53, 0x0b, 0xc0, 0x5a, 0x81, 0xaa, 0x51, - 0x16, 0x3e, 0xa9, 0x0f, 0x1c, 0x02, 0x8a, 0xfb, 0x66, 0x79, 0x9d, 0x60, 0x20, 0x9c, 0x8f, 0xef, - 0xdb, 0x8e, 0xd7, 0x21, 0x58, 0x72, 0xf4, 0x27, 0x1a, 0x94, 0x03, 0xa4, 0x1d, 0xb3, 0xcf, 0x08, - 0xda, 0x8c, 0x56, 0x11, 0x1c, 0x77, 0x58, 0x8a, 0xe7, 0xde, 0x39, 0xf3, 0xc9, 0xf9, 0xa0, 0x5e, - 0x92, 0x62, 0xe2, 0x23, 0x5a, 0x40, 0x62, 0x8f, 0x72, 0x97, 0xec, 0xd1, 0xab, 0x90, 0x97, 0xed, - 0xb1, 0xda, 0xcc, 0xa8, 0xbf, 0x93, 0x2d, 0x34, 0x0e, 0x78, 0xfa, 0xc7, 0x39, 0xa8, 0xa4, 0x16, - 0x97, 0xa1, 0x99, 0x8b, 0x1e, 0xf9, 0xb9, 0x0c, 0x83, 0xa3, 0xc9, 0x63, 0xfc, 0x9f, 0x42, 0xc1, - 0x12, 0xeb, 0x0b, 0xff, 0x8f, 0xb2, 0x39, 0xcd, 0x51, 0xc8, 0x9d, 0x89, 0x23, 0x49, 0x7e, 0x32, - 0xac, 0x00, 0xd1, 0x2d, 0x58, 0xa1, 0x84, 
0xd3, 0xb3, 0xed, 0x23, 0x4e, 0x68, 0xb2, 0x47, 0xcf, - 0xc7, 0xed, 0x0e, 0x1e, 0x15, 0xc0, 0xe3, 0x3a, 0x61, 0x86, 0x2c, 0xbc, 0x40, 0x86, 0xd4, 0x1d, - 0x98, 0xfb, 0x3f, 0xb6, 0xe6, 0x3f, 0x83, 0x52, 0xdc, 0x3c, 0x7d, 0xc1, 0x26, 0xf5, 0x5f, 0x40, - 0x51, 0x44, 0x63, 0xd8, 0xf4, 0x5f, 0x52, 0x80, 0xd2, 0xa5, 0x21, 0x97, 0xa5, 0x34, 0xe8, 0x5b, - 0x10, 0xfc, 0x77, 0x46, 0x64, 0x53, 0x9b, 0x93, 0x5e, 0x2a, 0x9b, 0xee, 0x09, 0x02, 0x0e, 0xe8, - 0x89, 0x61, 0xcf, 0x6f, 0x35, 0x00, 0xf9, 0xc4, 0xdb, 0x3d, 0x15, 0xcf, 0xf2, 0x75, 0x98, 0x13, - 0x27, 0x30, 0xea, 0x98, 0xbc, 0x46, 0x92, 0x83, 0xee, 0x41, 0xc1, 0x93, 0x4d, 0x95, 0x9a, 0xbe, - 0xbc, 0x31, 0x31, 0xf2, 0xd4, 0x3f, 0x5e, 0x1b, 0xd8, 0x7c, 0xb4, 0xfb, 0x98, 0x13, 0x57, 0xf8, - 0x18, 0x47, 0x5d, 0xd0, 0x99, 0x61, 0x05, 0x66, 0x6c, 0x3c, 0x7b, 0x5e, 0x9b, 0xf9, 0xf0, 0x79, - 0x6d, 0xe6, 0xa3, 0xe7, 0xb5, 0x99, 0xf7, 0x87, 0x35, 0xed, 0xd9, 0xb0, 0xa6, 0x7d, 0x38, 0xac, - 0x69, 0x1f, 0x0d, 0x6b, 0xda, 0xc7, 0xc3, 0x9a, 0xf6, 0xe4, 0x93, 0xda, 0xcc, 0x83, 0xdc, 0xe9, - 0xe6, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x80, 0x43, 0x11, 0x41, 0xbe, 0x1e, 0x00, 0x00, + // 2428 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x59, 0x4d, 0x6c, 0x23, 0x49, + 0x15, 0x4e, 0xdb, 0xb1, 0x63, 0x3f, 0xc7, 0xf9, 0xa9, 0xcd, 0x80, 0x37, 0x02, 0x3b, 0xdb, 0x8b, + 0x56, 0x59, 0x98, 0xb5, 0x49, 0x16, 0x56, 0xc3, 0x00, 0x03, 0xe9, 0x38, 0x33, 0x8a, 0x76, 0x32, + 0x63, 0x55, 0x76, 0x06, 0x31, 0x8c, 0x10, 0x9d, 0x76, 0xc5, 0x69, 0xd2, 0xee, 0xf6, 0x56, 0x95, + 0x33, 0x09, 0x1c, 0xd8, 0x03, 0x48, 0x1c, 0x10, 0x9a, 0x23, 0x27, 0xb4, 0x23, 0xb8, 0x70, 0xe5, + 0xc4, 0x05, 0x4e, 0x48, 0xcc, 0x71, 0x24, 0x2e, 0x7b, 0x40, 0xd6, 0x8e, 0xf7, 0xc0, 0x09, 0x71, + 0xcf, 0x09, 0x55, 0x75, 0xf5, 0x9f, 0x1d, 0x4f, 0xda, 0x3b, 0x0b, 0xe2, 0x14, 0xf7, 0xfb, 0xf9, + 0xde, 0xab, 0xaa, 0xf7, 0x5e, 0xbd, 0x7a, 0x81, 0xbd, 0xe3, 0x6b, 0xac, 0x6e, 0x7b, 0x8d, 0xe3, + 0xfe, 0x01, 0xa1, 0x2e, 0xe1, 0x84, 0x35, 0x4e, 0x88, 0xdb, 0xf6, 0x68, 0x43, 0x31, 0xcc, 0x9e, + 0xdd, 0x35, 0xad, 0x23, 0xdb, 0x25, 0xf4, 0xac, 0xd1, 0x3b, 0xee, 0x08, 0x02, 0x6b, 0x74, 0x09, + 0x37, 0x1b, 0x27, 0x1b, 0x8d, 0x0e, 0x71, 0x09, 0x35, 0x39, 0x69, 0xd7, 0x7b, 0xd4, 0xe3, 0x1e, + 0xfa, 0x92, 0xaf, 0x55, 0x8f, 0x6b, 0xd5, 0x7b, 0xc7, 0x1d, 0x41, 0x60, 0x75, 0xa1, 0x55, 0x3f, + 0xd9, 0x58, 0x7d, 0xab, 0x63, 0xf3, 0xa3, 0xfe, 0x41, 0xdd, 0xf2, 0xba, 0x8d, 0x8e, 0xd7, 0xf1, + 0x1a, 0x52, 0xf9, 0xa0, 0x7f, 0x28, 0xbf, 0xe4, 0x87, 0xfc, 0xe5, 0x83, 0xae, 0x4e, 0x74, 0x85, + 0xf6, 0x5d, 0x6e, 0x77, 0xc9, 0xa8, 0x17, 0xab, 0xef, 0x5c, 0xa6, 0xc0, 0xac, 0x23, 0xd2, 0x35, + 0xc7, 0xf4, 0xde, 0x9e, 0xa4, 0xd7, 0xe7, 0xb6, 0xd3, 0xb0, 0x5d, 0xce, 0x38, 0x1d, 0x55, 0xd2, + 0xff, 0x96, 0x85, 0xc2, 0x56, 0x6b, 0xf7, 0x16, 0xf5, 0xfa, 0x3d, 0xb4, 0x06, 0xb3, 0xae, 0xd9, + 0x25, 0x15, 0x6d, 0x4d, 0x5b, 0x2f, 0x1a, 0xf3, 0x4f, 0x07, 0xb5, 0x99, 0xe1, 0xa0, 0x36, 0x7b, + 0xc7, 0xec, 0x12, 0x2c, 0x39, 0xc8, 0x81, 0xc2, 0x09, 0xa1, 0xcc, 0xf6, 0x5c, 0x56, 0xc9, 0xac, + 0x65, 0xd7, 0x4b, 0x9b, 0x37, 0xea, 0x69, 0x36, 0xad, 0x2e, 0x0d, 0xdc, 0xf7, 0x55, 0x6f, 0x7a, + 0xb4, 0x69, 0x33, 0xcb, 0x3b, 0x21, 0xf4, 0xcc, 0x58, 0x52, 0x56, 0x0a, 0x8a, 0xc9, 0x70, 0x68, + 0x01, 0xfd, 0x5c, 0x83, 0xa5, 0x1e, 0x25, 0x87, 0x84, 0x52, 0xd2, 0x56, 0xfc, 0x4a, 0x76, 0x4d, + 0xfb, 0x0c, 0xcc, 0x56, 0x94, 0xd9, 0xa5, 0xd6, 0x08, 0x3e, 0x1e, 0xb3, 0x88, 0x7e, 0xa7, 0xc1, + 0x2a, 0x23, 0xf4, 0x84, 0xd0, 0xad, 0x76, 0x9b, 0x12, 0xc6, 0x8c, 0xb3, 0x6d, 0xc7, 0x26, 0x2e, + 0xdf, 0xde, 0x6d, 0x62, 
0x56, 0x99, 0x95, 0xfb, 0xf0, 0x9d, 0x74, 0x0e, 0xed, 0x4f, 0xc2, 0x31, + 0x74, 0xe5, 0xd1, 0xea, 0x44, 0x11, 0x86, 0x5f, 0xe0, 0x86, 0x7e, 0x08, 0xf3, 0xc1, 0x41, 0xde, + 0xb6, 0x19, 0x47, 0xf7, 0x21, 0xdf, 0x11, 0x1f, 0xac, 0xa2, 0x49, 0x07, 0xeb, 0xe9, 0x1c, 0x0c, + 0x30, 0x8c, 0x05, 0xe5, 0x4f, 0x5e, 0x7e, 0x32, 0xac, 0xd0, 0xf4, 0x3f, 0x67, 0xa1, 0xb4, 0xd5, + 0xda, 0xc5, 0x84, 0x79, 0x7d, 0x6a, 0x91, 0x14, 0x41, 0xb3, 0x09, 0x20, 0xfe, 0xb2, 0x9e, 0x69, + 0x91, 0x76, 0x25, 0xb3, 0xa6, 0xad, 0x17, 0x0c, 0xa4, 0xe4, 0xe0, 0x4e, 0xc8, 0xc1, 0x31, 0x29, + 0x81, 0x7a, 0x6c, 0xbb, 0x6d, 0x79, 0xda, 0x31, 0xd4, 0x77, 0x6d, 0xb7, 0x8d, 0x25, 0x07, 0xdd, + 0x86, 0xdc, 0x09, 0xa1, 0x07, 0x62, 0xff, 0x45, 0x40, 0x7c, 0x25, 0xdd, 0xf2, 0xee, 0x0b, 0x15, + 0xa3, 0x38, 0x1c, 0xd4, 0x72, 0xf2, 0x27, 0xf6, 0x41, 0x50, 0x1d, 0x80, 0x1d, 0x79, 0x94, 0x4b, + 0x77, 0x2a, 0xb9, 0xb5, 0xec, 0x7a, 0xd1, 0x58, 0x10, 0xfe, 0xed, 0x87, 0x54, 0x1c, 0x93, 0x40, + 0xd7, 0x60, 0x9e, 0xd9, 0x6e, 0xa7, 0xef, 0x98, 0x54, 0x10, 0x2a, 0x79, 0xe9, 0xe7, 0x8a, 0xf2, + 0x73, 0x7e, 0x3f, 0xc6, 0xc3, 0x09, 0x49, 0x61, 0xc9, 0x32, 0x39, 0xe9, 0x78, 0xd4, 0x26, 0xac, + 0x32, 0x17, 0x59, 0xda, 0x0e, 0xa9, 0x38, 0x26, 0x81, 0x5e, 0x87, 0x9c, 0xdc, 0xf9, 0x4a, 0x41, + 0x9a, 0x28, 0x2b, 0x13, 0x39, 0x79, 0x2c, 0xd8, 0xe7, 0xa1, 0x37, 0x61, 0x4e, 0x65, 0x4d, 0xa5, + 0x28, 0xc5, 0x16, 0x95, 0xd8, 0x5c, 0x10, 0xd6, 0x01, 0x5f, 0xff, 0xa3, 0x06, 0x8b, 0xb1, 0xf3, + 0x93, 0xb1, 0x72, 0x0d, 0xe6, 0x3b, 0xb1, 0x4c, 0x51, 0x67, 0x19, 0xae, 0x26, 0x9e, 0x45, 0x38, + 0x21, 0x89, 0x08, 0x14, 0xa9, 0x42, 0x0a, 0x2a, 0xc2, 0x46, 0xea, 0x40, 0x0b, 0x7c, 0x88, 0x2c, + 0xc5, 0x88, 0x0c, 0x47, 0xc8, 0xfa, 0x3f, 0x35, 0x19, 0x74, 0x41, 0x8d, 0x40, 0xeb, 0xb1, 0x3a, + 0xa4, 0xc9, 0x2d, 0x9c, 0x9f, 0x50, 0x43, 0x2e, 0x49, 0xde, 0xcc, 0xff, 0x45, 0xf2, 0x5e, 0x2f, + 0xfc, 0xe6, 0xc3, 0xda, 0xcc, 0x07, 0xff, 0x58, 0x9b, 0xd1, 0x3f, 0xc9, 0x40, 0xb9, 0x49, 0x1c, + 0xc2, 0xc9, 0xdd, 0x1e, 0x97, 0x2b, 0xb8, 0x09, 0xa8, 0x43, 0x4d, 0x8b, 0xb4, 0x08, 0xb5, 0xbd, + 0xf6, 0x3e, 0xb1, 0x3c, 0xb7, 0xcd, 0xe4, 0x11, 0x65, 0x8d, 0xcf, 0x0d, 0x07, 0x35, 0x74, 0x6b, + 0x8c, 0x8b, 0x2f, 0xd0, 0x40, 0x0e, 0x94, 0x7b, 0x54, 0xfe, 0xb6, 0xb9, 0x2a, 0xe0, 0x22, 0x71, + 0xde, 0x4e, 0xb7, 0xf6, 0x56, 0x5c, 0xd5, 0x58, 0x1e, 0x0e, 0x6a, 0xe5, 0x04, 0x09, 0x27, 0xc1, + 0xd1, 0x77, 0x61, 0xc9, 0xa3, 0xbd, 0x23, 0xd3, 0x6d, 0x92, 0x1e, 0x71, 0xdb, 0xc4, 0xe5, 0x4c, + 0x26, 0x73, 0xc1, 0x58, 0x11, 0x65, 0xf7, 0xee, 0x08, 0x0f, 0x8f, 0x49, 0xa3, 0x07, 0xb0, 0xdc, + 0xa3, 0x5e, 0xcf, 0xec, 0x98, 0x02, 0xb1, 0xe5, 0x39, 0xb6, 0x75, 0x26, 0x93, 0xbd, 0x68, 0x5c, + 0x1d, 0x0e, 0x6a, 0xcb, 0xad, 0x51, 0xe6, 0xf9, 0xa0, 0xf6, 0x8a, 0xdc, 0x3a, 0x41, 0x89, 0x98, + 0x78, 0x1c, 0x46, 0xdf, 0x85, 0x42, 0xb3, 0x4f, 0x25, 0x05, 0x7d, 0x1b, 0x0a, 0x6d, 0xf5, 0x5b, + 0xed, 0xea, 0x6b, 0xc1, 0x9d, 0x14, 0xc8, 0x9c, 0x0f, 0x6a, 0x65, 0x71, 0xf5, 0xd6, 0x03, 0x02, + 0x0e, 0x55, 0xf4, 0x87, 0x50, 0xde, 0x39, 0xed, 0x79, 0x94, 0x07, 0xe7, 0xf5, 0x06, 0xe4, 0x89, + 0x24, 0x48, 0xb4, 0x42, 0x54, 0x48, 0x7d, 0x31, 0xac, 0xb8, 0x22, 0xb1, 0xc9, 0xa9, 0x69, 0x71, + 0x55, 0x11, 0xc3, 0xc4, 0xde, 0x11, 0x44, 0xec, 0xf3, 0xf4, 0x27, 0x1a, 0xc0, 0x2d, 0x12, 0x62, + 0x6f, 0xc1, 0x62, 0x90, 0x14, 0xc9, 0x5c, 0xfd, 0xbc, 0xd2, 0x5e, 0xc4, 0x49, 0x36, 0x1e, 0x95, + 0x47, 0x2d, 0x58, 0xb1, 0x5d, 0xcb, 0xe9, 0xb7, 0xc9, 0x3d, 0xd7, 0x76, 0x6d, 0x6e, 0x9b, 0x8e, + 0xfd, 0x93, 0xb0, 0x2e, 0x7f, 0x41, 0xe1, 0xac, 0xec, 0x5e, 0x20, 0x83, 0x2f, 0xd4, 0xd4, 0x1f, + 0x42, 0x51, 0x56, 0x08, 0x51, 0x9c, 0xa3, 0x72, 
0xa5, 0xbd, 0xa0, 0x5c, 0x05, 0xd5, 0x3d, 0x33, + 0xa9, 0xba, 0xc7, 0x12, 0xc2, 0x81, 0xb2, 0xaf, 0x1b, 0x5c, 0x38, 0xa9, 0x2c, 0x5c, 0x85, 0x42, + 0xb0, 0x70, 0x65, 0x25, 0x6c, 0x34, 0x02, 0x20, 0x1c, 0x4a, 0xc4, 0xac, 0x1d, 0x41, 0xa2, 0xda, + 0xa5, 0x33, 0x16, 0xab, 0xbe, 0x99, 0x17, 0x57, 0xdf, 0x98, 0xa5, 0x9f, 0x41, 0x65, 0x52, 0x77, + 0xf2, 0x12, 0xf5, 0x38, 0xbd, 0x2b, 0xfa, 0xaf, 0x35, 0x58, 0x8a, 0x23, 0xa5, 0x3f, 0xbe, 0xf4, + 0x46, 0x2e, 0xbf, 0xc7, 0x63, 0x3b, 0xf2, 0x5b, 0x0d, 0x56, 0x12, 0x4b, 0x9b, 0xea, 0xc4, 0xa7, + 0x70, 0x2a, 0x1e, 0x1c, 0xd9, 0x29, 0x82, 0xa3, 0x01, 0xa5, 0xdd, 0x30, 0xee, 0xe9, 0xe5, 0x9d, + 0x8f, 0xfe, 0x17, 0x0d, 0xe6, 0x63, 0x1a, 0x0c, 0x3d, 0x84, 0x39, 0x51, 0xdf, 0x6c, 0xb7, 0xa3, + 0xba, 0xb2, 0x94, 0x97, 0x65, 0x0c, 0x24, 0x5a, 0x57, 0xcb, 0x47, 0xc2, 0x01, 0x24, 0x6a, 0x41, + 0x9e, 0x12, 0xd6, 0x77, 0xb8, 0x2a, 0xed, 0x57, 0x53, 0x5e, 0x6b, 0xdc, 0xe4, 0x7d, 0x66, 0x80, + 0xa8, 0x51, 0x58, 0xea, 0x63, 0x85, 0xa3, 0xff, 0x3d, 0x03, 0xe5, 0xdb, 0xe6, 0x01, 0x71, 0xf6, + 0x89, 0x43, 0x2c, 0xee, 0x51, 0xf4, 0x53, 0x28, 0x75, 0x4d, 0x6e, 0x1d, 0x49, 0x6a, 0xd0, 0x5b, + 0x36, 0xd3, 0x19, 0x4a, 0x20, 0xd5, 0xf7, 0x22, 0x98, 0x1d, 0x97, 0xd3, 0x33, 0xe3, 0x15, 0xb5, + 0xb0, 0x52, 0x8c, 0x83, 0xe3, 0xd6, 0xe4, 0x83, 0x40, 0x7e, 0xef, 0x9c, 0xf6, 0xc4, 0x25, 0x3a, + 0xfd, 0x3b, 0x24, 0xe1, 0x02, 0x26, 0xef, 0xf7, 0x6d, 0x4a, 0xba, 0xc4, 0xe5, 0xd1, 0x83, 0x60, + 0x6f, 0x04, 0x1f, 0x8f, 0x59, 0x5c, 0xbd, 0x01, 0x4b, 0xa3, 0xce, 0xa3, 0x25, 0xc8, 0x1e, 0x93, + 0x33, 0x3f, 0x16, 0xb0, 0xf8, 0x89, 0x56, 0x20, 0x77, 0x62, 0x3a, 0x7d, 0x55, 0x7f, 0xb0, 0xff, + 0x71, 0x3d, 0x73, 0x4d, 0xd3, 0x7f, 0xaf, 0x41, 0x65, 0x92, 0x23, 0xe8, 0x8b, 0x31, 0x20, 0xa3, + 0xa4, 0xbc, 0xca, 0xbe, 0x4b, 0xce, 0x7c, 0xd4, 0x1d, 0x28, 0x78, 0x3d, 0xf1, 0x84, 0xf3, 0xa8, + 0x8a, 0xf3, 0x37, 0x83, 0xd8, 0xbd, 0xab, 0xe8, 0xe7, 0x83, 0xda, 0x95, 0x04, 0x7c, 0xc0, 0xc0, + 0xa1, 0x2a, 0xd2, 0x21, 0x2f, 0xfd, 0x11, 0x97, 0xb2, 0x68, 0x9f, 0xe4, 0xe1, 0xdf, 0x97, 0x14, + 0xac, 0x38, 0xfa, 0x9f, 0x34, 0x98, 0x95, 0xed, 0xe1, 0x43, 0x28, 0x88, 0xfd, 0x6b, 0x9b, 0xdc, + 0x94, 0x7e, 0xa5, 0x7e, 0x4c, 0x08, 0xed, 0x3d, 0xc2, 0xcd, 0x28, 0xbf, 0x02, 0x0a, 0x0e, 0x11, + 0x11, 0x86, 0x9c, 0xcd, 0x49, 0x37, 0x38, 0xc8, 0xb7, 0x26, 0x42, 0xab, 0xf7, 0x6f, 0x1d, 0x9b, + 0x8f, 0x76, 0x4e, 0x39, 0x71, 0xc5, 0x61, 0x44, 0xc5, 0x60, 0x57, 0x60, 0x60, 0x1f, 0x4a, 0xff, + 0x83, 0x06, 0xa1, 0x29, 0x91, 0xee, 0x8c, 0x38, 0x87, 0xb7, 0x6d, 0xf7, 0x58, 0x6d, 0x6b, 0xe8, + 0xce, 0xbe, 0xa2, 0xe3, 0x50, 0xe2, 0xa2, 0x2b, 0x36, 0x33, 0xe5, 0x15, 0x7b, 0x15, 0x0a, 0x96, + 0xe7, 0x72, 0xdb, 0xed, 0x8f, 0xd5, 0x97, 0x6d, 0x45, 0xc7, 0xa1, 0x84, 0xfe, 0x2c, 0x0b, 0x25, + 0xe1, 0x6b, 0x70, 0xc7, 0x7f, 0x13, 0xca, 0x4e, 0xfc, 0xf4, 0x94, 0xcf, 0x57, 0x14, 0x44, 0x32, + 0x1f, 0x71, 0x52, 0x56, 0x28, 0x1f, 0xda, 0xc4, 0x69, 0x87, 0xca, 0x99, 0xa4, 0xf2, 0xcd, 0x38, + 0x13, 0x27, 0x65, 0x45, 0x9d, 0x7d, 0x24, 0xe2, 0x5a, 0x35, 0x6a, 0xe1, 0xd6, 0x7e, 0x4f, 0x10, + 0xb1, 0xcf, 0xbb, 0x68, 0x7f, 0x66, 0xa7, 0xdc, 0x9f, 0xeb, 0xb0, 0x20, 0x0e, 0xd2, 0xeb, 0xf3, + 0xa0, 0x9b, 0xcd, 0xc9, 0xbe, 0x0b, 0x0d, 0x07, 0xb5, 0x85, 0xf7, 0x12, 0x1c, 0x3c, 0x22, 0x39, + 0xb1, 0x7d, 0xc9, 0x7f, 0xda, 0xf6, 0x45, 0xac, 0xda, 0xb1, 0xbb, 0x36, 0xaf, 0xcc, 0x49, 0x27, + 0xc2, 0x55, 0xdf, 0x16, 0x44, 0xec, 0xf3, 0x12, 0x47, 0x5a, 0xb8, 0xf4, 0x48, 0xdf, 0x87, 0xe2, + 0x9e, 0x6d, 0x51, 0x4f, 0xac, 0x45, 0x5c, 0x4c, 0x2c, 0xd1, 0xb4, 0x87, 0x05, 0x3c, 0x58, 0x63, + 0xc0, 0x17, 0xae, 0xb8, 0xa6, 0xeb, 0xf9, 0xad, 0x79, 0x2e, 0x72, 0xe5, 
0x8e, 0x20, 0x62, 0x9f, + 0x77, 0x7d, 0x45, 0xdc, 0x47, 0xbf, 0x7c, 0x52, 0x9b, 0x79, 0xfc, 0xa4, 0x36, 0xf3, 0xe1, 0x13, + 0x75, 0x37, 0xfd, 0x0b, 0x00, 0xee, 0x1e, 0xfc, 0x98, 0x58, 0x7e, 0xcc, 0x5f, 0xfe, 0x2a, 0x17, + 0x3d, 0x86, 0x1a, 0x06, 0xc9, 0x17, 0x6c, 0x66, 0xa4, 0xc7, 0x88, 0xf1, 0x70, 0x42, 0x12, 0x35, + 0xa0, 0x18, 0xbe, 0xd4, 0x55, 0x7c, 0x2f, 0x2b, 0xb5, 0x62, 0xf8, 0x9c, 0xc7, 0x91, 0x4c, 0x22, + 0x01, 0x67, 0x2f, 0x4d, 0x40, 0x03, 0xb2, 0x7d, 0xbb, 0x2d, 0x43, 0xa2, 0x68, 0x7c, 0x35, 0x28, + 0x80, 0xf7, 0x76, 0x9b, 0xe7, 0x83, 0xda, 0x6b, 0x93, 0x66, 0x5c, 0xfc, 0xac, 0x47, 0x58, 0xfd, + 0xde, 0x6e, 0x13, 0x0b, 0xe5, 0x8b, 0x82, 0x34, 0x3f, 0x65, 0x90, 0x6e, 0x02, 0xa8, 0x55, 0x0b, + 0x6d, 0x3f, 0x36, 0xc2, 0xa9, 0xc5, 0xad, 0x90, 0x83, 0x63, 0x52, 0x88, 0xc1, 0xb2, 0x45, 0x89, + 0xfc, 0x2d, 0x8e, 0x9e, 0x71, 0xb3, 0xeb, 0xbf, 0xdb, 0x4b, 0x9b, 0x5f, 0x4e, 0x57, 0x31, 0x85, + 0x9a, 0xf1, 0xaa, 0x32, 0xb3, 0xbc, 0x3d, 0x0a, 0x86, 0xc7, 0xf1, 0x91, 0x07, 0xcb, 0x6d, 0xf5, + 0xea, 0x89, 0x8c, 0x16, 0xa7, 0x36, 0x7a, 0x45, 0x18, 0x6c, 0x8e, 0x02, 0xe1, 0x71, 0x6c, 0xf4, + 0x43, 0x58, 0x0d, 0x88, 0xe3, 0x4f, 0xcf, 0x0a, 0xc8, 0x9d, 0xaa, 0x8a, 0xc7, 0x70, 0x73, 0xa2, + 0x14, 0x7e, 0x01, 0x02, 0x6a, 0x43, 0xde, 0xf1, 0xbb, 0x8b, 0x92, 0xbc, 0x11, 0xbe, 0x95, 0x6e, + 0x15, 0x51, 0xf4, 0xd7, 0xe3, 0x5d, 0x45, 0xf8, 0xfc, 0x52, 0x0d, 0x85, 0xc2, 0x46, 0xa7, 0x50, + 0x32, 0x5d, 0xd7, 0xe3, 0xa6, 0xff, 0x18, 0x9e, 0x97, 0xa6, 0xb6, 0xa6, 0x36, 0xb5, 0x15, 0x61, + 0x8c, 0x74, 0x31, 0x31, 0x0e, 0x8e, 0x9b, 0x42, 0x8f, 0x60, 0xd1, 0x7b, 0xe4, 0x12, 0x8a, 0xc9, + 0x21, 0xa1, 0xc4, 0xb5, 0x08, 0xab, 0x94, 0xa5, 0xf5, 0xaf, 0xa5, 0xb4, 0x9e, 0x50, 0x8e, 0x42, + 0x3a, 0x49, 0x67, 0x78, 0xd4, 0x0a, 0xaa, 0x03, 0x1c, 0xda, 0xae, 0xea, 0x45, 0x2b, 0x0b, 0xd1, + 0xe8, 0xe9, 0x66, 0x48, 0xc5, 0x31, 0x09, 0xf4, 0x75, 0x28, 0x59, 0x4e, 0x9f, 0x71, 0xe2, 0xcf, + 0xb8, 0x16, 0x65, 0x06, 0x85, 0xeb, 0xdb, 0x8e, 0x58, 0x38, 0x2e, 0x87, 0x8e, 0x60, 0xde, 0x8e, + 0x35, 0xbd, 0x95, 0x25, 0x19, 0x8b, 0x9b, 0x53, 0x77, 0xba, 0xcc, 0x58, 0x12, 0x95, 0x28, 0x4e, + 0xc1, 0x09, 0xe4, 0xd5, 0x6f, 0x40, 0xe9, 0x53, 0xf6, 0x60, 0xa2, 0x87, 0x1b, 0x3d, 0xba, 0xa9, + 0x7a, 0xb8, 0xbf, 0x66, 0x60, 0x21, 0xb9, 0xe1, 0xe1, 0x5b, 0x47, 0x9b, 0x38, 0xb3, 0x0c, 0xaa, + 0x72, 0x76, 0x62, 0x55, 0x56, 0xc5, 0x6f, 0xf6, 0x65, 0x8a, 0xdf, 0x26, 0x80, 0xd9, 0xb3, 0x83, + 0xba, 0xe7, 0xd7, 0xd1, 0xb0, 0x72, 0x45, 0x53, 0x34, 0x1c, 0x93, 0x92, 0x53, 0x49, 0xcf, 0xe5, + 0xd4, 0x73, 0x1c, 0x42, 0xd5, 0x65, 0xea, 0x4f, 0x25, 0x43, 0x2a, 0x8e, 0x49, 0xa0, 0x9b, 0x80, + 0x0e, 0x1c, 0xcf, 0x3a, 0x96, 0x5b, 0x10, 0xe4, 0xb9, 0xac, 0x92, 0x05, 0x7f, 0x28, 0x65, 0x8c, + 0x71, 0xf1, 0x05, 0x1a, 0xfa, 0x5d, 0x48, 0x8e, 0x91, 0xd0, 0x0d, 0x7f, 0x03, 0xb4, 0x70, 0xce, + 0x33, 0xdd, 0xe2, 0xf5, 0xab, 0x50, 0xc4, 0x9e, 0xc7, 0x5b, 0x26, 0x3f, 0x62, 0xa8, 0x06, 0xb9, + 0x9e, 0xf8, 0xa1, 0x66, 0x84, 0x72, 0xec, 0x2b, 0x39, 0xd8, 0xa7, 0xeb, 0xbf, 0xd2, 0xe0, 0xd5, + 0x89, 0x23, 0x3b, 0xb1, 0x91, 0x56, 0xf8, 0xa5, 0x5c, 0x0a, 0x37, 0x32, 0x92, 0xc3, 0x31, 0x29, + 0xd1, 0x80, 0x25, 0xe6, 0x7c, 0xa3, 0x0d, 0x58, 0xc2, 0x1a, 0x4e, 0xca, 0xea, 0xff, 0xce, 0x40, + 0xde, 0x7f, 0x8d, 0xfd, 0x97, 0x7b, 0xee, 0x37, 0x20, 0xcf, 0xa4, 0x1d, 0xe5, 0x5e, 0x58, 0x24, + 0x7d, 0xeb, 0x58, 0x71, 0x45, 0xef, 0xd2, 0x25, 0x8c, 0x99, 0x9d, 0x20, 0x66, 0xc3, 0xde, 0x65, + 0xcf, 0x27, 0xe3, 0x80, 0x8f, 0xde, 0x11, 0x8f, 0x4f, 0x93, 0x85, 0xed, 0x60, 0x35, 0x80, 0xc4, + 0x92, 0x7a, 0x3e, 0xa8, 0xcd, 0x2b, 0x70, 0xf9, 0x8d, 0x95, 0x34, 0x7a, 0x00, 0x73, 0x6d, 0xc2, + 
0x4d, 0xdb, 0xf1, 0xbb, 0xc0, 0xd4, 0x03, 0x49, 0x1f, 0xac, 0xe9, 0xab, 0x1a, 0x25, 0xe1, 0x93, + 0xfa, 0xc0, 0x01, 0xa0, 0xc8, 0x37, 0xcb, 0x6b, 0xfb, 0xd3, 0xf9, 0x5c, 0x94, 0x6f, 0xdb, 0x5e, + 0x9b, 0x60, 0xc9, 0xd1, 0x1f, 0x6b, 0x50, 0xf2, 0x91, 0xb6, 0xcd, 0x3e, 0x23, 0x68, 0x23, 0x5c, + 0x85, 0x7f, 0xdc, 0xc1, 0x55, 0x3c, 0xfb, 0xde, 0x59, 0x8f, 0x9c, 0x0f, 0x6a, 0x45, 0x29, 0x26, + 0x3e, 0xc2, 0x05, 0xc4, 0xf6, 0x28, 0x73, 0xc9, 0x1e, 0xbd, 0x0e, 0x39, 0xd9, 0x71, 0xab, 0xcd, + 0x0c, 0xfb, 0x3b, 0xd9, 0x95, 0x63, 0x9f, 0xa7, 0x7f, 0x9c, 0x81, 0x72, 0x62, 0x71, 0x29, 0x9a, + 0xb9, 0x70, 0x42, 0x92, 0x49, 0x31, 0x75, 0x9b, 0xfc, 0x3f, 0x95, 0xef, 0x43, 0xde, 0x12, 0xeb, + 0x0b, 0xfe, 0xa9, 0xb5, 0x31, 0xcd, 0x51, 0xc8, 0x9d, 0x89, 0x22, 0x49, 0x7e, 0x32, 0xac, 0x00, + 0xd1, 0x2d, 0x58, 0xa6, 0x84, 0xd3, 0xb3, 0xad, 0x43, 0x4e, 0x68, 0xbc, 0xed, 0xcf, 0x45, 0xed, + 0x0e, 0x1e, 0x15, 0xc0, 0xe3, 0x3a, 0x41, 0x85, 0xcc, 0xbf, 0x44, 0x85, 0xd4, 0x1d, 0x98, 0xfd, + 0x1f, 0xb6, 0xe6, 0x3f, 0x80, 0x62, 0xd4, 0x3c, 0x7d, 0xc6, 0x26, 0xf5, 0x1f, 0x41, 0x41, 0x44, + 0x63, 0xd0, 0xf4, 0x5f, 0x72, 0x01, 0x25, 0xaf, 0x86, 0x4c, 0x9a, 0xab, 0x41, 0xdf, 0x04, 0xff, + 0x5f, 0x65, 0xa2, 0x9a, 0xfa, 0x0f, 0xf5, 0x58, 0x35, 0x8d, 0xbf, 0xba, 0x63, 0x93, 0xb2, 0x5f, + 0x68, 0x00, 0xf2, 0xd5, 0xb8, 0x73, 0x42, 0x5c, 0x2e, 0x1c, 0x13, 0x27, 0x30, 0xea, 0x98, 0x4c, + 0x23, 0xc9, 0x41, 0xf7, 0x20, 0xef, 0xc9, 0xa6, 0x4a, 0x8d, 0xae, 0xa6, 0x9c, 0x02, 0x84, 0x51, + 0xe7, 0x77, 0x66, 0x58, 0x81, 0x19, 0xeb, 0x4f, 0x9f, 0x57, 0x67, 0x9e, 0x3d, 0xaf, 0xce, 0x7c, + 0xf4, 0xbc, 0x3a, 0xf3, 0xc1, 0xb0, 0xaa, 0x3d, 0x1d, 0x56, 0xb5, 0x67, 0xc3, 0xaa, 0xf6, 0xd1, + 0xb0, 0xaa, 0x7d, 0x3c, 0xac, 0x6a, 0x8f, 0x3f, 0xa9, 0xce, 0x3c, 0xc8, 0x9c, 0x6c, 0xfc, 0x27, + 0x00, 0x00, 0xff, 0xff, 0x66, 0xe7, 0x2a, 0x84, 0x4b, 0x20, 0x00, 0x00, } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto index a6be57a87c..ea48226b73 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -72,6 +72,14 @@ message APIResource { // namespaced indicates if a resource is namespaced or not. optional bool namespaced = 2; + // group is the preferred group of the resource. Empty implies the group of the containing resource list. + // For subresources, this may have a different value, for example: Scale". + optional string group = 8; + + // version is the preferred version of the resource. Empty implies the version of the containing resource list + // For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)". + optional string version = 9; + // kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo') optional string kind = 3; @@ -101,6 +109,7 @@ message APIResourceList { // discover the API at /api, which is the root path of the legacy v1 API. // // +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object message APIVersions { // versions are the api versions that are available. repeated string versions = 1; @@ -249,6 +258,8 @@ message Initializers { // When the last pending initializer is removed, and no failing result is set, the initializers // struct will be set to nil and the object is considered as initialized and visible to all // clients. 
+ // +patchMergeKey=name + // +patchStrategy=merge repeated Initializer pending = 1; // If result is set with the Failure field, the object will be persisted to storage and then deleted, @@ -280,7 +291,7 @@ message LabelSelectorRequirement { optional string key = 1; // operator represents a key's relationship to a set of values. - // Valid operators ard In, NotIn, Exists and DoesNotExist. + // Valid operators are In, NotIn, Exists and DoesNotExist. optional string operator = 2; // values is an array of string values. If the operator is In or NotIn, @@ -291,10 +302,21 @@ message LabelSelectorRequirement { repeated string values = 3; } +// List holds a list of objects, which may not be known by the server. +message List { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + optional ListMeta metadata = 1; + + // List of objects + repeated k8s.io.apimachinery.pkg.runtime.RawExtension items = 2; +} + // ListMeta describes metadata that synthetic resources must have, including lists and // various status objects. A resource may have only one of {ObjectMeta, ListMeta}. message ListMeta { - // SelfLink is a URL representing this object. + // selfLink is a URL representing this object. // Populated by the system. // Read-only. // +optional @@ -308,6 +330,14 @@ message ListMeta { // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency // +optional optional string resourceVersion = 2; + + // continue may be set if the user set a limit on the number of items returned, and indicates that + // the server has more data available. The value is opaque and may be used to issue another request + // to the endpoint that served this list to retrieve the next set of available objects. Continuing a + // list may not be possible if the server configuration has changed or more than a few minutes have + // passed. The resourceVersion field returned when using this continue value will be identical to + // the value in the first response. + optional string continue = 3; } // ListOptions is the query options to a standard REST list call. @@ -343,6 +373,34 @@ message ListOptions { // Timeout for the list/watch call. // +optional optional int64 timeoutSeconds = 5; + + // limit is a maximum number of responses to return for a list call. If more items exist, the + // server will set the `continue` field on the list metadata to a value that can be used with the + // same initial query to retrieve the next set of results. Setting a limit may return fewer than + // the requested amount of items (up to zero items) in the event all requested objects are + // filtered out and clients should only use the presence of the continue field to determine whether + // more results are available. Servers may choose not to support the limit argument and will return + // all of the available results. If limit is specified and the continue field is empty, clients may + // assume that no more results are available. This field is not supported if watch is true. + // + // The server guarantees that the objects returned when using continue will be identical to issuing + // a single list call without a limit - that is, no objects created, modified, or deleted after the + // first request is issued will be included in any subsequent continued requests. 
This is sometimes + // referred to as a consistent snapshot, and ensures that a client that is using limit to receive + // smaller chunks of a very large result can ensure they see all possible objects. If objects are + // updated during a chunked list the version of the object that was present at the time the first list + // result was calculated is returned. + optional int64 limit = 7; + + // The continue option should be set when retrieving more results from the server. Since this value + // is server defined, clients may only use the continue value from a previous query result with + // identical query parameters (except for the value of continue) and the server may reject a continue + // value it does not recognize. If the specified continue value is no longer valid whether due to + // expiration (generally five to fifteen minutes) or a configuration change on the server the server + // will respond with a 410 ResourceExpired error indicating the client must restart their list without + // the continue field. This field is not supported when watch is true. Clients may start a watch from + // the last resourceVersion value returned by the server and not miss any modifications. + optional string continue = 8; } // MicroTime is version of Time with microsecond level precision. @@ -677,7 +735,9 @@ message StatusDetails { // +optional repeated StatusCause causes = 4; - // If specified, the time in seconds before the operation should be retried. + // If specified, the time in seconds before the operation should be retried. Some errors may indicate + // the client must take an alternate action - for those errors this field may indicate how long to wait + // before taking the alternate action. // +optional optional int32 retryAfterSeconds = 5; } @@ -721,6 +781,8 @@ message Timestamp { // TypeMeta describes an individual object in an API response or request // with strings representing the type of the object and its API schema version. // Structures that are versioned or persisted should inline TypeMeta. +// +// +k8s:deepcopy-gen=false message TypeMeta { // Kind is a string value representing the REST resource this object represents. // Servers may infer this from the endpoint the client submits requests to. @@ -751,6 +813,8 @@ message Verbs { // Event represents a single event to a watched resource. // // +protobuf=true +// +k8s:deepcopy-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object message WatchEvent { optional string type = 1; diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go index 0ee7d99ca1..c13fe4af8e 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go @@ -67,20 +67,33 @@ type Object interface { // ListMetaAccessor retrieves the list interface from an object type ListMetaAccessor interface { - GetListMeta() List + GetListMeta() ListInterface } -// List lets you work with list metadata from any of the versioned or +// Common lets you work with core metadata from any of the versioned or // internal API objects. Attempting to set or retrieve a field on an object that does // not support that field will be a no-op and return a default value. 
// TODO: move this, and TypeMeta and ListMeta, to a different package -type List interface { +type Common interface { GetResourceVersion() string SetResourceVersion(version string) GetSelfLink() string SetSelfLink(selfLink string) } +// ListInterface lets you work with list metadata from any of the versioned or +// internal API objects. Attempting to set or retrieve a field on an object that does +// not support that field will be a no-op and return a default value. +// TODO: move this, and TypeMeta and ListMeta, to a different package +type ListInterface interface { + GetResourceVersion() string + SetResourceVersion(version string) + GetSelfLink() string + SetSelfLink(selfLink string) + GetContinue() string + SetContinue(c string) +} + // Type exposes the type and APIVersion of versioned or internal API objects. // TODO: move this, and TypeMeta and ListMeta, to a different package type Type interface { @@ -94,6 +107,8 @@ func (meta *ListMeta) GetResourceVersion() string { return meta.ResourceV func (meta *ListMeta) SetResourceVersion(version string) { meta.ResourceVersion = version } func (meta *ListMeta) GetSelfLink() string { return meta.SelfLink } func (meta *ListMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink } +func (meta *ListMeta) GetContinue() string { return meta.Continue } +func (meta *ListMeta) SetContinue(c string) { meta.Continue = c } func (obj *TypeMeta) GetObjectKind() schema.ObjectKind { return obj } @@ -107,7 +122,7 @@ func (obj *TypeMeta) GroupVersionKind() schema.GroupVersionKind { return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) } -func (obj *ListMeta) GetListMeta() List { return obj } +func (obj *ListMeta) GetListMeta() ListInterface { return obj } func (obj *ObjectMeta) GetObjectMeta() Object { return obj } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go index d55f446b0d..a09d79571c 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go @@ -20,7 +20,7 @@ import ( "encoding/json" "time" - "k8s.io/apimachinery/pkg/openapi" + openapi "k8s.io/kube-openapi/pkg/common" "github.com/go-openapi/spec" "github.com/google/gofuzz" @@ -40,8 +40,8 @@ type MicroTime struct { // DeepCopy returns a deep-copy of the MicroTime value. The underlying time.Time // type is effectively immutable in the time API, so it is safe to // copy-by-assign, despite the presence of (unexported) Pointer fields. -func (t MicroTime) DeepCopy() MicroTime { - return t +func (t *MicroTime) DeepCopyInto(out *MicroTime) { + *out = *t } // String returns the representation of the time. @@ -74,22 +74,22 @@ func (t *MicroTime) IsZero() bool { } // Before reports whether the time instant t is before u. -func (t MicroTime) Before(u MicroTime) bool { +func (t *MicroTime) Before(u *MicroTime) bool { return t.Time.Before(u.Time) } // Equal reports whether the time instant t is equal to u. -func (t MicroTime) Equal(u MicroTime) bool { +func (t *MicroTime) Equal(u *MicroTime) bool { return t.Time.Equal(u.Time) } // BeforeTime reports whether the time instant t is before second-lever precision u. -func (t MicroTime) BeforeTime(u Time) bool { +func (t *MicroTime) BeforeTime(u *Time) bool { return t.Time.Before(u.Time) } // EqualTime reports whether the time instant t is equal to second-lever precision u. 
-func (t MicroTime) EqualTime(u Time) bool { +func (t *MicroTime) EqualTime(u *Time) bool { return t.Time.Equal(u.Time) } @@ -175,10 +175,10 @@ func (t *MicroTime) Fuzz(c fuzz.Continue) { if t == nil { return } - // Allow for about 1000 years of randomness. Leave off nanoseconds - // because JSON doesn't represent them so they can't round-trip - // properly. - t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60*1000*1000), 0) + // Allow for about 1000 years of randomness. Accurate to a tenth of + // micro second. Leave off nanoseconds because JSON doesn't + // represent them so they can't round-trip properly. + t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 1000*c.Rand.Int63n(1000000)) } var _ fuzz.Interface = &MicroTime{} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go index a1e01f3443..435f6a8f59 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go @@ -20,7 +20,7 @@ import ( "encoding/json" "time" - "k8s.io/apimachinery/pkg/openapi" + openapi "k8s.io/kube-openapi/pkg/common" "github.com/go-openapi/spec" "github.com/google/gofuzz" @@ -37,11 +37,11 @@ type Time struct { time.Time `protobuf:"-"` } -// DeepCopy returns a deep-copy of the Time value. The underlying time.Time +// DeepCopyInto creates a deep-copy of the Time value. The underlying time.Time // type is effectively immutable in the time API, so it is safe to // copy-by-assign, despite the presence of (unexported) Pointer fields. -func (t Time) DeepCopy() Time { - return t +func (t *Time) DeepCopyInto(out *Time) { + *out = *t } // String returns the representation of the time. @@ -74,12 +74,12 @@ func (t *Time) IsZero() bool { } // Before reports whether the time instant t is before u. -func (t Time) Before(u Time) bool { +func (t *Time) Before(u *Time) bool { return t.Time.Before(u.Time) } // Equal reports whether the time instant t is equal to u. -func (t Time) Equal(u Time) bool { +func (t *Time) Equal(u *Time) bool { return t.Time.Equal(u.Time) } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go index 5520529dd3..ed72186b49 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go @@ -42,7 +42,10 @@ func (m *Time) ProtoTime() *Timestamp { } return &Timestamp{ Seconds: m.Time.Unix(), - Nanos: int32(m.Time.Nanosecond()), + // leaving this here for the record. our JSON only handled seconds, so this results in writes by + // protobuf clients storing values that aren't read by json clients, which results in unexpected + // field mutation, which fails various validation and equality code. + // Nanos: int32(m.Time.Nanosecond()), } } @@ -64,7 +67,11 @@ func (m *Time) Unmarshal(data []byte) error { if err := p.Unmarshal(data); err != nil { return err } - m.Time = time.Unix(p.Seconds, int64(p.Nanos)).Local() + // leaving this here for the record. our JSON only handled seconds, so this results in writes by + // protobuf clients storing values that aren't read by json clients, which results in unexpected + // field mutation, which fails various validation and equality code. 
+ // m.Time = time.Unix(p.Seconds, int64(p.Nanos)).Local() + m.Time = time.Unix(p.Seconds, int64(0)).Local() return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index c5ac31f791..13ae66c6f4 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package unversioned contains API types that are common to all versions. +// Package v1 contains API types that are common to all versions. // // The package contains two categories of types: // - external (serialized) types that lack their own version (e.g TypeMeta) @@ -29,12 +29,15 @@ import ( "fmt" "strings" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" ) // TypeMeta describes an individual object in an API response or request // with strings representing the type of the object and its API schema version. // Structures that are versioned or persisted should inline TypeMeta. +// +// +k8s:deepcopy-gen=false type TypeMeta struct { // Kind is a string value representing the REST resource this object represents. // Servers may infer this from the endpoint the client submits requests to. @@ -55,7 +58,7 @@ type TypeMeta struct { // ListMeta describes metadata that synthetic resources must have, including lists and // various status objects. A resource may have only one of {ObjectMeta, ListMeta}. type ListMeta struct { - // SelfLink is a URL representing this object. + // selfLink is a URL representing this object. // Populated by the system. // Read-only. // +optional @@ -69,6 +72,14 @@ type ListMeta struct { // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency // +optional ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,2,opt,name=resourceVersion"` + + // continue may be set if the user set a limit on the number of items returned, and indicates that + // the server has more data available. The value is opaque and may be used to issue another request + // to the endpoint that served this list to retrieve the next set of available objects. Continuing a + // list may not be possible if the server configuration has changed or more than a few minutes have + // passed. The resourceVersion field returned when using this continue value will be identical to + // the value in the first response. + Continue string `json:"continue,omitempty" protobuf:"bytes,3,opt,name=continue"` } // These are internal finalizer values for Kubernetes-like APIs, must be qualified name unless defined here @@ -245,7 +256,9 @@ type Initializers struct { // When the last pending initializer is removed, and no failing result is set, the initializers // struct will be set to nil and the object is considered as initialized and visible to all // clients. - Pending []Initializer `json:"pending" protobuf:"bytes,1,rep,name=pending"` + // +patchMergeKey=name + // +patchStrategy=merge + Pending []Initializer `json:"pending" protobuf:"bytes,1,rep,name=pending" patchStrategy:"merge" patchMergeKey:"name"` // If result is set with the Failure field, the object will be persisted to storage and then deleted, // ensuring that other clients can observe the deletion. 
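The time_proto.go hunk above deliberately stops encoding and decoding nanoseconds so protobuf and JSON clients observe the same stored value. A minimal sketch of the resulting behavior, assuming only the Marshal helper that lives alongside the Unmarshal shown in that hunk (illustrative only, not part of this patch):

package timeexample

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// secondsOnlyRoundTrip shows that a metav1.Time survives a proto round trip
// only at second precision; the 123456789ns below are intentionally dropped.
func secondsOnlyRoundTrip() (metav1.Time, error) {
	in := metav1.NewTime(time.Unix(1500000000, 123456789))
	data, err := in.Marshal()
	if err != nil {
		return metav1.Time{}, err
	}
	var out metav1.Time
	// out.Time ends up equal to time.Unix(1500000000, 0).Local(), matching
	// what a JSON client (which only serializes seconds) would read back.
	err = out.Unmarshal(data)
	return out, err
}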
Result *Status `json:"result,omitempty" protobuf:"bytes,2,opt,name=result"` @@ -298,6 +311,8 @@ type OwnerReference struct { BlockOwnerDeletion *bool `json:"blockOwnerDeletion,omitempty" protobuf:"varint,7,opt,name=blockOwnerDeletion"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // ListOptions is the query options to a standard REST list call. type ListOptions struct { TypeMeta `json:",inline"` @@ -328,8 +343,37 @@ type ListOptions struct { // Timeout for the list/watch call. // +optional TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,5,opt,name=timeoutSeconds"` + + // limit is a maximum number of responses to return for a list call. If more items exist, the + // server will set the `continue` field on the list metadata to a value that can be used with the + // same initial query to retrieve the next set of results. Setting a limit may return fewer than + // the requested amount of items (up to zero items) in the event all requested objects are + // filtered out and clients should only use the presence of the continue field to determine whether + // more results are available. Servers may choose not to support the limit argument and will return + // all of the available results. If limit is specified and the continue field is empty, clients may + // assume that no more results are available. This field is not supported if watch is true. + // + // The server guarantees that the objects returned when using continue will be identical to issuing + // a single list call without a limit - that is, no objects created, modified, or deleted after the + // first request is issued will be included in any subsequent continued requests. This is sometimes + // referred to as a consistent snapshot, and ensures that a client that is using limit to receive + // smaller chunks of a very large result can ensure they see all possible objects. If objects are + // updated during a chunked list the version of the object that was present at the time the first list + // result was calculated is returned. + Limit int64 `json:"limit,omitempty" protobuf:"varint,7,opt,name=limit"` + // The continue option should be set when retrieving more results from the server. Since this value + // is server defined, clients may only use the continue value from a previous query result with + // identical query parameters (except for the value of continue) and the server may reject a continue + // value it does not recognize. If the specified continue value is no longer valid whether due to + // expiration (generally five to fifteen minutes) or a configuration change on the server the server + // will respond with a 410 ResourceExpired error indicating the client must restart their list without + // the continue field. This field is not supported when watch is true. Clients may start a watch from + // the last resourceVersion value returned by the server and not miss any modifications. + Continue string `json:"continue,omitempty" protobuf:"bytes,8,opt,name=continue"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // ExportOptions is the query options to the standard REST get call. type ExportOptions struct { TypeMeta `json:",inline"` @@ -339,6 +383,8 @@ type ExportOptions struct { Exact bool `json:"exact" protobuf:"varint,2,opt,name=exact"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // GetOptions is the standard query options to the standard REST get call. 
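The Limit and Continue fields added to ListOptions and ListMeta above define the chunked LIST protocol: request a page size, then feed the opaque continue token back with an otherwise identical query until it comes back empty. A minimal consumer-side sketch under those semantics; the lister function type is a hypothetical stand-in for whatever client actually issues the LIST call, and only the metav1 fields and accessors come from this patch:

package chunkexample

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// lister is a hypothetical helper that performs one LIST request.
type lister func(opts metav1.ListOptions) (*metav1.List, error)

// collectAll pages through a large collection 500 items at a time and
// returns the raw payload of every item.
func collectAll(list lister) ([][]byte, error) {
	var items [][]byte
	opts := metav1.ListOptions{Limit: 500}
	for {
		page, err := list(opts)
		if err != nil {
			return nil, err
		}
		for _, item := range page.Items {
			items = append(items, item.Raw) // runtime.RawExtension payload; decode as needed
		}
		// An empty continue token means the server has no further results.
		if page.GetContinue() == "" {
			return items, nil
		}
		// Reissue the same query, changing only the continue token.
		opts.Continue = page.GetContinue()
	}
}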
type GetOptions struct { TypeMeta `json:",inline"` @@ -370,6 +416,8 @@ const ( DeletePropagationForeground DeletionPropagation = "Foreground" ) +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // DeleteOptions may be provided when deleting an API object. type DeleteOptions struct { TypeMeta `json:",inline"` @@ -408,6 +456,8 @@ type Preconditions struct { UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // Status is a return value for calls that don't return other objects. type Status struct { TypeMeta `json:",inline"` @@ -469,7 +519,9 @@ type StatusDetails struct { // failure. Not all StatusReasons may provide detailed causes. // +optional Causes []StatusCause `json:"causes,omitempty" protobuf:"bytes,4,rep,name=causes"` - // If specified, the time in seconds before the operation should be retried. + // If specified, the time in seconds before the operation should be retried. Some errors may indicate + // the client must take an alternate action - for those errors this field may indicate how long to wait + // before taking the alternate action. // +optional RetryAfterSeconds int32 `json:"retryAfterSeconds,omitempty" protobuf:"varint,5,opt,name=retryAfterSeconds"` } @@ -574,6 +626,15 @@ const ( // Status code 504 StatusReasonTimeout StatusReason = "Timeout" + // StatusReasonTooManyRequests means the server experienced too many requests within a + // given window and that the client must wait to perform the action again. A client may + // always retry the request that led to this error, although the client should wait at least + // the number of seconds specified by the retryAfterSeconds field. + // Details (optional): + // "retryAfterSeconds" int32 - the number of seconds before the operation should be retried + // Status code 429 + StatusReasonTooManyRequests StatusReason = "TooManyRequests" + // StatusReasonBadRequest means that the request itself was invalid, because the request // doesn't make any sense, for example deleting a read-only object. This is different than // StatusReasonInvalid above which indicates that the API call could possibly succeed, but the @@ -656,10 +717,25 @@ const ( CauseTypeUnexpectedServerResponse CauseType = "UnexpectedServerResponse" ) +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// List holds a list of objects, which may not be known by the server. +type List struct { + TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of objects + Items []runtime.RawExtension `json:"items" protobuf:"bytes,2,rep,name=items"` +} + // APIVersions lists the versions that are available, to allow clients to // discover the API at /api, which is the root path of the legacy v1 API. // // +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type APIVersions struct { TypeMeta `json:",inline"` // versions are the api versions that are available. 
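The new StatusReasonTooManyRequests reason pairs with the broadened retryAfterSeconds wording above: when the server answers 429, the client is expected to wait at least the hinted number of seconds before retrying. A small sketch of that client-side handling, assuming the Status value has already been decoded from the error response:

package retryexample

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// retryDelay reports whether the failure is retryable after a pause and, if
// so, how long to wait, preferring the server-provided retryAfterSeconds hint.
func retryDelay(st *metav1.Status) (time.Duration, bool) {
	switch st.Reason {
	case metav1.StatusReasonTooManyRequests, metav1.StatusReasonServerTimeout, metav1.StatusReasonTimeout:
		if st.Details != nil && st.Details.RetryAfterSeconds > 0 {
			return time.Duration(st.Details.RetryAfterSeconds) * time.Second, true
		}
		return time.Second, true // no hint given; fall back to a short pause
	default:
		return 0, false
	}
}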
@@ -674,6 +750,8 @@ type APIVersions struct { ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" protobuf:"bytes,2,rep,name=serverAddressByClientCIDRs"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // APIGroupList is a list of APIGroup, to allow clients to discover the API at // /apis. type APIGroupList struct { @@ -682,6 +760,8 @@ type APIGroupList struct { Groups []APIGroup `json:"groups" protobuf:"bytes,1,rep,name=groups"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // APIGroup contains the name, the supported versions, and the preferred version // of a group. type APIGroup struct { @@ -733,6 +813,12 @@ type APIResource struct { SingularName string `json:"singularName" protobuf:"bytes,6,opt,name=singularName"` // namespaced indicates if a resource is namespaced or not. Namespaced bool `json:"namespaced" protobuf:"varint,2,opt,name=namespaced"` + // group is the preferred group of the resource. Empty implies the group of the containing resource list. + // For subresources, this may have a different value, for example: Scale". + Group string `json:"group,omitempty" protobuf:"bytes,8,opt,name=group"` + // version is the preferred version of the resource. Empty implies the version of the containing resource list + // For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)". + Version string `json:"version,omitempty" protobuf:"bytes,9,opt,name=version"` // kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo') Kind string `json:"kind" protobuf:"bytes,3,opt,name=kind"` // verbs is a list of supported kube verbs (this includes get, list, watch, create, @@ -754,6 +840,8 @@ func (vs Verbs) String() string { return fmt.Sprintf("%v", []string(vs)) } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + // APIResourceList is a list of APIResource, it is used to expose the name of the // resources supported in a specific group and version, and if the resource // is namespaced. @@ -822,7 +910,7 @@ type LabelSelectorRequirement struct { // +patchStrategy=merge Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` // operator represents a key's relationship to a set of values. - // Valid operators ard In, NotIn, Exists and DoesNotExist. + // Valid operators are In, NotIn, Exists and DoesNotExist. Operator LabelSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=LabelSelectorOperator"` // values is an array of string values. If the operator is In or NotIn, // the values array must be non-empty. If the operator is Exists or DoesNotExist, diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go index 159164d7c9..49d2de1ef7 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go @@ -53,6 +53,8 @@ var map_APIResource = map[string]string{ "name": "name is the plural name of the resource.", "singularName": "singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. 
The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface.", "namespaced": "namespaced indicates if a resource is namespaced or not.", + "group": "group is the preferred group of the resource. Empty implies the group of the containing resource list. For subresources, this may have a different value, for example: Scale\".", + "version": "version is the preferred version of the resource. Empty implies the version of the containing resource list For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)\".", "kind": "kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')", "verbs": "verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)", "shortNames": "shortNames is a list of suggested short names of the resource.", @@ -157,7 +159,7 @@ func (LabelSelector) SwaggerDoc() map[string]string { var map_LabelSelectorRequirement = map[string]string{ "": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", "key": "key is the label key that the selector applies to.", - "operator": "operator represents a key's relationship to a set of values. Valid operators ard In, NotIn, Exists and DoesNotExist.", + "operator": "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.", "values": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", } @@ -165,10 +167,21 @@ func (LabelSelectorRequirement) SwaggerDoc() map[string]string { return map_LabelSelectorRequirement } +var map_List = map[string]string{ + "": "List holds a list of objects, which may not be known by the server.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "List of objects", +} + +func (List) SwaggerDoc() map[string]string { + return map_List +} + var map_ListMeta = map[string]string{ "": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.", - "selfLink": "SelfLink is a URL representing this object. Populated by the system. Read-only.", + "selfLink": "selfLink is a URL representing this object. Populated by the system. Read-only.", "resourceVersion": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency", + "continue": "continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a list may not be possible if the server configuration has changed or more than a few minutes have passed. 
The resourceVersion field returned when using this continue value will be identical to the value in the first response.", } func (ListMeta) SwaggerDoc() map[string]string { @@ -183,6 +196,8 @@ var map_ListOptions = map[string]string{ "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", "resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", "timeoutSeconds": "Timeout for the list/watch call.", + "limit": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", + "continue": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server the server will respond with a 410 ResourceExpired error indicating the client must restart their list without the continue field. This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", } func (ListOptions) SwaggerDoc() map[string]string { @@ -295,7 +310,7 @@ var map_StatusDetails = map[string]string{ "kind": "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "uid": "UID of the resource. (when there is a single resource which can be described). 
More info: http://kubernetes.io/docs/user-guide/identifiers#uids", "causes": "The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.", - "retryAfterSeconds": "If specified, the time in seconds before the operation should be retried.", + "retryAfterSeconds": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action.", } func (StatusDetails) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go index a645501a1a..b7ec503184 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go @@ -26,6 +26,8 @@ import ( // Event represents a single event to a watched resource. // // +protobuf=true +// +k8s:deepcopy-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type WatchEvent struct { Type string `json:"type" protobuf:"bytes,1,opt,name=type"` @@ -78,3 +80,10 @@ type InternalEvent watch.Event func (e *InternalEvent) GetObjectKind() schema.ObjectKind { return schema.EmptyObjectKind } func (e *WatchEvent) GetObjectKind() schema.ObjectKind { return schema.EmptyObjectKind } +func (e *InternalEvent) DeepCopyObject() runtime.Object { + if c := e.DeepCopy(); c != nil { + return c + } else { + return nil + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go index 6fac96be40..c73e777b50 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -28,619 +28,1069 @@ import ( ) // GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. +// +// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
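The generated-deepcopy hunk below replaces the reflection-driven DeepCopy_v1_* functions with thin wrappers over statically generated DeepCopyInto/DeepCopy/DeepCopyObject methods; the +k8s:deepcopy-gen:interfaces tags added in types.go are what ask the generator to also emit DeepCopyObject so each type satisfies runtime.Object. A hand-written sketch of the same three-method pattern for a hypothetical Widget type (not part of this patch) shows the shape the generator produces; embedding TypeMeta supplies GetObjectKind, so DeepCopyObject is all that is missing for runtime.Object.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// Widget is a hypothetical API type used only to illustrate the static
// deepcopy pattern generated in zz_generated.deepcopy.go.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type Widget struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Tags              []string `json:"tags,omitempty"`
}

// DeepCopyInto copies the receiver into out; reference fields (slices, maps,
// pointers) get fresh backing storage so the copies do not alias each other.
func (in *Widget) DeepCopyInto(out *Widget) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	if in.Tags != nil {
		out.Tags = make([]string, len(in.Tags))
		copy(out.Tags, in.Tags)
	}
}

// DeepCopy allocates a new Widget and copies the receiver into it.
func (in *Widget) DeepCopy() *Widget {
	if in == nil {
		return nil
	}
	out := new(Widget)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject lets Widget satisfy runtime.Object, which is what the
// deepcopy-gen interfaces tag above requests from the generator.
func (in *Widget) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}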
func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { return []conversion.GeneratedDeepCopyFunc{ - {Fn: DeepCopy_v1_APIGroup, InType: reflect.TypeOf(&APIGroup{})}, - {Fn: DeepCopy_v1_APIGroupList, InType: reflect.TypeOf(&APIGroupList{})}, - {Fn: DeepCopy_v1_APIResource, InType: reflect.TypeOf(&APIResource{})}, - {Fn: DeepCopy_v1_APIResourceList, InType: reflect.TypeOf(&APIResourceList{})}, - {Fn: DeepCopy_v1_APIVersions, InType: reflect.TypeOf(&APIVersions{})}, - {Fn: DeepCopy_v1_DeleteOptions, InType: reflect.TypeOf(&DeleteOptions{})}, - {Fn: DeepCopy_v1_Duration, InType: reflect.TypeOf(&Duration{})}, - {Fn: DeepCopy_v1_ExportOptions, InType: reflect.TypeOf(&ExportOptions{})}, - {Fn: DeepCopy_v1_GetOptions, InType: reflect.TypeOf(&GetOptions{})}, - {Fn: DeepCopy_v1_GroupKind, InType: reflect.TypeOf(&GroupKind{})}, - {Fn: DeepCopy_v1_GroupResource, InType: reflect.TypeOf(&GroupResource{})}, - {Fn: DeepCopy_v1_GroupVersion, InType: reflect.TypeOf(&GroupVersion{})}, - {Fn: DeepCopy_v1_GroupVersionForDiscovery, InType: reflect.TypeOf(&GroupVersionForDiscovery{})}, - {Fn: DeepCopy_v1_GroupVersionKind, InType: reflect.TypeOf(&GroupVersionKind{})}, - {Fn: DeepCopy_v1_GroupVersionResource, InType: reflect.TypeOf(&GroupVersionResource{})}, - {Fn: DeepCopy_v1_Initializer, InType: reflect.TypeOf(&Initializer{})}, - {Fn: DeepCopy_v1_Initializers, InType: reflect.TypeOf(&Initializers{})}, - {Fn: DeepCopy_v1_InternalEvent, InType: reflect.TypeOf(&InternalEvent{})}, - {Fn: DeepCopy_v1_LabelSelector, InType: reflect.TypeOf(&LabelSelector{})}, - {Fn: DeepCopy_v1_LabelSelectorRequirement, InType: reflect.TypeOf(&LabelSelectorRequirement{})}, - {Fn: DeepCopy_v1_ListMeta, InType: reflect.TypeOf(&ListMeta{})}, - {Fn: DeepCopy_v1_ListOptions, InType: reflect.TypeOf(&ListOptions{})}, - {Fn: DeepCopy_v1_MicroTime, InType: reflect.TypeOf(&MicroTime{})}, - {Fn: DeepCopy_v1_ObjectMeta, InType: reflect.TypeOf(&ObjectMeta{})}, - {Fn: DeepCopy_v1_OwnerReference, InType: reflect.TypeOf(&OwnerReference{})}, - {Fn: DeepCopy_v1_Patch, InType: reflect.TypeOf(&Patch{})}, - {Fn: DeepCopy_v1_Preconditions, InType: reflect.TypeOf(&Preconditions{})}, - {Fn: DeepCopy_v1_RootPaths, InType: reflect.TypeOf(&RootPaths{})}, - {Fn: DeepCopy_v1_ServerAddressByClientCIDR, InType: reflect.TypeOf(&ServerAddressByClientCIDR{})}, - {Fn: DeepCopy_v1_Status, InType: reflect.TypeOf(&Status{})}, - {Fn: DeepCopy_v1_StatusCause, InType: reflect.TypeOf(&StatusCause{})}, - {Fn: DeepCopy_v1_StatusDetails, InType: reflect.TypeOf(&StatusDetails{})}, - {Fn: DeepCopy_v1_Time, InType: reflect.TypeOf(&Time{})}, - {Fn: DeepCopy_v1_Timestamp, InType: reflect.TypeOf(&Timestamp{})}, - {Fn: DeepCopy_v1_TypeMeta, InType: reflect.TypeOf(&TypeMeta{})}, - {Fn: DeepCopy_v1_WatchEvent, InType: reflect.TypeOf(&WatchEvent{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*APIGroup).DeepCopyInto(out.(*APIGroup)) + return nil + }, InType: reflect.TypeOf(&APIGroup{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*APIGroupList).DeepCopyInto(out.(*APIGroupList)) + return nil + }, InType: reflect.TypeOf(&APIGroupList{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*APIResource).DeepCopyInto(out.(*APIResource)) + return nil + }, InType: reflect.TypeOf(&APIResource{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*APIResourceList).DeepCopyInto(out.(*APIResourceList)) + return nil + }, InType: 
reflect.TypeOf(&APIResourceList{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*APIVersions).DeepCopyInto(out.(*APIVersions)) + return nil + }, InType: reflect.TypeOf(&APIVersions{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*DeleteOptions).DeepCopyInto(out.(*DeleteOptions)) + return nil + }, InType: reflect.TypeOf(&DeleteOptions{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*Duration).DeepCopyInto(out.(*Duration)) + return nil + }, InType: reflect.TypeOf(&Duration{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*ExportOptions).DeepCopyInto(out.(*ExportOptions)) + return nil + }, InType: reflect.TypeOf(&ExportOptions{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*GetOptions).DeepCopyInto(out.(*GetOptions)) + return nil + }, InType: reflect.TypeOf(&GetOptions{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*GroupKind).DeepCopyInto(out.(*GroupKind)) + return nil + }, InType: reflect.TypeOf(&GroupKind{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*GroupResource).DeepCopyInto(out.(*GroupResource)) + return nil + }, InType: reflect.TypeOf(&GroupResource{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*GroupVersion).DeepCopyInto(out.(*GroupVersion)) + return nil + }, InType: reflect.TypeOf(&GroupVersion{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*GroupVersionForDiscovery).DeepCopyInto(out.(*GroupVersionForDiscovery)) + return nil + }, InType: reflect.TypeOf(&GroupVersionForDiscovery{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*GroupVersionKind).DeepCopyInto(out.(*GroupVersionKind)) + return nil + }, InType: reflect.TypeOf(&GroupVersionKind{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*GroupVersionResource).DeepCopyInto(out.(*GroupVersionResource)) + return nil + }, InType: reflect.TypeOf(&GroupVersionResource{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*Initializer).DeepCopyInto(out.(*Initializer)) + return nil + }, InType: reflect.TypeOf(&Initializer{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*Initializers).DeepCopyInto(out.(*Initializers)) + return nil + }, InType: reflect.TypeOf(&Initializers{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*InternalEvent).DeepCopyInto(out.(*InternalEvent)) + return nil + }, InType: reflect.TypeOf(&InternalEvent{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*LabelSelector).DeepCopyInto(out.(*LabelSelector)) + return nil + }, InType: reflect.TypeOf(&LabelSelector{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*LabelSelectorRequirement).DeepCopyInto(out.(*LabelSelectorRequirement)) + return nil + }, InType: reflect.TypeOf(&LabelSelectorRequirement{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*List).DeepCopyInto(out.(*List)) + return nil + }, InType: reflect.TypeOf(&List{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*ListMeta).DeepCopyInto(out.(*ListMeta)) + return nil + }, InType: reflect.TypeOf(&ListMeta{})}, + {Fn: func(in interface{}, out 
interface{}, c *conversion.Cloner) error { + in.(*ListOptions).DeepCopyInto(out.(*ListOptions)) + return nil + }, InType: reflect.TypeOf(&ListOptions{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*MicroTime).DeepCopyInto(out.(*MicroTime)) + return nil + }, InType: reflect.TypeOf(&MicroTime{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*ObjectMeta).DeepCopyInto(out.(*ObjectMeta)) + return nil + }, InType: reflect.TypeOf(&ObjectMeta{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*OwnerReference).DeepCopyInto(out.(*OwnerReference)) + return nil + }, InType: reflect.TypeOf(&OwnerReference{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*Patch).DeepCopyInto(out.(*Patch)) + return nil + }, InType: reflect.TypeOf(&Patch{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*Preconditions).DeepCopyInto(out.(*Preconditions)) + return nil + }, InType: reflect.TypeOf(&Preconditions{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*RootPaths).DeepCopyInto(out.(*RootPaths)) + return nil + }, InType: reflect.TypeOf(&RootPaths{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*ServerAddressByClientCIDR).DeepCopyInto(out.(*ServerAddressByClientCIDR)) + return nil + }, InType: reflect.TypeOf(&ServerAddressByClientCIDR{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*Status).DeepCopyInto(out.(*Status)) + return nil + }, InType: reflect.TypeOf(&Status{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*StatusCause).DeepCopyInto(out.(*StatusCause)) + return nil + }, InType: reflect.TypeOf(&StatusCause{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*StatusDetails).DeepCopyInto(out.(*StatusDetails)) + return nil + }, InType: reflect.TypeOf(&StatusDetails{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*Time).DeepCopyInto(out.(*Time)) + return nil + }, InType: reflect.TypeOf(&Time{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*Timestamp).DeepCopyInto(out.(*Timestamp)) + return nil + }, InType: reflect.TypeOf(&Timestamp{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*WatchEvent).DeepCopyInto(out.(*WatchEvent)) + return nil + }, InType: reflect.TypeOf(&WatchEvent{})}, } } -// DeepCopy_v1_APIGroup is an autogenerated deepcopy function. -func DeepCopy_v1_APIGroup(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*APIGroup) - out := out.(*APIGroup) - *out = *in - if in.Versions != nil { - in, out := &in.Versions, &out.Versions - *out = make([]GroupVersionForDiscovery, len(*in)) - copy(*out, *in) - } - if in.ServerAddressByClientCIDRs != nil { - in, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs - *out = make([]ServerAddressByClientCIDR, len(*in)) - copy(*out, *in) - } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *APIGroup) DeepCopyInto(out *APIGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]GroupVersionForDiscovery, len(*in)) + copy(*out, *in) + } + out.PreferredVersion = in.PreferredVersion + if in.ServerAddressByClientCIDRs != nil { + in, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs + *out = make([]ServerAddressByClientCIDR, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIGroup. +func (in *APIGroup) DeepCopy() *APIGroup { + if in == nil { + return nil + } + out := new(APIGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { return nil } } -// DeepCopy_v1_APIGroupList is an autogenerated deepcopy function. -func DeepCopy_v1_APIGroupList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*APIGroupList) - out := out.(*APIGroupList) - *out = *in - if in.Groups != nil { - in, out := &in.Groups, &out.Groups - *out = make([]APIGroup, len(*in)) - for i := range *in { - if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { - return err - } else { - (*out)[i] = *newVal.(*APIGroup) - } - } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIGroupList) DeepCopyInto(out *APIGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]APIGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIGroupList. +func (in *APIGroupList) DeepCopy() *APIGroupList { + if in == nil { + return nil + } + out := new(APIGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { return nil } } -// DeepCopy_v1_APIResource is an autogenerated deepcopy function. -func DeepCopy_v1_APIResource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*APIResource) - out := out.(*APIResource) - *out = *in - if in.Verbs != nil { - in, out := &in.Verbs, &out.Verbs - *out = make(Verbs, len(*in)) - copy(*out, *in) - } - if in.ShortNames != nil { - in, out := &in.ShortNames, &out.ShortNames - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Categories != nil { - in, out := &in.Categories, &out.Categories - *out = make([]string, len(*in)) - copy(*out, *in) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *APIResource) DeepCopyInto(out *APIResource) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make(Verbs, len(*in)) + copy(*out, *in) + } + if in.ShortNames != nil { + in, out := &in.ShortNames, &out.ShortNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Categories != nil { + in, out := &in.Categories, &out.Categories + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIResource. +func (in *APIResource) DeepCopy() *APIResource { + if in == nil { + return nil + } + out := new(APIResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIResourceList) DeepCopyInto(out *APIResourceList) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.APIResources != nil { + in, out := &in.APIResources, &out.APIResources + *out = make([]APIResource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIResourceList. +func (in *APIResourceList) DeepCopy() *APIResourceList { + if in == nil { + return nil + } + out := new(APIResourceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIResourceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { return nil } } -// DeepCopy_v1_APIResourceList is an autogenerated deepcopy function. -func DeepCopy_v1_APIResourceList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*APIResourceList) - out := out.(*APIResourceList) - *out = *in - if in.APIResources != nil { - in, out := &in.APIResources, &out.APIResources - *out = make([]APIResource, len(*in)) - for i := range *in { - if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { - return err - } else { - (*out)[i] = *newVal.(*APIResource) - } - } - } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIVersions) DeepCopyInto(out *APIVersions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ServerAddressByClientCIDRs != nil { + in, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs + *out = make([]ServerAddressByClientCIDR, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIVersions. +func (in *APIVersions) DeepCopy() *APIVersions { + if in == nil { + return nil + } + out := new(APIVersions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIVersions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { return nil } } -// DeepCopy_v1_APIVersions is an autogenerated deepcopy function. 
-func DeepCopy_v1_APIVersions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*APIVersions) - out := out.(*APIVersions) - *out = *in - if in.Versions != nil { - in, out := &in.Versions, &out.Versions - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ServerAddressByClientCIDRs != nil { - in, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs - *out = make([]ServerAddressByClientCIDR, len(*in)) - copy(*out, *in) - } - return nil - } -} - -// DeepCopy_v1_DeleteOptions is an autogenerated deepcopy function. -func DeepCopy_v1_DeleteOptions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*DeleteOptions) - out := out.(*DeleteOptions) - *out = *in - if in.GracePeriodSeconds != nil { - in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeleteOptions) DeepCopyInto(out *DeleteOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.GracePeriodSeconds != nil { + in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds + if *in == nil { + *out = nil + } else { *out = new(int64) **out = **in } - if in.Preconditions != nil { - in, out := &in.Preconditions, &out.Preconditions - if newVal, err := c.DeepCopy(*in); err != nil { - return err - } else { - *out = newVal.(*Preconditions) - } + } + if in.Preconditions != nil { + in, out := &in.Preconditions, &out.Preconditions + if *in == nil { + *out = nil + } else { + *out = new(Preconditions) + (*in).DeepCopyInto(*out) } - if in.OrphanDependents != nil { - in, out := &in.OrphanDependents, &out.OrphanDependents + } + if in.OrphanDependents != nil { + in, out := &in.OrphanDependents, &out.OrphanDependents + if *in == nil { + *out = nil + } else { *out = new(bool) **out = **in } - if in.PropagationPolicy != nil { - in, out := &in.PropagationPolicy, &out.PropagationPolicy + } + if in.PropagationPolicy != nil { + in, out := &in.PropagationPolicy, &out.PropagationPolicy + if *in == nil { + *out = nil + } else { *out = new(DeletionPropagation) **out = **in } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteOptions. +func (in *DeleteOptions) DeepCopy() *DeleteOptions { + if in == nil { + return nil + } + out := new(DeleteOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeleteOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { return nil } } -// DeepCopy_v1_Duration is an autogenerated deepcopy function. -func DeepCopy_v1_Duration(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Duration) - out := out.(*Duration) - *out = *in +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Duration) DeepCopyInto(out *Duration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Duration. +func (in *Duration) DeepCopy() *Duration { + if in == nil { + return nil + } + out := new(Duration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExportOptions) DeepCopyInto(out *ExportOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExportOptions. +func (in *ExportOptions) DeepCopy() *ExportOptions { + if in == nil { + return nil + } + out := new(ExportOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExportOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { return nil } } -// DeepCopy_v1_ExportOptions is an autogenerated deepcopy function. -func DeepCopy_v1_ExportOptions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ExportOptions) - out := out.(*ExportOptions) - *out = *in +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GetOptions) DeepCopyInto(out *GetOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GetOptions. +func (in *GetOptions) DeepCopy() *GetOptions { + if in == nil { + return nil + } + out := new(GetOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GetOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { return nil } } -// DeepCopy_v1_GetOptions is an autogenerated deepcopy function. -func DeepCopy_v1_GetOptions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*GetOptions) - out := out.(*GetOptions) - *out = *in - return nil - } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupKind) DeepCopyInto(out *GroupKind) { + *out = *in + return } -// DeepCopy_v1_GroupKind is an autogenerated deepcopy function. -func DeepCopy_v1_GroupKind(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*GroupKind) - out := out.(*GroupKind) - *out = *in +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupKind. +func (in *GroupKind) DeepCopy() *GroupKind { + if in == nil { return nil } + out := new(GroupKind) + in.DeepCopyInto(out) + return out } -// DeepCopy_v1_GroupResource is an autogenerated deepcopy function. -func DeepCopy_v1_GroupResource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*GroupResource) - out := out.(*GroupResource) - *out = *in +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupResource) DeepCopyInto(out *GroupResource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupResource. +func (in *GroupResource) DeepCopy() *GroupResource { + if in == nil { return nil } + out := new(GroupResource) + in.DeepCopyInto(out) + return out } -// DeepCopy_v1_GroupVersion is an autogenerated deepcopy function. -func DeepCopy_v1_GroupVersion(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*GroupVersion) - out := out.(*GroupVersion) - *out = *in +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GroupVersion) DeepCopyInto(out *GroupVersion) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupVersion. +func (in *GroupVersion) DeepCopy() *GroupVersion { + if in == nil { return nil } + out := new(GroupVersion) + in.DeepCopyInto(out) + return out } -// DeepCopy_v1_GroupVersionForDiscovery is an autogenerated deepcopy function. -func DeepCopy_v1_GroupVersionForDiscovery(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*GroupVersionForDiscovery) - out := out.(*GroupVersionForDiscovery) - *out = *in +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupVersionForDiscovery) DeepCopyInto(out *GroupVersionForDiscovery) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupVersionForDiscovery. +func (in *GroupVersionForDiscovery) DeepCopy() *GroupVersionForDiscovery { + if in == nil { return nil } + out := new(GroupVersionForDiscovery) + in.DeepCopyInto(out) + return out } -// DeepCopy_v1_GroupVersionKind is an autogenerated deepcopy function. -func DeepCopy_v1_GroupVersionKind(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*GroupVersionKind) - out := out.(*GroupVersionKind) - *out = *in +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupVersionKind) DeepCopyInto(out *GroupVersionKind) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupVersionKind. +func (in *GroupVersionKind) DeepCopy() *GroupVersionKind { + if in == nil { return nil } + out := new(GroupVersionKind) + in.DeepCopyInto(out) + return out } -// DeepCopy_v1_GroupVersionResource is an autogenerated deepcopy function. -func DeepCopy_v1_GroupVersionResource(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*GroupVersionResource) - out := out.(*GroupVersionResource) - *out = *in +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupVersionResource) DeepCopyInto(out *GroupVersionResource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupVersionResource. +func (in *GroupVersionResource) DeepCopy() *GroupVersionResource { + if in == nil { return nil } + out := new(GroupVersionResource) + in.DeepCopyInto(out) + return out } -// DeepCopy_v1_Initializer is an autogenerated deepcopy function. -func DeepCopy_v1_Initializer(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Initializer) - out := out.(*Initializer) - *out = *in +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Initializer) DeepCopyInto(out *Initializer) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Initializer. +func (in *Initializer) DeepCopy() *Initializer { + if in == nil { return nil } + out := new(Initializer) + in.DeepCopyInto(out) + return out } -// DeepCopy_v1_Initializers is an autogenerated deepcopy function. 
-func DeepCopy_v1_Initializers(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Initializers) - out := out.(*Initializers) - *out = *in - if in.Pending != nil { - in, out := &in.Pending, &out.Pending - *out = make([]Initializer, len(*in)) - copy(*out, *in) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Initializers) DeepCopyInto(out *Initializers) { + *out = *in + if in.Pending != nil { + in, out := &in.Pending, &out.Pending + *out = make([]Initializer, len(*in)) + copy(*out, *in) + } + if in.Result != nil { + in, out := &in.Result, &out.Result + if *in == nil { + *out = nil + } else { + *out = new(Status) + (*in).DeepCopyInto(*out) } - if in.Result != nil { - in, out := &in.Result, &out.Result - if newVal, err := c.DeepCopy(*in); err != nil { - return err - } else { - *out = newVal.(*Status) - } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Initializers. +func (in *Initializers) DeepCopy() *Initializers { + if in == nil { + return nil + } + out := new(Initializers) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InternalEvent) DeepCopyInto(out *InternalEvent) { + *out = *in + if in.Object == nil { + out.Object = nil + } else { + out.Object = in.Object.DeepCopyObject() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InternalEvent. +func (in *InternalEvent) DeepCopy() *InternalEvent { + if in == nil { + return nil + } + out := new(InternalEvent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LabelSelector) DeepCopyInto(out *LabelSelector) { + *out = *in + if in.MatchLabels != nil { + in, out := &in.MatchLabels, &out.MatchLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val } + } + if in.MatchExpressions != nil { + in, out := &in.MatchExpressions, &out.MatchExpressions + *out = make([]LabelSelectorRequirement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelSelector. +func (in *LabelSelector) DeepCopy() *LabelSelector { + if in == nil { + return nil + } + out := new(LabelSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LabelSelectorRequirement) DeepCopyInto(out *LabelSelectorRequirement) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelSelectorRequirement. +func (in *LabelSelectorRequirement) DeepCopy() *LabelSelectorRequirement { + if in == nil { + return nil + } + out := new(LabelSelectorRequirement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
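The LabelSelector copy above allocates fresh MatchLabels maps and MatchExpressions slices, which matters because selectors built with the In/NotIn/Exists/DoesNotExist operators are often shared between callers. A small usage sketch, assuming the LabelSelectorAsSelector helper from this same meta/v1 package and the labels package it returns (the label keys and values are illustrative):

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

// matchesBackend evaluates a selector combining MatchLabels with the
// In and Exists requirement operators against an arbitrary label map.
func matchesBackend(podLabels map[string]string) (bool, error) {
	sel := &metav1.LabelSelector{
		MatchLabels: map[string]string{"tier": "backend"},
		MatchExpressions: []metav1.LabelSelectorRequirement{
			{Key: "app", Operator: metav1.LabelSelectorOpIn, Values: []string{"vault", "nomad"}},
			{Key: "canary", Operator: metav1.LabelSelectorOpExists},
		},
	}

	// DeepCopy, added in this change, yields an independent selector; editing
	// the copy's maps or slices does not touch sel.
	scratch := sel.DeepCopy()
	scratch.MatchLabels["track"] = "canary"

	s, err := metav1.LabelSelectorAsSelector(sel)
	if err != nil {
		return false, err
	}
	return s.Matches(labels.Set(podLabels)), nil
}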
+func (in *List) DeepCopyInto(out *List) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new List. +func (in *List) DeepCopy() *List { + if in == nil { + return nil + } + out := new(List) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *List) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { return nil } } -// DeepCopy_v1_InternalEvent is an autogenerated deepcopy function. -func DeepCopy_v1_InternalEvent(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*InternalEvent) - out := out.(*InternalEvent) - *out = *in - // in.Object is kind 'Interface' - if in.Object != nil { - if newVal, err := c.DeepCopy(&in.Object); err != nil { - return err - } else { - out.Object = *newVal.(*runtime.Object) - } - } - return nil - } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListMeta) DeepCopyInto(out *ListMeta) { + *out = *in + return } -// DeepCopy_v1_LabelSelector is an autogenerated deepcopy function. -func DeepCopy_v1_LabelSelector(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*LabelSelector) - out := out.(*LabelSelector) - *out = *in - if in.MatchLabels != nil { - in, out := &in.MatchLabels, &out.MatchLabels - *out = make(map[string]string) - for key, val := range *in { - (*out)[key] = val - } - } - if in.MatchExpressions != nil { - in, out := &in.MatchExpressions, &out.MatchExpressions - *out = make([]LabelSelectorRequirement, len(*in)) - for i := range *in { - if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { - return err - } else { - (*out)[i] = *newVal.(*LabelSelectorRequirement) - } - } - } +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListMeta. +func (in *ListMeta) DeepCopy() *ListMeta { + if in == nil { return nil } + out := new(ListMeta) + in.DeepCopyInto(out) + return out } -// DeepCopy_v1_LabelSelectorRequirement is an autogenerated deepcopy function. -func DeepCopy_v1_LabelSelectorRequirement(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*LabelSelectorRequirement) - out := out.(*LabelSelectorRequirement) - *out = *in - if in.Values != nil { - in, out := &in.Values, &out.Values - *out = make([]string, len(*in)) - copy(*out, *in) - } - return nil - } -} - -// DeepCopy_v1_ListMeta is an autogenerated deepcopy function. -func DeepCopy_v1_ListMeta(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ListMeta) - out := out.(*ListMeta) - *out = *in - return nil - } -} - -// DeepCopy_v1_ListOptions is an autogenerated deepcopy function. -func DeepCopy_v1_ListOptions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ListOptions) - out := out.(*ListOptions) - *out = *in - if in.TimeoutSeconds != nil { - in, out := &in.TimeoutSeconds, &out.TimeoutSeconds +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ListOptions) DeepCopyInto(out *ListOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + if *in == nil { + *out = nil + } else { *out = new(int64) **out = **in } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListOptions. +func (in *ListOptions) DeepCopy() *ListOptions { + if in == nil { + return nil + } + out := new(ListOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ListOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { return nil } } -// DeepCopy_v1_MicroTime is an autogenerated deepcopy function. -func DeepCopy_v1_MicroTime(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*MicroTime) - out := out.(*MicroTime) - *out = in.DeepCopy() +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MicroTime. +func (in *MicroTime) DeepCopy() *MicroTime { + if in == nil { return nil } + out := new(MicroTime) + in.DeepCopyInto(out) + return out } -// DeepCopy_v1_ObjectMeta is an autogenerated deepcopy function. -func DeepCopy_v1_ObjectMeta(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ObjectMeta) - out := out.(*ObjectMeta) - *out = *in - out.CreationTimestamp = in.CreationTimestamp.DeepCopy() - if in.DeletionTimestamp != nil { - in, out := &in.DeletionTimestamp, &out.DeletionTimestamp +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) { + *out = *in + in.CreationTimestamp.DeepCopyInto(&out.CreationTimestamp) + if in.DeletionTimestamp != nil { + in, out := &in.DeletionTimestamp, &out.DeletionTimestamp + if *in == nil { + *out = nil + } else { *out = new(Time) - **out = (*in).DeepCopy() + (*in).DeepCopyInto(*out) } - if in.DeletionGracePeriodSeconds != nil { - in, out := &in.DeletionGracePeriodSeconds, &out.DeletionGracePeriodSeconds + } + if in.DeletionGracePeriodSeconds != nil { + in, out := &in.DeletionGracePeriodSeconds, &out.DeletionGracePeriodSeconds + if *in == nil { + *out = nil + } else { *out = new(int64) **out = **in } - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make(map[string]string) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(map[string]string) - for key, val := range *in { - (*out)[key] = val - } - } - if in.OwnerReferences != nil { - in, out := &in.OwnerReferences, &out.OwnerReferences - *out = make([]OwnerReference, len(*in)) - for i := range *in { - if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { - return err - } else { - (*out)[i] = *newVal.(*OwnerReference) - } - } - } - if in.Initializers != nil { - in, out := &in.Initializers, &out.Initializers - if newVal, err := c.DeepCopy(*in); err != nil { - return err - } else { - *out = newVal.(*Initializers) - } - } - if in.Finalizers != nil { - in, out := &in.Finalizers, &out.Finalizers - *out = make([]string, len(*in)) - copy(*out, *in) - } - return nil } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := 
&in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.OwnerReferences != nil { + in, out := &in.OwnerReferences, &out.OwnerReferences + *out = make([]OwnerReference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Initializers != nil { + in, out := &in.Initializers, &out.Initializers + if *in == nil { + *out = nil + } else { + *out = new(Initializers) + (*in).DeepCopyInto(*out) + } + } + if in.Finalizers != nil { + in, out := &in.Finalizers, &out.Finalizers + *out = make([]string, len(*in)) + copy(*out, *in) + } + return } -// DeepCopy_v1_OwnerReference is an autogenerated deepcopy function. -func DeepCopy_v1_OwnerReference(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*OwnerReference) - out := out.(*OwnerReference) - *out = *in - if in.Controller != nil { - in, out := &in.Controller, &out.Controller +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMeta. +func (in *ObjectMeta) DeepCopy() *ObjectMeta { + if in == nil { + return nil + } + out := new(ObjectMeta) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OwnerReference) DeepCopyInto(out *OwnerReference) { + *out = *in + if in.Controller != nil { + in, out := &in.Controller, &out.Controller + if *in == nil { + *out = nil + } else { *out = new(bool) **out = **in } - if in.BlockOwnerDeletion != nil { - in, out := &in.BlockOwnerDeletion, &out.BlockOwnerDeletion + } + if in.BlockOwnerDeletion != nil { + in, out := &in.BlockOwnerDeletion, &out.BlockOwnerDeletion + if *in == nil { + *out = nil + } else { *out = new(bool) **out = **in } - return nil } + return } -// DeepCopy_v1_Patch is an autogenerated deepcopy function. -func DeepCopy_v1_Patch(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Patch) - out := out.(*Patch) - *out = *in +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OwnerReference. +func (in *OwnerReference) DeepCopy() *OwnerReference { + if in == nil { return nil } + out := new(OwnerReference) + in.DeepCopyInto(out) + return out } -// DeepCopy_v1_Preconditions is an autogenerated deepcopy function. -func DeepCopy_v1_Preconditions(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Preconditions) - out := out.(*Preconditions) - *out = *in - if in.UID != nil { - in, out := &in.UID, &out.UID +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Patch) DeepCopyInto(out *Patch) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Patch. +func (in *Patch) DeepCopy() *Patch { + if in == nil { + return nil + } + out := new(Patch) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Preconditions) DeepCopyInto(out *Preconditions) { + *out = *in + if in.UID != nil { + in, out := &in.UID, &out.UID + if *in == nil { + *out = nil + } else { *out = new(types.UID) **out = **in } - return nil } + return } -// DeepCopy_v1_RootPaths is an autogenerated deepcopy function. 
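Because ObjectMeta's DeepCopyInto above gives Labels, Annotations, OwnerReferences and Finalizers fresh backing storage, mutating a copy never reaches back into the original object, which may be shared (for example by a cache). A short sketch of the pattern this enables; the relabel helper is illustrative, not part of the patch:

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// relabel returns a copy of meta with one extra label, leaving the original
// untouched. This is safe because ObjectMeta.DeepCopy allocates a fresh
// Labels map rather than aliasing the input's map.
func relabel(meta *metav1.ObjectMeta, key, value string) *metav1.ObjectMeta {
	out := meta.DeepCopy()
	if out.Labels == nil {
		out.Labels = map[string]string{}
	}
	out.Labels[key] = value
	return out
}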
-func DeepCopy_v1_RootPaths(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*RootPaths) - out := out.(*RootPaths) - *out = *in - if in.Paths != nil { - in, out := &in.Paths, &out.Paths - *out = make([]string, len(*in)) - copy(*out, *in) - } +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Preconditions. +func (in *Preconditions) DeepCopy() *Preconditions { + if in == nil { return nil } + out := new(Preconditions) + in.DeepCopyInto(out) + return out } -// DeepCopy_v1_ServerAddressByClientCIDR is an autogenerated deepcopy function. -func DeepCopy_v1_ServerAddressByClientCIDR(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ServerAddressByClientCIDR) - out := out.(*ServerAddressByClientCIDR) - *out = *in +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RootPaths) DeepCopyInto(out *RootPaths) { + *out = *in + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootPaths. +func (in *RootPaths) DeepCopy() *RootPaths { + if in == nil { return nil } + out := new(RootPaths) + in.DeepCopyInto(out) + return out } -// DeepCopy_v1_Status is an autogenerated deepcopy function. -func DeepCopy_v1_Status(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Status) - out := out.(*Status) - *out = *in - if in.Details != nil { - in, out := &in.Details, &out.Details - if newVal, err := c.DeepCopy(*in); err != nil { - return err - } else { - *out = newVal.(*StatusDetails) - } - } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerAddressByClientCIDR) DeepCopyInto(out *ServerAddressByClientCIDR) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerAddressByClientCIDR. +func (in *ServerAddressByClientCIDR) DeepCopy() *ServerAddressByClientCIDR { + if in == nil { return nil } + out := new(ServerAddressByClientCIDR) + in.DeepCopyInto(out) + return out } -// DeepCopy_v1_StatusCause is an autogenerated deepcopy function. -func DeepCopy_v1_StatusCause(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*StatusCause) - out := out.(*StatusCause) - *out = *in - return nil - } -} - -// DeepCopy_v1_StatusDetails is an autogenerated deepcopy function. -func DeepCopy_v1_StatusDetails(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*StatusDetails) - out := out.(*StatusDetails) - *out = *in - if in.Causes != nil { - in, out := &in.Causes, &out.Causes - *out = make([]StatusCause, len(*in)) - copy(*out, *in) - } - return nil - } -} - -// DeepCopy_v1_Time is an autogenerated deepcopy function. -func DeepCopy_v1_Time(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Time) - out := out.(*Time) - *out = in.DeepCopy() - return nil - } -} - -// DeepCopy_v1_Timestamp is an autogenerated deepcopy function. -func DeepCopy_v1_Timestamp(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Timestamp) - out := out.(*Timestamp) - *out = *in - return nil - } -} - -// DeepCopy_v1_TypeMeta is an autogenerated deepcopy function. 
-func DeepCopy_v1_TypeMeta(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*TypeMeta) - out := out.(*TypeMeta) - *out = *in - return nil - } -} - -// DeepCopy_v1_WatchEvent is an autogenerated deepcopy function. -func DeepCopy_v1_WatchEvent(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*WatchEvent) - out := out.(*WatchEvent) - *out = *in - if newVal, err := c.DeepCopy(&in.Object); err != nil { - return err +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Status) DeepCopyInto(out *Status) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Details != nil { + in, out := &in.Details, &out.Details + if *in == nil { + *out = nil } else { - out.Object = *newVal.(*runtime.RawExtension) + *out = new(StatusDetails) + (*in).DeepCopyInto(*out) } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Status. +func (in *Status) DeepCopy() *Status { + if in == nil { + return nil + } + out := new(Status) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Status) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatusCause) DeepCopyInto(out *StatusCause) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatusCause. +func (in *StatusCause) DeepCopy() *StatusCause { + if in == nil { + return nil + } + out := new(StatusCause) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatusDetails) DeepCopyInto(out *StatusDetails) { + *out = *in + if in.Causes != nil { + in, out := &in.Causes, &out.Causes + *out = make([]StatusCause, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatusDetails. +func (in *StatusDetails) DeepCopy() *StatusDetails { + if in == nil { + return nil + } + out := new(StatusDetails) + in.DeepCopyInto(out) + return out +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Time. +func (in *Time) DeepCopy() *Time { + if in == nil { + return nil + } + out := new(Time) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Timestamp) DeepCopyInto(out *Timestamp) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Timestamp. +func (in *Timestamp) DeepCopy() *Timestamp { + if in == nil { + return nil + } + out := new(Timestamp) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WatchEvent) DeepCopyInto(out *WatchEvent) { + *out = *in + in.Object.DeepCopyInto(&out.Object) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WatchEvent. 
+func (in *WatchEvent) DeepCopy() *WatchEvent { + if in == nil { + return nil + } + out := new(WatchEvent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WatchEvent) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { return nil } } diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/BUILD b/vendor/k8s.io/apimachinery/pkg/conversion/BUILD index 643614ee48..7e100d4fa4 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/conversion/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -15,8 +13,8 @@ go_test( "deep_copy_test.go", "helper_test.go", ], + importpath = "k8s.io/apimachinery/pkg/conversion", library = ":go_default_library", - tags = ["automanaged"], deps = [ "//vendor/github.com/google/gofuzz:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", @@ -33,6 +31,23 @@ go_library( "doc.go", "helper.go", ], - tags = ["automanaged"], + importpath = "k8s.io/apimachinery/pkg/conversion", deps = ["//vendor/k8s.io/apimachinery/third_party/forked/golang/reflect:go_default_library"], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/apimachinery/pkg/conversion/queryparams:all-srcs", + "//staging/src/k8s.io/apimachinery/pkg/conversion/unstructured:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/BUILD b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/BUILD index 8eeabc5f11..8b871ab126 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -14,16 +12,29 @@ go_library( "convert.go", "doc.go", ], - tags = ["automanaged"], + importpath = "k8s.io/apimachinery/pkg/conversion/queryparams", ) go_test( name = "go_default_xtest", srcs = ["convert_test.go"], - tags = ["automanaged"], + importpath = "k8s.io/apimachinery/pkg/conversion/queryparams_test", deps = [ "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/conversion/queryparams:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go index 30f717b2ce..17b3666170 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go +++ b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go @@ -90,7 +90,14 @@ func customMarshalValue(value reflect.Value) (reflect.Value, bool) { marshaler, ok := value.Interface().(Marshaler) if !ok { - return reflect.Value{}, false + if !isPointerKind(value.Kind()) && value.CanAddr() { + marshaler, ok = value.Addr().Interface().(Marshaler) + if !ok { + return 
reflect.Value{}, false + } + } else { + return reflect.Value{}, false + } } // Don't invoke functions on nil pointers diff --git a/vendor/k8s.io/apimachinery/pkg/fields/BUILD b/vendor/k8s.io/apimachinery/pkg/fields/BUILD index 7c44f5fd82..2bae135039 100644 --- a/vendor/k8s.io/apimachinery/pkg/fields/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/fields/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -14,8 +12,8 @@ go_test( "fields_test.go", "selector_test.go", ], + importpath = "k8s.io/apimachinery/pkg/fields", library = ":go_default_library", - tags = ["automanaged"], ) go_library( @@ -26,6 +24,19 @@ go_library( "requirements.go", "selector.go", ], - tags = ["automanaged"], + importpath = "k8s.io/apimachinery/pkg/fields", deps = ["//vendor/k8s.io/apimachinery/pkg/selection:go_default_library"], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/fields/selector.go b/vendor/k8s.io/apimachinery/pkg/fields/selector.go index 1305dde086..273e9a2c1b 100644 --- a/vendor/k8s.io/apimachinery/pkg/fields/selector.go +++ b/vendor/k8s.io/apimachinery/pkg/fields/selector.go @@ -50,6 +50,9 @@ type Selector interface { // String returns a human readable string that represents this selector. String() string + + // Make a deep copy of the selector. + DeepCopySelector() Selector } // Everything returns a selector that matches all fields. @@ -99,6 +102,15 @@ func (t *hasTerm) String() string { return fmt.Sprintf("%v=%v", t.field, EscapeValue(t.value)) } +func (t *hasTerm) DeepCopySelector() Selector { + if t == nil { + return nil + } + out := new(hasTerm) + *out = *t + return out +} + type notHasTerm struct { field, value string } @@ -138,6 +150,15 @@ func (t *notHasTerm) String() string { return fmt.Sprintf("%v!=%v", t.field, EscapeValue(t.value)) } +func (t *notHasTerm) DeepCopySelector() Selector { + if t == nil { + return nil + } + out := new(notHasTerm) + *out = *t + return out +} + type andTerm []Selector func (t andTerm) Matches(ls Fields) bool { @@ -207,6 +228,17 @@ func (t andTerm) String() string { return strings.Join(terms, ",") } +func (t andTerm) DeepCopySelector() Selector { + if t == nil { + return nil + } + out := make([]Selector, len(t)) + for i := range t { + out[i] = t[i].DeepCopySelector() + } + return andTerm(out) +} + // SelectorFromSet returns a Selector which will match exactly the given Set. A // nil Set is considered equivalent to Everything(). 
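Note: the selector term types in fields/selector.go keep their fields unexported, so the DeepCopySelector methods added above are the only way for a caller to clone a parsed selector without re-parsing it. A minimal usage sketch, not part of this patch; the selector string and field values are made up for illustration:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/fields"
)

func main() {
	// Parse a field selector, then take an independent copy of it.
	sel := fields.ParseSelectorOrDie("metadata.name=web,status.phase!=Failed")
	copied := sel.DeepCopySelector()

	// The copy matches the same field sets as the original.
	fmt.Println(copied.Matches(fields.Set{
		"metadata.name": "web",
		"status.phase":  "Running",
	})) // true
	fmt.Println(copied.String())
}
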
func SelectorFromSet(ls Set) Selector { diff --git a/vendor/k8s.io/apimachinery/pkg/labels/BUILD b/vendor/k8s.io/apimachinery/pkg/labels/BUILD index 5e4d9143b3..3612719d3b 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/labels/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -14,8 +12,8 @@ go_test( "labels_test.go", "selector_test.go", ], + importpath = "k8s.io/apimachinery/pkg/labels", library = ":go_default_library", - tags = ["automanaged"], deps = [ "//vendor/k8s.io/apimachinery/pkg/selection:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", @@ -28,12 +26,27 @@ go_library( "doc.go", "labels.go", "selector.go", + "zz_generated.deepcopy.go", ], - tags = ["automanaged"], + importpath = "k8s.io/apimachinery/pkg/labels", deps = [ "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/selection:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/vendor/k8s.io/apimachinery/pkg/labels/selector.go index 50b41f99d2..ac123033a0 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/selector.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/selector.go @@ -51,6 +51,9 @@ type Selector interface { // If there are querying parameters, it will return converted requirements and selectable=true. // If this selector doesn't want to select anything, it will return selectable=false. Requirements() (requirements Requirements, selectable bool) + + // Make a deep copy of the selector. + DeepCopySelector() Selector } // Everything returns a selector that matches all labels. @@ -65,6 +68,7 @@ func (n nothingSelector) Empty() bool { return false } func (n nothingSelector) String() string { return "" } func (n nothingSelector) Add(_ ...Requirement) Selector { return n } func (n nothingSelector) Requirements() (Requirements, bool) { return nil, false } +func (n nothingSelector) DeepCopySelector() Selector { return n } // Nothing returns a selector that matches no labels func Nothing() Selector { @@ -78,6 +82,21 @@ func NewSelector() Selector { type internalSelector []Requirement +func (s internalSelector) DeepCopy() internalSelector { + if s == nil { + return nil + } + result := make([]Requirement, len(s)) + for i := range s { + s[i].DeepCopyInto(&result[i]) + } + return result +} + +func (s internalSelector) DeepCopySelector() Selector { + return s.DeepCopy() +} + // ByKey sorts requirements by key to obtain deterministic parser type ByKey []Requirement @@ -91,6 +110,7 @@ func (a ByKey) Less(i, j int) bool { return a[i].key < a[j].key } // The zero value of Requirement is invalid. // Requirement implements both set based match and exact match // Requirement should be initialized via NewRequirement constructor for creating a valid Requirement. 
+// +k8s:deepcopy-gen=true type Requirement struct { key string operator selection.Operator diff --git a/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go new file mode 100644 index 0000000000..80ba3fb751 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go @@ -0,0 +1,59 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package labels + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + reflect "reflect" +) + +// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. +// +// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. +func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { + return []conversion.GeneratedDeepCopyFunc{ + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*Requirement).DeepCopyInto(out.(*Requirement)) + return nil + }, InType: reflect.TypeOf(&Requirement{})}, + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Requirement) DeepCopyInto(out *Requirement) { + *out = *in + if in.strValues != nil { + in, out := &in.strValues, &out.strValues + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Requirement. 
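Note: the generated DeepCopyInto/DeepCopy pair above follows a fixed shape: copy the value, then re-allocate any reference fields so the copy shares no memory with the original. A hand-written sketch of the same pattern for a hypothetical type (not part of this change) shows why the slice is re-made with make+copy rather than assigned:

package example

// Tag is a hypothetical type used only to illustrate the generated pattern.
type Tag struct {
	Key    string
	Values []string
}

// DeepCopyInto copies the receiver into out; the slice is re-allocated so the
// copy does not alias the original backing array.
func (in *Tag) DeepCopyInto(out *Tag) {
	*out = *in // copies Key and the slice header (still aliased at this point)
	if in.Values != nil {
		out.Values = make([]string, len(in.Values))
		copy(out.Values, in.Values)
	}
}

// DeepCopy allocates a new Tag and copies the receiver into it.
func (in *Tag) DeepCopy() *Tag {
	if in == nil {
		return nil
	}
	out := new(Tag)
	in.DeepCopyInto(out)
	return out
}
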
+func (in *Requirement) DeepCopy() *Requirement { + if in == nil { + return nil + } + out := new(Requirement) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apimachinery/pkg/openapi/BUILD b/vendor/k8s.io/apimachinery/pkg/openapi/BUILD deleted file mode 100644 index 4b3dfd4a0c..0000000000 --- a/vendor/k8s.io/apimachinery/pkg/openapi/BUILD +++ /dev/null @@ -1,21 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "common.go", - "doc.go", - ], - tags = ["automanaged"], - deps = [ - "//vendor/github.com/emicklei/go-restful:go_default_library", - "//vendor/github.com/go-openapi/spec:go_default_library", - ], -) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/BUILD b/vendor/k8s.io/apimachinery/pkg/runtime/BUILD index 4fd7d5a7ee..2085e643fd 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/runtime/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -11,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["swagger_doc_generator_test.go"], + importpath = "k8s.io/apimachinery/pkg/runtime", library = ":go_default_library", - tags = ["automanaged"], ) go_library( @@ -36,7 +34,7 @@ go_library( "types_proto.go", "zz_generated.deepcopy.go", ], - tags = ["automanaged"], + importpath = "k8s.io/apimachinery/pkg/runtime", deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", @@ -54,7 +52,7 @@ go_test( "extension_test.go", "scheme_test.go", ], - tags = ["automanaged"], + importpath = "k8s.io/apimachinery/pkg/runtime_test", deps = [ "//vendor/github.com/google/gofuzz:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", @@ -63,6 +61,31 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/testing:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:all-srcs", + "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer:all-srcs", + "//staging/src/k8s.io/apimachinery/pkg/runtime/testing:all-srcs", + ], + tags = ["automanaged"], +) + +filegroup( + name = "go_default_library_protos", + srcs = ["generated.proto"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go b/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go index 8eedffc9c3..afe4fab15e 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go @@ -19,6 +19,7 @@ limitations under the License. package runtime import ( + "fmt" "reflect" "strconv" "strings" @@ -26,6 +27,20 @@ import ( "k8s.io/apimachinery/pkg/conversion" ) +// DefaultFieldSelectorConversion auto-accepts metav1 values for name and namespace. 
+// A cluster scoped resource specifying namespace empty works fine and specifying a particular +// namespace will return no results, as expected. +func DefaultMetaV1FieldSelectorConversion(label, value string) (string, string, error) { + switch label { + case "metadata.name": + return label, value, nil + case "metadata.namespace": + return label, value, nil + default: + return "", "", fmt.Errorf("%q is not a known field selector: only %q, %q", label, "metadata.name", "metadata.namespace") + } +} + // JSONKeyMapper uses the struct tags on a conversion to determine the key value for // the other side. Use when mapping from a map[string]* to a struct or vice versa. func JSONKeyMapper(key string, sourceTag, destTag reflect.StructTag) (string, string) { diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go b/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go index e8825a787a..2cdac9e141 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go @@ -30,6 +30,12 @@ type encodable struct { } func (e encodable) GetObjectKind() schema.ObjectKind { return e.obj.GetObjectKind() } +func (e encodable) DeepCopyObject() Object { + var out encodable = e + out.obj = e.obj.DeepCopyObject() + copy(out.versions, e.versions) + return out +} // NewEncodable creates an object that will be encoded with the provided codec on demand. // Provided as a convenience for test cases dealing with internal objects. diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/extension.go b/vendor/k8s.io/apimachinery/pkg/runtime/extension.go index 4d23ee9ee3..737e2e9ff5 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/extension.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/extension.go @@ -17,6 +17,7 @@ limitations under the License. package runtime import ( + "bytes" "encoding/json" "errors" ) @@ -25,7 +26,9 @@ func (re *RawExtension) UnmarshalJSON(in []byte) error { if re == nil { return errors.New("runtime.RawExtension: UnmarshalJSON on nil pointer") } - re.Raw = append(re.Raw[0:0], in...) + if !bytes.Equal(in, []byte("null")) { + re.Raw = append(re.Raw[0:0], in...) + } return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto b/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto index 57fc840785..b3fd09c3c5 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto @@ -89,7 +89,7 @@ message RawExtension { // TypeMeta is provided here for convenience. You may use it directly from this package or define // your own with the same fields. // -// +k8s:deepcopy-gen=true +// +k8s:deepcopy-gen=false // +protobuf=true // +k8s:openapi-gen=true message TypeMeta { @@ -107,6 +107,7 @@ message TypeMeta { // metadata and field mutatation. // // +k8s:deepcopy-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +protobuf=true // +k8s:openapi-gen=true message Unknown { diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go index fcb18ba111..c90eef5ac3 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go @@ -203,13 +203,6 @@ type ObjectCreater interface { New(kind schema.GroupVersionKind) (out Object, err error) } -// ObjectCopier duplicates an object. -type ObjectCopier interface { - // Copy returns an exact copy of the provided Object, or an error if the - // copy could not be completed. 
- Copy(Object) (Object, error) -} - // ResourceVersioner provides methods for setting and retrieving // the resource version from an API object. type ResourceVersioner interface { @@ -234,6 +227,7 @@ type SelfLinker interface { // to return a no-op ObjectKindAccessor in cases where it is not expected to be serialized. type Object interface { GetObjectKind() schema.ObjectKind + DeepCopyObject() Object } // Unstructured objects store values as map[string]interface{}, with only values that can be serialized @@ -242,10 +236,14 @@ type Unstructured interface { // IsUnstructuredObject is a marker interface to allow objects that can be serialized but not introspected // to bypass conversion. IsUnstructuredObject() - // IsList returns true if this type is a list or matches the list convention - has an array called "items". - IsList() bool // UnstructuredContent returns a non-nil, mutable map of the contents of this object. Values may be // []interface{}, map[string]interface{}, or any primitive type. Contents are typically serialized to // and from JSON. UnstructuredContent() map[string]interface{} + // IsList returns true if this type is a list or matches the list convention - has an array called "items". + IsList() bool + // EachListItem should pass a single item out of the list as an Object to the provided function. Any + // error should terminate the iteration. If IsList() returns false, this method should return an error + // instead of calling the provided function. + EachListItem(func(Object) error) error } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/register.go b/vendor/k8s.io/apimachinery/pkg/runtime/register.go index 2ec6db8201..eeb380c3dc 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/register.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/register.go @@ -28,7 +28,7 @@ func (obj *TypeMeta) GroupVersionKind() schema.GroupVersionKind { return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) } -func (obj *Unknown) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } +func (obj *TypeMeta) GetObjectKind() schema.ObjectKind { return obj } // GetObjectKind implements Object for VersionedObjects, returning an empty ObjectKind // interface if no objects are provided, or the ObjectKind interface of the object in the diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/BUILD b/vendor/k8s.io/apimachinery/pkg/runtime/schema/BUILD index 660497064b..032d866edb 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -11,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["group_version_test.go"], + importpath = "k8s.io/apimachinery/pkg/runtime/schema", library = ":go_default_library", - tags = ["automanaged"], ) go_library( @@ -22,6 +20,25 @@ go_library( "group_version.go", "interfaces.go", ], - tags = ["automanaged"], + importpath = "k8s.io/apimachinery/pkg/runtime/schema", deps = ["//vendor/github.com/gogo/protobuf/proto:go_default_library"], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) + +filegroup( + name = "go_default_library_protos", + srcs = ["generated.proto"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go 
b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go index 6c9475fa0d..c597fcf99f 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go @@ -420,20 +420,6 @@ func (s *Scheme) Default(src Object) { } } -// Copy does a deep copy of an API object. -func (s *Scheme) Copy(src Object) (Object, error) { - dst, err := s.DeepCopy(src) - if err != nil { - return nil, err - } - return dst.(Object), nil -} - -// Performs a deep copy of the given object. -func (s *Scheme) DeepCopy(src interface{}) (interface{}, error) { - return s.cloner.DeepCopy(src) -} - // Convert will attempt to convert in into out. Both must be pointers. For easy // testing of conversion functions. Returns an error if the conversion isn't // possible. You can call this with types that haven't been registered (for example, @@ -454,11 +440,11 @@ func (s *Scheme) Convert(in, out interface{}, context interface{}) error { // versioned representation to an unversioned one. func (s *Scheme) ConvertFieldLabel(version, kind, label, value string) (string, string, error) { if s.fieldLabelConversionFuncs[version] == nil { - return "", "", fmt.Errorf("No field label conversion function found for version: %s", version) + return DefaultMetaV1FieldSelectorConversion(label, value) } conversionFunc, ok := s.fieldLabelConversionFuncs[version][kind] if !ok { - return "", "", fmt.Errorf("No field label conversion function found for version %s and kind %s", version, kind) + return DefaultMetaV1FieldSelectorConversion(label, value) } return conversionFunc(label, value) } @@ -501,9 +487,9 @@ func (s *Scheme) convertToVersion(copy bool, in Object, target GroupVersioner) ( // TODO: when we move to server API versions, we should completely remove the unversioned concept if unversionedKind, ok := s.unversionedTypes[t]; ok { if gvk, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{unversionedKind}); ok { - return copyAndSetTargetKind(copy, s, in, gvk) + return copyAndSetTargetKind(copy, in, gvk) } - return copyAndSetTargetKind(copy, s, in, unversionedKind) + return copyAndSetTargetKind(copy, in, unversionedKind) } return nil, NewNotRegisteredErrForTarget(t, target) @@ -512,16 +498,16 @@ func (s *Scheme) convertToVersion(copy bool, in Object, target GroupVersioner) ( // target wants to use the existing type, set kind and return (no conversion necessary) for _, kind := range kinds { if gvk == kind { - return copyAndSetTargetKind(copy, s, in, gvk) + return copyAndSetTargetKind(copy, in, gvk) } } // type is unversioned, no conversion necessary if unversionedKind, ok := s.unversionedTypes[t]; ok { if gvk, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{unversionedKind}); ok { - return copyAndSetTargetKind(copy, s, in, gvk) + return copyAndSetTargetKind(copy, in, gvk) } - return copyAndSetTargetKind(copy, s, in, unversionedKind) + return copyAndSetTargetKind(copy, in, unversionedKind) } out, err := s.New(gvk) @@ -530,11 +516,7 @@ func (s *Scheme) convertToVersion(copy bool, in Object, target GroupVersioner) ( } if copy { - copied, err := s.Copy(in) - if err != nil { - return nil, err - } - in = copied + in = in.DeepCopyObject() } flags, meta := s.generateConvertMeta(in) @@ -553,13 +535,9 @@ func (s *Scheme) generateConvertMeta(in interface{}) (conversion.FieldMatchingFl } // copyAndSetTargetKind performs a conditional copy before returning the object, or an error if copy was not successful. 
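Note: with the ConvertFieldLabel fallback above, kinds that never registered a field label conversion function still accept the two metav1 selectors instead of failing outright; anything else is rejected. A small sketch of the resulting behaviour, assuming a scheme with nothing registered for the (made-up) version and kind being queried:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	s := runtime.NewScheme()

	// No conversion function is registered for example.io/v1 Widget, so
	// ConvertFieldLabel falls through to DefaultMetaV1FieldSelectorConversion.
	label, value, err := s.ConvertFieldLabel("example.io/v1", "Widget", "metadata.name", "my-widget")
	fmt.Println(label, value, err) // metadata.name my-widget <nil>

	// Any other label is still rejected with a non-nil error.
	_, _, err = s.ConvertFieldLabel("example.io/v1", "Widget", "spec.size", "large")
	fmt.Println(err)
}
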
-func copyAndSetTargetKind(copy bool, copier ObjectCopier, obj Object, kind schema.GroupVersionKind) (Object, error) { +func copyAndSetTargetKind(copy bool, obj Object, kind schema.GroupVersionKind) (Object, error) { if copy { - copied, err := copier.Copy(obj) - if err != nil { - return nil, err - } - obj = copied + obj = obj.DeepCopyObject() } setTargetKind(obj, kind) return obj, nil diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator.go b/vendor/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator.go index 29722d52e7..5bc642bc8e 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator.go @@ -71,7 +71,7 @@ func fmtRawDoc(rawDoc string) string { delPrevChar() buffer.WriteString("\n\n") case strings.HasPrefix(leading, "TODO"): // Ignore one line TODOs - case strings.HasPrefix(leading, "+"): // Ignore instructions to go2idl + case strings.HasPrefix(leading, "+"): // Ignore instructions to the generators default: if strings.HasPrefix(line, " ") || strings.HasPrefix(line, "\t") { delPrevChar() diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types.go b/vendor/k8s.io/apimachinery/pkg/runtime/types.go index f972c5e697..e4515d8ed0 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/types.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/types.go @@ -30,7 +30,7 @@ package runtime // TypeMeta is provided here for convenience. You may use it directly from this package or define // your own with the same fields. // -// +k8s:deepcopy-gen=true +// +k8s:deepcopy-gen=false // +protobuf=true // +k8s:openapi-gen=true type TypeMeta struct { @@ -106,6 +106,7 @@ type RawExtension struct { // metadata and field mutatation. // // +k8s:deepcopy-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +protobuf=true // +k8s:openapi-gen=true type Unknown struct { @@ -124,6 +125,9 @@ type Unknown struct { // VersionedObjects is used by Decoders to give callers a way to access all versions // of an object during the decoding process. +// +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:deepcopy-gen=true type VersionedObjects struct { // Objects is the set of objects retrieved during decoding, in order of conversion. // The 0 index is the object as serialized on the wire. If conversion has occurred, diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go index 54ce6ad59e..d347461ac6 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go @@ -26,58 +26,114 @@ import ( ) // GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. +// +// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
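Note: with Scheme.Copy and Scheme.DeepCopy removed, callers clone API objects through the object's own DeepCopyObject method, which every runtime.Object now carries. A minimal sketch using runtime.Unknown, which gains DeepCopyObject in this change; the payload bytes are illustrative:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Any holder of a plain runtime.Object can now clone it without a Scheme.
	var obj runtime.Object = &runtime.Unknown{Raw: []byte(`{"kind":"Widget"}`)}

	clone := obj.DeepCopyObject()

	// Mutating the original does not affect the clone, because the generated
	// DeepCopyInto re-allocates the Raw byte slice.
	obj.(*runtime.Unknown).Raw[2] = 'K'
	fmt.Printf("%s\n", clone.(*runtime.Unknown).Raw) // {"kind":"Widget"}
}
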
func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { return []conversion.GeneratedDeepCopyFunc{ - {Fn: DeepCopy_runtime_RawExtension, InType: reflect.TypeOf(&RawExtension{})}, - {Fn: DeepCopy_runtime_TypeMeta, InType: reflect.TypeOf(&TypeMeta{})}, - {Fn: DeepCopy_runtime_Unknown, InType: reflect.TypeOf(&Unknown{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*RawExtension).DeepCopyInto(out.(*RawExtension)) + return nil + }, InType: reflect.TypeOf(&RawExtension{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*Unknown).DeepCopyInto(out.(*Unknown)) + return nil + }, InType: reflect.TypeOf(&Unknown{})}, + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*VersionedObjects).DeepCopyInto(out.(*VersionedObjects)) + return nil + }, InType: reflect.TypeOf(&VersionedObjects{})}, } } -// DeepCopy_runtime_RawExtension is an autogenerated deepcopy function. -func DeepCopy_runtime_RawExtension(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*RawExtension) - out := out.(*RawExtension) - *out = *in - if in.Raw != nil { - in, out := &in.Raw, &out.Raw - *out = make([]byte, len(*in)) - copy(*out, *in) - } - // in.Object is kind 'Interface' - if in.Object != nil { - if newVal, err := c.DeepCopy(&in.Object); err != nil { - return err +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RawExtension) DeepCopyInto(out *RawExtension) { + *out = *in + if in.Raw != nil { + in, out := &in.Raw, &out.Raw + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.Object == nil { + out.Object = nil + } else { + out.Object = in.Object.DeepCopyObject() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RawExtension. +func (in *RawExtension) DeepCopy() *RawExtension { + if in == nil { + return nil + } + out := new(RawExtension) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Unknown) DeepCopyInto(out *Unknown) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Raw != nil { + in, out := &in.Raw, &out.Raw + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Unknown. +func (in *Unknown) DeepCopy() *Unknown { + if in == nil { + return nil + } + out := new(Unknown) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new Object. +func (in *Unknown) DeepCopyObject() Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VersionedObjects) DeepCopyInto(out *VersionedObjects) { + *out = *in + if in.Objects != nil { + in, out := &in.Objects, &out.Objects + *out = make([]Object, len(*in)) + for i := range *in { + if (*in)[i] == nil { + (*out)[i] = nil } else { - out.Object = *newVal.(*Object) + (*out)[i] = (*in)[i].DeepCopyObject() } } - return nil } + return } -// DeepCopy_runtime_TypeMeta is an autogenerated deepcopy function. 
-func DeepCopy_runtime_TypeMeta(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*TypeMeta) - out := out.(*TypeMeta) - *out = *in +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VersionedObjects. +func (in *VersionedObjects) DeepCopy() *VersionedObjects { + if in == nil { return nil } + out := new(VersionedObjects) + in.DeepCopyInto(out) + return out } -// DeepCopy_runtime_Unknown is an autogenerated deepcopy function. -func DeepCopy_runtime_Unknown(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Unknown) - out := out.(*Unknown) - *out = *in - if in.Raw != nil { - in, out := &in.Raw, &out.Raw - *out = make([]byte, len(*in)) - copy(*out, *in) - } +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new Object. +func (in *VersionedObjects) DeepCopyObject() Object { + if c := in.DeepCopy(); c != nil { + return c + } else { return nil } } diff --git a/vendor/k8s.io/apimachinery/pkg/selection/BUILD b/vendor/k8s.io/apimachinery/pkg/selection/BUILD index 55859fef69..3790df9af2 100644 --- a/vendor/k8s.io/apimachinery/pkg/selection/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/selection/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -10,5 +8,18 @@ load( go_library( name = "go_default_library", srcs = ["operator.go"], + importpath = "k8s.io/apimachinery/pkg/selection", +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], tags = ["automanaged"], ) diff --git a/vendor/k8s.io/apimachinery/pkg/types/BUILD b/vendor/k8s.io/apimachinery/pkg/types/BUILD index f6dadfcca8..3db635c8a3 100644 --- a/vendor/k8s.io/apimachinery/pkg/types/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/types/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -16,5 +14,18 @@ go_library( "patch.go", "uid.go", ], + importpath = "k8s.io/apimachinery/pkg/types", +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], tags = ["automanaged"], ) diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/BUILD b/vendor/k8s.io/apimachinery/pkg/util/errors/BUILD index 196de9f621..d13ff24071 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/errors/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/util/errors/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -11,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["errors_test.go"], + importpath = "k8s.io/apimachinery/pkg/util/errors", library = ":go_default_library", - tags = ["automanaged"], ) go_library( @@ -21,5 +19,18 @@ go_library( "doc.go", "errors.go", ], + importpath = "k8s.io/apimachinery/pkg/util/errors", +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], tags = ["automanaged"], ) diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go index 
bdea0e16c7..26e7eb2082 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go @@ -21,7 +21,7 @@ import ( "fmt" ) -// MessagesgCountMap contains occurance for each error message. +// MessageCountMap contains occurance for each error message. type MessageCountMap map[string]int // Aggregate represents an object that contains multiple errors, but does not diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/BUILD b/vendor/k8s.io/apimachinery/pkg/util/intstr/BUILD index 92968e2c8c..2e3fe65161 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -11,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["intstr_test.go"], + importpath = "k8s.io/apimachinery/pkg/util/intstr", library = ":go_default_library", - tags = ["automanaged"], deps = ["//vendor/github.com/ghodss/yaml:go_default_library"], ) @@ -22,12 +20,31 @@ go_library( "generated.pb.go", "intstr.go", ], - tags = ["automanaged"], + importpath = "k8s.io/apimachinery/pkg/util/intstr", deps = [ "//vendor/github.com/go-openapi/spec:go_default_library", "//vendor/github.com/gogo/protobuf/proto:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/google/gofuzz:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/openapi:go_default_library", + "//vendor/k8s.io/kube-openapi/pkg/common:go_default_library", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) + +filegroup( + name = "go_default_library_protos", + srcs = ["generated.proto"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go index 02586b3481..04a77bb6b4 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go @@ -24,7 +24,7 @@ import ( "strconv" "strings" - "k8s.io/apimachinery/pkg/openapi" + openapi "k8s.io/kube-openapi/pkg/common" "github.com/go-openapi/spec" "github.com/golang/glog" diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/BUILD b/vendor/k8s.io/apimachinery/pkg/util/net/BUILD index 09a89c1179..d7390ed5c2 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/util/net/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -17,8 +15,8 @@ go_test( "port_split_test.go", "util_test.go", ], + importpath = "k8s.io/apimachinery/pkg/util/net", library = ":go_default_library", - tags = ["automanaged"], deps = ["//vendor/github.com/spf13/pflag:go_default_library"], ) @@ -31,10 +29,23 @@ go_library( "port_split.go", "util.go", ], - tags = ["automanaged"], + importpath = "k8s.io/apimachinery/pkg/util/net", deps = [ "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/net/http2:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + 
srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/http.go b/vendor/k8s.io/apimachinery/pkg/util/net/http.go index adb80813be..b544a60a50 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/http.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/http.go @@ -26,6 +26,7 @@ import ( "net/http" "net/url" "os" + "path" "strconv" "strings" @@ -33,6 +34,26 @@ import ( "golang.org/x/net/http2" ) +// JoinPreservingTrailingSlash does a path.Join of the specified elements, +// preserving any trailing slash on the last non-empty segment +func JoinPreservingTrailingSlash(elem ...string) string { + // do the basic path join + result := path.Join(elem...) + + // find the last non-empty segment + for i := len(elem) - 1; i >= 0; i-- { + if len(elem[i]) > 0 { + // if the last segment ended in a slash, ensure our result does as well + if strings.HasSuffix(elem[i], "/") && !strings.HasSuffix(result, "/") { + result += "/" + } + break + } + } + + return result +} + // IsProbableEOF returns true if the given error resembles a connection termination // scenario that would justify assuming that the watch is empty. // These errors are what the Go http stack returns back to us which are general @@ -108,7 +129,7 @@ func DialerFor(transport http.RoundTripper) (DialFunc, error) { case RoundTripperWrapper: return DialerFor(transport.WrappedRoundTripper()) default: - return nil, fmt.Errorf("unknown transport type: %v", transport) + return nil, fmt.Errorf("unknown transport type: %T", transport) } } @@ -129,7 +150,7 @@ func TLSClientConfig(transport http.RoundTripper) (*tls.Config, error) { case RoundTripperWrapper: return TLSClientConfig(transport.WrappedRoundTripper()) default: - return nil, fmt.Errorf("unknown transport type: %v", transport) + return nil, fmt.Errorf("unknown transport type: %T", transport) } } @@ -235,8 +256,11 @@ func isDefault(transportProxier func(*http.Request) (*url.URL, error)) bool { // NewProxierWithNoProxyCIDR constructs a Proxier function that respects CIDRs in NO_PROXY and delegates if // no matching CIDRs are found func NewProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error)) func(req *http.Request) (*url.URL, error) { - // we wrap the default method, so we only need to perform our check if the NO_PROXY envvar has a CIDR in it + // we wrap the default method, so we only need to perform our check if the NO_PROXY (or no_proxy) envvar has a CIDR in it noProxyEnv := os.Getenv("NO_PROXY") + if noProxyEnv == "" { + noProxyEnv = os.Getenv("no_proxy") + } noProxyRules := strings.Split(noProxyEnv, ",") cidrs := []*net.IPNet{} @@ -277,6 +301,13 @@ func NewProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error } } +// DialerFunc implements Dialer for the provided function. +type DialerFunc func(req *http.Request) (net.Conn, error) + +func (fn DialerFunc) Dial(req *http.Request) (net.Conn, error) { + return fn(req) +} + // Dialer dials a host and writes a request to it. 
type Dialer interface { // Dial connects to the host specified by req's URL, writes the request to the connection, and diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/interface.go b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go index a1e53d2e43..42816bd705 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/interface.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go @@ -29,18 +29,47 @@ import ( "github.com/golang/glog" ) +type AddressFamily uint + +const ( + familyIPv4 AddressFamily = 4 + familyIPv6 AddressFamily = 6 +) + +const ( + ipv4RouteFile = "/proc/net/route" + ipv6RouteFile = "/proc/net/ipv6_route" +) + type Route struct { Interface string Destination net.IP Gateway net.IP - // TODO: add more fields here if needed + Family AddressFamily } -func getRoutes(input io.Reader) ([]Route, error) { - routes := []Route{} - if input == nil { - return nil, fmt.Errorf("input is nil") +type RouteFile struct { + name string + parse func(input io.Reader) ([]Route, error) +} + +var ( + v4File = RouteFile{name: ipv4RouteFile, parse: getIPv4DefaultRoutes} + v6File = RouteFile{name: ipv6RouteFile, parse: getIPv6DefaultRoutes} +) + +func (rf RouteFile) extract() ([]Route, error) { + file, err := os.Open(rf.name) + if err != nil { + return nil, err } + defer file.Close() + return rf.parse(file) +} + +// getIPv4DefaultRoutes obtains the IPv4 routes, and filters out non-default routes. +func getIPv4DefaultRoutes(input io.Reader) ([]Route, error) { + routes := []Route{} scanner := bufio.NewReader(input) for { line, err := scanner.ReadString('\n') @@ -52,24 +81,71 @@ func getRoutes(input io.Reader) ([]Route, error) { continue } fields := strings.Fields(line) - routes = append(routes, Route{}) - route := &routes[len(routes)-1] - route.Interface = fields[0] - ip, err := parseIP(fields[1]) + // Interested in fields: + // 0 - interface name + // 1 - destination address + // 2 - gateway + dest, err := parseIP(fields[1], familyIPv4) if err != nil { return nil, err } - route.Destination = ip - ip, err = parseIP(fields[2]) + gw, err := parseIP(fields[2], familyIPv4) if err != nil { return nil, err } - route.Gateway = ip + if !dest.Equal(net.IPv4zero) { + continue + } + routes = append(routes, Route{ + Interface: fields[0], + Destination: dest, + Gateway: gw, + Family: familyIPv4, + }) } return routes, nil } -func parseIP(str string) (net.IP, error) { +func getIPv6DefaultRoutes(input io.Reader) ([]Route, error) { + routes := []Route{} + scanner := bufio.NewReader(input) + for { + line, err := scanner.ReadString('\n') + if err == io.EOF { + break + } + fields := strings.Fields(line) + // Interested in fields: + // 0 - destination address + // 4 - gateway + // 9 - interface name + dest, err := parseIP(fields[0], familyIPv6) + if err != nil { + return nil, err + } + gw, err := parseIP(fields[4], familyIPv6) + if err != nil { + return nil, err + } + if !dest.Equal(net.IPv6zero) { + continue + } + if gw.Equal(net.IPv6zero) { + continue // loopback + } + routes = append(routes, Route{ + Interface: fields[9], + Destination: dest, + Gateway: gw, + Family: familyIPv6, + }) + } + return routes, nil +} + +// parseIP takes the hex IP address string from route file and converts it +// to a net.IP address. For IPv4, the value must be converted to big endian. 
+func parseIP(str string, family AddressFamily) (net.IP, error) { if str == "" { return nil, fmt.Errorf("input is nil") } @@ -77,11 +153,16 @@ func parseIP(str string) (net.IP, error) { if err != nil { return nil, err } - //TODO add ipv6 support - if len(bytes) != net.IPv4len { - return nil, fmt.Errorf("only IPv4 is supported") + if family == familyIPv4 { + if len(bytes) != net.IPv4len { + return nil, fmt.Errorf("invalid IPv4 address in route") + } + return net.IP([]byte{bytes[3], bytes[2], bytes[1], bytes[0]}), nil + } + // Must be IPv6 + if len(bytes) != net.IPv6len { + return nil, fmt.Errorf("invalid IPv6 address in route") } - bytes[0], bytes[1], bytes[2], bytes[3] = bytes[3], bytes[2], bytes[1], bytes[0] return net.IP(bytes), nil } @@ -96,10 +177,13 @@ func isInterfaceUp(intf *net.Interface) bool { return false } -//getFinalIP method receives all the IP addrs of a Interface -//and returns a nil if the address is Loopback, Ipv6, link-local or nil. -//It returns a valid IPv4 if an Ipv4 address is found in the array. -func getFinalIP(addrs []net.Addr) (net.IP, error) { +func isLoopbackOrPointToPoint(intf *net.Interface) bool { + return intf.Flags&(net.FlagLoopback|net.FlagPointToPoint) != 0 +} + +// getMatchingGlobalIP returns the first valid global unicast address of the given +// 'family' from the list of 'addrs'. +func getMatchingGlobalIP(addrs []net.Addr, family AddressFamily) (net.IP, error) { if len(addrs) > 0 { for i := range addrs { glog.V(4).Infof("Checking addr %s.", addrs[i].String()) @@ -107,17 +191,15 @@ func getFinalIP(addrs []net.Addr) (net.IP, error) { if err != nil { return nil, err } - //Only IPv4 - //TODO : add IPv6 support - if ip.To4() != nil { - if !ip.IsLoopback() && !ip.IsLinkLocalMulticast() && !ip.IsLinkLocalUnicast() { + if memberOf(ip, family) { + if ip.IsGlobalUnicast() { glog.V(4).Infof("IP found %v", ip) return ip, nil } else { - glog.V(4).Infof("Loopback/link-local found %v", ip) + glog.V(4).Infof("Non-global unicast address found %v", ip) } } else { - glog.V(4).Infof("%v is not a valid IPv4 address", ip) + glog.V(4).Infof("%v is not an IPv%d address", ip, int(family)) } } @@ -125,7 +207,9 @@ func getFinalIP(addrs []net.Addr) (net.IP, error) { return nil, nil } -func getIPFromInterface(intfName string, nw networkInterfacer) (net.IP, error) { +// getIPFromInterface gets the IPs on an interface and returns a global unicast address, if any. The +// interface must be up, the IP must in the family requested, and the IP must be a global unicast address. +func getIPFromInterface(intfName string, forFamily AddressFamily, nw networkInterfacer) (net.IP, error) { intf, err := nw.InterfaceByName(intfName) if err != nil { return nil, err @@ -136,131 +220,161 @@ func getIPFromInterface(intfName string, nw networkInterfacer) (net.IP, error) { return nil, err } glog.V(4).Infof("Interface %q has %d addresses :%v.", intfName, len(addrs), addrs) - finalIP, err := getFinalIP(addrs) + matchingIP, err := getMatchingGlobalIP(addrs, forFamily) if err != nil { return nil, err } - if finalIP != nil { - glog.V(4).Infof("valid IPv4 address for interface %q found as %v.", intfName, finalIP) - return finalIP, nil + if matchingIP != nil { + glog.V(4).Infof("Found valid IPv%d address %v for interface %q.", int(forFamily), matchingIP, intfName) + return matchingIP, nil } } - return nil, nil } -func flagsSet(flags net.Flags, test net.Flags) bool { - return flags&test != 0 +// memberOF tells if the IP is of the desired family. Used for checking interface addresses. 
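Note: /proc/net/route stores IPv4 addresses as little-endian hex, so parseIP has to reverse the bytes before treating them as a net.IP, while /proc/net/ipv6_route entries are already in address order. A standalone sketch of the same decoding; the hex strings are example route-file values, not taken from this patch:

package main

import (
	"encoding/hex"
	"fmt"
	"net"
)

func main() {
	// "0101A8C0" is how /proc/net/route encodes 192.168.1.1: the bytes are
	// little-endian (C0 A8 01 01 reversed), so they must be flipped.
	raw, err := hex.DecodeString("0101A8C0")
	if err != nil || len(raw) != net.IPv4len {
		panic("unexpected IPv4 route entry")
	}
	ip := net.IP([]byte{raw[3], raw[2], raw[1], raw[0]})
	fmt.Println(ip) // 192.168.1.1

	// IPv6 entries in /proc/net/ipv6_route are plain hex in address order.
	raw6, _ := hex.DecodeString("fe800000000000000000000000000001")
	fmt.Println(net.IP(raw6)) // fe80::1
}
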
+func memberOf(ip net.IP, family AddressFamily) bool { + if ip.To4() != nil { + return family == familyIPv4 + } else { + return family == familyIPv6 + } } -func flagsClear(flags net.Flags, test net.Flags) bool { - return flags&test == 0 -} - -func chooseHostInterfaceNativeGo() (net.IP, error) { - intfs, err := net.Interfaces() +// chooseIPFromHostInterfaces looks at all system interfaces, trying to find one that is up that +// has a global unicast address (non-loopback, non-link local, non-point2point), and returns the IP. +// Searches for IPv4 addresses, and then IPv6 addresses. +func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) { + intfs, err := nw.Interfaces() if err != nil { return nil, err } - i := 0 - var ip net.IP - for i = range intfs { - if flagsSet(intfs[i].Flags, net.FlagUp) && flagsClear(intfs[i].Flags, net.FlagLoopback|net.FlagPointToPoint) { - addrs, err := intfs[i].Addrs() + if len(intfs) == 0 { + return nil, fmt.Errorf("no interfaces found on host.") + } + for _, family := range []AddressFamily{familyIPv4, familyIPv6} { + glog.V(4).Infof("Looking for system interface with a global IPv%d address", uint(family)) + for _, intf := range intfs { + if !isInterfaceUp(&intf) { + glog.V(4).Infof("Skipping: down interface %q", intf.Name) + continue + } + if isLoopbackOrPointToPoint(&intf) { + glog.V(4).Infof("Skipping: LB or P2P interface %q", intf.Name) + continue + } + addrs, err := nw.Addrs(&intf) if err != nil { return nil, err } - if len(addrs) > 0 { - for _, addr := range addrs { - if addrIP, _, err := net.ParseCIDR(addr.String()); err == nil { - if addrIP.To4() != nil { - ip = addrIP.To4() - if !ip.IsLinkLocalMulticast() && !ip.IsLinkLocalUnicast() { - break - } - } - } + if len(addrs) == 0 { + glog.V(4).Infof("Skipping: no addresses on interface %q", intf.Name) + continue + } + for _, addr := range addrs { + ip, _, err := net.ParseCIDR(addr.String()) + if err != nil { + return nil, fmt.Errorf("Unable to parse CIDR for interface %q: %s", intf.Name, err) } - if ip != nil { - // This interface should suffice. - break + if !memberOf(ip, family) { + glog.V(4).Infof("Skipping: no address family match for %q on interface %q.", ip, intf.Name) + continue } + // TODO: Decide if should open up to allow IPv6 LLAs in future. + if !ip.IsGlobalUnicast() { + glog.V(4).Infof("Skipping: non-global address %q on interface %q.", ip, intf.Name) + continue + } + glog.V(4).Infof("Found global unicast address %q on interface %q.", ip, intf.Name) + return ip, nil } } } - if ip == nil { - return nil, fmt.Errorf("no acceptable interface from host") - } - glog.V(4).Infof("Choosing interface %s (IP %v) as default", intfs[i].Name, ip) - return ip, nil + return nil, fmt.Errorf("no acceptable interface with global unicast address found on host") } -//ChooseHostInterface is a method used fetch an IP for a daemon. -//It uses data from /proc/net/route file. -//For a node with no internet connection ,it returns error -//For a multi n/w interface node it returns the IP of the interface with gateway on it. +// ChooseHostInterface is a method used fetch an IP for a daemon. +// If there is no routing info file, it will choose a global IP from the system +// interfaces. Otherwise, it will use IPv4 and IPv6 route information to return the +// IP of the interface with a gateway on it (with priority given to IPv4). For a node +// with no internet connection, it returns error. 
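Note: ChooseHostInterface keeps its original signature, so existing callers are unaffected; it now prefers an interface with an IPv4 default route and falls back to IPv6 before giving up. A trivial usage sketch:

package main

import (
	"fmt"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
	// Pick an IP suitable for a daemon to bind to or advertise, based on the
	// host's default routes (IPv4 first, then IPv6).
	ip, err := utilnet.ChooseHostInterface()
	if err != nil {
		fmt.Println("no usable interface:", err)
		return
	}
	fmt.Println("selected host IP:", ip)
}
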
func ChooseHostInterface() (net.IP, error) { - inFile, err := os.Open("/proc/net/route") + var nw networkInterfacer = networkInterface{} + if _, err := os.Stat(ipv4RouteFile); os.IsNotExist(err) { + return chooseIPFromHostInterfaces(nw) + } + routes, err := getAllDefaultRoutes() if err != nil { - if os.IsNotExist(err) { - return chooseHostInterfaceNativeGo() - } return nil, err } - defer inFile.Close() - var nw networkInterfacer = networkInterface{} - return chooseHostInterfaceFromRoute(inFile, nw) + return chooseHostInterfaceFromRoute(routes, nw) } +// networkInterfacer defines an interface for several net library functions. Production +// code will forward to net library functions, and unit tests will override the methods +// for testing purposes. type networkInterfacer interface { InterfaceByName(intfName string) (*net.Interface, error) Addrs(intf *net.Interface) ([]net.Addr, error) + Interfaces() ([]net.Interface, error) } +// networkInterface implements the networkInterfacer interface for production code, just +// wrapping the underlying net library function calls. type networkInterface struct{} func (_ networkInterface) InterfaceByName(intfName string) (*net.Interface, error) { - intf, err := net.InterfaceByName(intfName) - if err != nil { - return nil, err - } - return intf, nil + return net.InterfaceByName(intfName) } func (_ networkInterface) Addrs(intf *net.Interface) ([]net.Addr, error) { - addrs, err := intf.Addrs() - if err != nil { - return nil, err - } - return addrs, nil + return intf.Addrs() } -func chooseHostInterfaceFromRoute(inFile io.Reader, nw networkInterfacer) (net.IP, error) { - routes, err := getRoutes(inFile) +func (_ networkInterface) Interfaces() ([]net.Interface, error) { + return net.Interfaces() +} + +// getAllDefaultRoutes obtains IPv4 and IPv6 default routes on the node. If unable +// to read the IPv4 routing info file, we return an error. If unable to read the IPv6 +// routing info file (which is optional), we'll just use the IPv4 route information. +// Using all the routing info, if no default routes are found, an error is returned. +func getAllDefaultRoutes() ([]Route, error) { + routes, err := v4File.extract() if err != nil { return nil, err } - zero := net.IP{0, 0, 0, 0} - var finalIP net.IP - for i := range routes { - //find interface with gateway - if routes[i].Destination.Equal(zero) { - glog.V(4).Infof("Default route transits interface %q", routes[i].Interface) - finalIP, err := getIPFromInterface(routes[i].Interface, nw) + v6Routes, _ := v6File.extract() + routes = append(routes, v6Routes...) + if len(routes) == 0 { + return nil, fmt.Errorf("No default routes.") + } + return routes, nil +} + +// chooseHostInterfaceFromRoute cycles through each default route provided, looking for a +// global IP address from the interface for the route. Will first look all each IPv4 route for +// an IPv4 IP, and then will look at each IPv6 route for an IPv6 IP. 
+func chooseHostInterfaceFromRoute(routes []Route, nw networkInterfacer) (net.IP, error) { + for _, family := range []AddressFamily{familyIPv4, familyIPv6} { + glog.V(4).Infof("Looking for default routes with IPv%d addresses", uint(family)) + for _, route := range routes { + if route.Family != family { + continue + } + glog.V(4).Infof("Default route transits interface %q", route.Interface) + finalIP, err := getIPFromInterface(route.Interface, family, nw) if err != nil { return nil, err } if finalIP != nil { - glog.V(4).Infof("Choosing IP %v ", finalIP) + glog.V(4).Infof("Found active IP %v ", finalIP) return finalIP, nil } } } - glog.V(4).Infof("No valid IP found") - if finalIP == nil { - return nil, fmt.Errorf("Unable to select an IP.") - } - return nil, nil + glog.V(4).Infof("No active IP found by looking at default routes") + return nil, fmt.Errorf("unable to select an IP from default routes.") } // If bind-address is usable, return it directly diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/BUILD b/vendor/k8s.io/apimachinery/pkg/util/runtime/BUILD index d6063ee6dc..40892fa783 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/runtime/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -11,13 +9,26 @@ load( go_test( name = "go_default_test", srcs = ["runtime_test.go"], + importpath = "k8s.io/apimachinery/pkg/util/runtime", library = ":go_default_library", - tags = ["automanaged"], ) go_library( name = "go_default_library", srcs = ["runtime.go"], - tags = ["automanaged"], + importpath = "k8s.io/apimachinery/pkg/util/runtime", deps = ["//vendor/github.com/golang/glog:go_default_library"], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/BUILD b/vendor/k8s.io/apimachinery/pkg/util/sets/BUILD index 97b4bb9082..5a6175ad4f 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load("@io_kubernetes_build//defs:go.bzl", "go_genrule") load( "@io_bazel_rules_go//go:def.bzl", @@ -19,7 +17,7 @@ go_library( "int64.go", "string.go", ], - tags = ["automanaged"], + importpath = "k8s.io/apimachinery/pkg/util/sets", ) go_genrule( @@ -36,9 +34,9 @@ go_genrule( "string.go", ], cmd = """ -$(location //cmd/libs/go2idl/set-gen) \ +$(location //vendor/k8s.io/code-generator/cmd/set-gen) \ --input-dirs ./vendor/k8s.io/apimachinery/pkg/util/sets/types \ - --output-base $(GENDIR)/vendor/k8s.io/apimachinery/pkg/util \ + --output-base $$(dirname $$(dirname $(location :byte.go))) \ --go-header-file $(location //hack/boilerplate:boilerplate.go.txt) \ --output-package sets """, @@ -46,13 +44,29 @@ $(location //cmd/libs/go2idl/set-gen) \ "//vendor/k8s.io/apimachinery/pkg/util/sets/types:go_default_library", ], tools = [ - "//cmd/libs/go2idl/set-gen", + "//vendor/k8s.io/code-generator/cmd/set-gen", ], ) go_test( name = "go_default_test", srcs = ["set_test.go"], + importpath = "k8s.io/apimachinery/pkg/util/sets", library = ":go_default_library", +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + 
+filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/apimachinery/pkg/util/sets/types:all-srcs", + ], tags = ["automanaged"], ) diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/BUILD b/vendor/k8s.io/apimachinery/pkg/util/validation/BUILD index 5b5d461e9d..9680c1fa7b 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -11,12 +9,30 @@ load( go_test( name = "go_default_test", srcs = ["validation_test.go"], + importpath = "k8s.io/apimachinery/pkg/util/validation", library = ":go_default_library", - tags = ["automanaged"], + deps = ["//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library"], ) go_library( name = "go_default_library", srcs = ["validation.go"], + importpath = "k8s.io/apimachinery/pkg/util/validation", + deps = ["//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:all-srcs", + ], tags = ["automanaged"], ) diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/BUILD b/vendor/k8s.io/apimachinery/pkg/util/validation/field/BUILD index 7a777062a4..5508ab94c8 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/field/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -14,8 +12,8 @@ go_test( "errors_test.go", "path_test.go", ], + importpath = "k8s.io/apimachinery/pkg/util/validation/field", library = ":go_default_library", - tags = ["automanaged"], ) go_library( @@ -24,9 +22,22 @@ go_library( "errors.go", "path.go", ], - tags = ["automanaged"], + importpath = "k8s.io/apimachinery/pkg/util/validation/field", deps = [ "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", ], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go index 43c779a11b..31705dee38 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go @@ -19,6 +19,7 @@ package field import ( "fmt" "reflect" + "strconv" "strings" utilerrors "k8s.io/apimachinery/pkg/util/errors" @@ -175,7 +176,11 @@ func Invalid(field *Path, value interface{}, detail string) *Error { func NotSupported(field *Path, value interface{}, validValues []string) *Error { detail := "" if validValues != nil && len(validValues) > 0 { - detail = "supported values: " + strings.Join(validValues, ", ") + quotedValues := make([]string, len(validValues)) + for i, v := range validValues { + quotedValues[i] = strconv.Quote(v) + } + detail = "supported values: " + strings.Join(quotedValues, ", ") } return &Error{ErrorTypeNotSupported, 
field.String(), value, detail} } diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go index b1fcc57081..7da6a17d99 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go @@ -22,6 +22,8 @@ import ( "net" "regexp" "strings" + + "k8s.io/apimachinery/pkg/util/validation/field" ) const qnameCharFmt string = "[A-Za-z0-9]" @@ -67,6 +69,21 @@ func IsQualifiedName(value string) []string { return errs } +// IsFullyQualifiedName checks if the name is fully qualified. +func IsFullyQualifiedName(fldPath *field.Path, name string) field.ErrorList { + var allErrors field.ErrorList + if len(name) == 0 { + return append(allErrors, field.Required(fldPath, "")) + } + if errs := IsDNS1123Subdomain(name); len(errs) > 0 { + return append(allErrors, field.Invalid(fldPath, name, strings.Join(errs, ","))) + } + if len(strings.Split(name, ".")) < 3 { + return append(allErrors, field.Invalid(fldPath, name, "should be a domain with at least three segments separated by dots")) + } + return allErrors +} + const labelValueFmt string = "(" + qualifiedNameFmt + ")?" const labelValueErrMsg string = "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character" const LabelValueMaxLength int = 63 @@ -126,7 +143,7 @@ func IsDNS1123Subdomain(value string) []string { } const dns1035LabelFmt string = "[a-z]([-a-z0-9]*[a-z0-9])?" -const dns1035LabelErrMsg string = "a DNS-1035 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character" +const dns1035LabelErrMsg string = "a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character" const DNS1035LabelMaxLength int = 63 var dns1035LabelRegexp = regexp.MustCompile("^" + dns1035LabelFmt + "$") @@ -188,6 +205,14 @@ func IsValidPortNum(port int) []string { return []string{InclusiveRangeError(1, 65535)} } +// IsInRange tests that the argument is in an inclusive range. +func IsInRange(value int, min int, max int) []string { + if value >= min && value <= max { + return nil + } + return []string{InclusiveRangeError(min, max)} +} + // Now in libcontainer UID/GID limits is 0 ~ 1<<31 - 1 // TODO: once we have a type for UID/GID we should make these that type. const ( @@ -277,6 +302,22 @@ func IsHTTPHeaderName(value string) []string { return nil } +const envVarNameFmt = "[-._a-zA-Z][-._a-zA-Z0-9]*" +const envVarNameFmtErrMsg string = "a valid environment variable name must consist of alphabetic characters, digits, '_', '-', or '.', and must not start with a digit" + +var envVarNameRegexp = regexp.MustCompile("^" + envVarNameFmt + "$") + +// IsEnvVarName tests if a string is a valid environment variable name. +func IsEnvVarName(value string) []string { + var errs []string + if !envVarNameRegexp.MatchString(value) { + errs = append(errs, RegexError(envVarNameFmtErrMsg, envVarNameFmt, "my.env-name", "MY_ENV.NAME", "MyEnvName1")) + } + + errs = append(errs, hasChDirPrefix(value)...) 
+ return errs +} + const configMapKeyFmt = `[-._a-zA-Z0-9]+` const configMapKeyErrMsg string = "a valid config key must consist of alphanumeric characters, '-', '_' or '.'" @@ -291,13 +332,7 @@ func IsConfigMapKey(value string) []string { if !configMapKeyRegexp.MatchString(value) { errs = append(errs, RegexError(configMapKeyErrMsg, configMapKeyFmt, "key.name", "KEY_NAME", "key-name")) } - if value == "." { - errs = append(errs, `must not be '.'`) - } else if value == ".." { - errs = append(errs, `must not be '..'`) - } else if strings.HasPrefix(value, "..") { - errs = append(errs, `must not start with '..'`) - } + errs = append(errs, hasChDirPrefix(value)...) return errs } @@ -341,3 +376,16 @@ func prefixEach(msgs []string, prefix string) []string { func InclusiveRangeError(lo, hi int) string { return fmt.Sprintf(`must be between %d and %d, inclusive`, lo, hi) } + +func hasChDirPrefix(value string) []string { + var errs []string + switch { + case value == ".": + errs = append(errs, `must not be '.'`) + case value == "..": + errs = append(errs, `must not be '..'`) + case strings.HasPrefix(value, ".."): + errs = append(errs, `must not start with '..'`) + } + return errs +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/BUILD b/vendor/k8s.io/apimachinery/pkg/util/wait/BUILD index 3a98b65b92..6eca13c02b 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -11,8 +9,8 @@ load( go_test( name = "go_default_test", srcs = ["wait_test.go"], + importpath = "k8s.io/apimachinery/pkg/util/wait", library = ":go_default_library", - tags = ["automanaged"], deps = ["//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library"], ) @@ -22,6 +20,19 @@ go_library( "doc.go", "wait.go", ], - tags = ["automanaged"], + importpath = "k8s.io/apimachinery/pkg/util/wait", deps = ["//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library"], ) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go index badaa21596..0997de8065 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go @@ -17,8 +17,10 @@ limitations under the License. package wait import ( + "context" "errors" "math/rand" + "sync" "time" "k8s.io/apimachinery/pkg/util/runtime" @@ -36,6 +38,40 @@ var ForeverTestTimeout = time.Second * 30 // NeverStop may be passed to Until to make it never stop. var NeverStop <-chan struct{} = make(chan struct{}) +// Group allows to start a group of goroutines and wait for their completion. +type Group struct { + wg sync.WaitGroup +} + +func (g *Group) Wait() { + g.wg.Wait() +} + +// StartWithChannel starts f in a new goroutine in the group. +// stopCh is passed to f as an argument. f should stop when stopCh is available. +func (g *Group) StartWithChannel(stopCh <-chan struct{}, f func(stopCh <-chan struct{})) { + g.Start(func() { + f(stopCh) + }) +} + +// StartWithContext starts f in a new goroutine in the group. +// ctx is passed to f as an argument. f should stop when ctx.Done() is available. 
+func (g *Group) StartWithContext(ctx context.Context, f func(context.Context)) { + g.Start(func() { + f(ctx) + }) +} + +// Start starts f in a new goroutine in the group. +func (g *Group) Start(f func()) { + g.wg.Add(1) + go func() { + defer g.wg.Done() + f() + }() +} + // Forever calls f every period for ever. // // Forever is syntactic sugar on top of Until. diff --git a/vendor/k8s.io/apimachinery/pkg/watch/BUILD b/vendor/k8s.io/apimachinery/pkg/watch/BUILD index 29ca5c810c..da068f70d4 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/watch/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -17,10 +15,12 @@ go_library( "streamwatcher.go", "until.go", "watch.go", + "zz_generated.deepcopy.go", ], - tags = ["automanaged"], + importpath = "k8s.io/apimachinery/pkg/watch", deps = [ "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", @@ -37,7 +37,7 @@ go_test( "streamwatcher_test.go", "watch_test.go", ], - tags = ["automanaged"], + importpath = "k8s.io/apimachinery/pkg/watch_test", deps = [ "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", @@ -45,3 +45,28 @@ go_test( "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", ], ) + +go_test( + name = "go_default_test", + srcs = ["until_test.go"], + importpath = "k8s.io/apimachinery/pkg/watch", + library = ":go_default_library", + deps = [ + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/watch/mux.go b/vendor/k8s.io/apimachinery/pkg/watch/mux.go index fafccd78ef..a65088c1cf 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/mux.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/mux.go @@ -84,6 +84,13 @@ type functionFakeRuntimeObject func() func (obj functionFakeRuntimeObject) GetObjectKind() schema.ObjectKind { return schema.EmptyObjectKind } +func (obj functionFakeRuntimeObject) DeepCopyObject() runtime.Object { + if obj == nil { + return nil + } + // funcs are immutable. Hence, just return the original func. + return obj +} // Execute f, blocking the incoming queue (and waiting for it to drain first). // The purpose of this terrible hack is so that watchers added after an event diff --git a/vendor/k8s.io/apimachinery/pkg/watch/watch.go b/vendor/k8s.io/apimachinery/pkg/watch/watch.go index dd49c41f9a..5c1380b234 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/watch.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/watch.go @@ -20,9 +20,9 @@ import ( "fmt" "sync" - "k8s.io/apimachinery/pkg/runtime" - "github.com/golang/glog" + + "k8s.io/apimachinery/pkg/runtime" ) // Interface can be implemented by anything that knows how to watch and report changes. @@ -50,6 +50,7 @@ const ( ) // Event represents a single event to a watched resource. 
+// +k8s:deepcopy-gen=true type Event struct { Type EventType diff --git a/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go new file mode 100644 index 0000000000..322923d4a0 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go @@ -0,0 +1,59 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package watch + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + reflect "reflect" +) + +// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. +// +// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. +func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { + return []conversion.GeneratedDeepCopyFunc{ + {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*Event).DeepCopyInto(out.(*Event)) + return nil + }, InType: reflect.TypeOf(&Event{})}, + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Event) DeepCopyInto(out *Event) { + *out = *in + if in.Object == nil { + out.Object = nil + } else { + out.Object = in.Object.DeepCopyObject() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Event. 
+func (in *Event) DeepCopy() *Event { + if in == nil { + return nil + } + out := new(Event) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/BUILD b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/BUILD index 985afc384f..9f09628b62 100644 --- a/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/BUILD +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/BUILD @@ -1,7 +1,5 @@ package(default_visibility = ["//visibility:public"]) -licenses(["notice"]) - load( "@io_bazel_rules_go//go:def.bzl", "go_library", @@ -11,15 +9,25 @@ load( go_test( name = "go_default_test", srcs = ["deep_equal_test.go"], + importpath = "k8s.io/apimachinery/third_party/forked/golang/reflect", library = ":go_default_library", - tags = ["automanaged"], ) go_library( name = "go_default_library", - srcs = [ - "deep_equal.go", - "type.go", - ], + srcs = ["deep_equal.go"], + importpath = "k8s.io/apimachinery/third_party/forked/golang/reflect", +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], tags = ["automanaged"], ) diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/type.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/type.go deleted file mode 100644 index 67957ee33e..0000000000 --- a/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/type.go +++ /dev/null @@ -1,91 +0,0 @@ -//This package is copied from Go library reflect/type.go. -//The struct tag library provides no way to extract the list of struct tags, only -//a specific tag -package reflect - -import ( - "fmt" - - "strconv" - "strings" -) - -type StructTag struct { - Name string - Value string -} - -func (t StructTag) String() string { - return fmt.Sprintf("%s:%q", t.Name, t.Value) -} - -type StructTags []StructTag - -func (tags StructTags) String() string { - s := make([]string, 0, len(tags)) - for _, tag := range tags { - s = append(s, tag.String()) - } - return "`" + strings.Join(s, " ") + "`" -} - -func (tags StructTags) Has(name string) bool { - for i := range tags { - if tags[i].Name == name { - return true - } - } - return false -} - -// ParseStructTags returns the full set of fields in a struct tag in the order they appear in -// the struct tag. -func ParseStructTags(tag string) (StructTags, error) { - tags := StructTags{} - for tag != "" { - // Skip leading space. - i := 0 - for i < len(tag) && tag[i] == ' ' { - i++ - } - tag = tag[i:] - if tag == "" { - break - } - - // Scan to colon. A space, a quote or a control character is a syntax error. - // Strictly speaking, control chars include the range [0x7f, 0x9f], not just - // [0x00, 0x1f], but in practice, we ignore the multi-byte control characters - // as it is simpler to inspect the tag's bytes than the tag's runes. - i = 0 - for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f { - i++ - } - if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' { - break - } - name := string(tag[:i]) - tag = tag[i+1:] - - // Scan quoted string to find value. 
- i = 1 - for i < len(tag) && tag[i] != '"' { - if tag[i] == '\\' { - i++ - } - i++ - } - if i >= len(tag) { - break - } - qvalue := string(tag[:i+1]) - tag = tag[i+1:] - - value, err := strconv.Unquote(qvalue) - if err != nil { - return nil, err - } - tags = append(tags, StructTag{Name: name, Value: value}) - } - return tags, nil -} diff --git a/vendor/k8s.io/client-go/LICENSE b/vendor/k8s.io/client-go/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/vendor/k8s.io/client-go/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/BUILD b/vendor/k8s.io/client-go/pkg/apis/authentication/BUILD deleted file mode 100644 index 6e2604388a..0000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/BUILD +++ /dev/null @@ -1,25 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "register.go", - "types.go", - "zz_generated.deepcopy.go", - ], - tags = ["automanaged"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/OWNERS b/vendor/k8s.io/client-go/pkg/apis/authentication/OWNERS deleted file mode 100755 index 4135522b21..0000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/OWNERS +++ /dev/null @@ -1,9 +0,0 @@ -reviewers: -- liggitt -- lavalamp -- wojtek-t -- deads2k -- sttts -- timothysc -- mbohlool -- jianhuiz diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/doc.go b/vendor/k8s.io/client-go/pkg/apis/authentication/doc.go deleted file mode 100644 index 194de434d8..0000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// +groupName=authentication.k8s.io -package authentication diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/register.go b/vendor/k8s.io/client-go/pkg/apis/authentication/register.go deleted file mode 100644 index b0ac3c28bf..0000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/register.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package authentication - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "authentication.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) - -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &TokenReview{}, - ) - return nil -} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/types.go b/vendor/k8s.io/client-go/pkg/apis/authentication/types.go deleted file mode 100644 index 9c1e66b7bb..0000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/types.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package authentication - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - // ImpersonateUserHeader is used to impersonate a particular user during an API server request - ImpersonateUserHeader = "Impersonate-User" - - // ImpersonateGroupHeader is used to impersonate a particular group during an API server request. - // It can be repeated multiplied times for multiple groups. - ImpersonateGroupHeader = "Impersonate-Group" - - // ImpersonateUserExtraHeaderPrefix is a prefix for any header used to impersonate an entry in the - // extra map[string][]string for user.Info. The key will be every after the prefix. 
- // It can be repeated multiplied times for multiple map keys and the same key can be repeated multiple - // times to have multiple elements in the slice under a single key - ImpersonateUserExtraHeaderPrefix = "Impersonate-Extra-" -) - -// +genclient=true -// +nonNamespaced=true -// +noMethods=true - -// TokenReview attempts to authenticate a token to a known user. -type TokenReview struct { - metav1.TypeMeta - // ObjectMeta fulfills the metav1.ObjectMetaAccessor interface so that the stock - // REST handler paths work - metav1.ObjectMeta - - // Spec holds information about the request being evaluated - Spec TokenReviewSpec - - // Status is filled in by the server and indicates whether the request can be authenticated. - Status TokenReviewStatus -} - -// TokenReviewSpec is a description of the token authentication request. -type TokenReviewSpec struct { - // Token is the opaque bearer token. - Token string -} - -// TokenReviewStatus is the result of the token authentication request. -// This type mirrors the authentication.Token interface -type TokenReviewStatus struct { - // Authenticated indicates that the token was associated with a known user. - Authenticated bool - // User is the UserInfo associated with the provided token. - User UserInfo - // Error indicates that the token couldn't be checked - Error string -} - -// UserInfo holds the information about the user needed to implement the -// user.Info interface. -type UserInfo struct { - // The name that uniquely identifies this user among all active users. - Username string - // A unique value that identifies this user across time. If this user is - // deleted and another user by the same name is added, they will have - // different UIDs. - UID string - // The names of groups this user is a part of. - Groups []string - // Any additional information provided by the authenticator. 
- Extra map[string]ExtraValue -} - -// ExtraValue masks the value so protobuf can generate -type ExtraValue []string diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/BUILD b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/BUILD deleted file mode 100644 index 10ead221ba..0000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/BUILD +++ /dev/null @@ -1,34 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = [ - "conversion.go", - "defaults.go", - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - tags = ["automanaged"], - deps = [ - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/client-go/pkg/apis/authentication:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/conversion.go deleted file mode 100644 index 2ff5732d6d..0000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/conversion.go +++ /dev/null @@ -1,26 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "k8s.io/apimachinery/pkg/runtime" -) - -func addConversionFuncs(scheme *runtime.Scheme) error { - // Add non-generated conversion functions - return scheme.AddConversionFuncs() -} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/defaults.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/defaults.go deleted file mode 100644 index 074e15baed..0000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/defaults.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - "k8s.io/apimachinery/pkg/runtime" -) - -func addDefaultingFuncs(scheme *runtime.Scheme) error { - return RegisterDefaults(scheme) -} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/doc.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/doc.go deleted file mode 100644 index 8140e47c59..0000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +groupName=authentication.k8s.io -package v1 diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/generated.pb.go deleted file mode 100644 index e264811365..0000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/generated.pb.go +++ /dev/null @@ -1,1301 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/authentication/v1/generated.proto -// DO NOT EDIT! - -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/pkg/apis/authentication/v1/generated.proto - - It has these top-level messages: - ExtraValue - TokenReview - TokenReviewSpec - TokenReviewStatus - UserInfo -*/ -package v1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *ExtraValue) Reset() { *m = ExtraValue{} } -func (*ExtraValue) ProtoMessage() {} -func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *TokenReview) Reset() { *m = TokenReview{} } -func (*TokenReview) ProtoMessage() {} -func (*TokenReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } -func (*TokenReviewSpec) ProtoMessage() {} -func (*TokenReviewSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } -func (*TokenReviewStatus) ProtoMessage() {} -func (*TokenReviewStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *UserInfo) Reset() { *m = UserInfo{} } -func (*UserInfo) ProtoMessage() {} -func (*UserInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func init() { - proto.RegisterType((*ExtraValue)(nil), "k8s.io.client-go.pkg.apis.authentication.v1.ExtraValue") - proto.RegisterType((*TokenReview)(nil), "k8s.io.client-go.pkg.apis.authentication.v1.TokenReview") - proto.RegisterType((*TokenReviewSpec)(nil), "k8s.io.client-go.pkg.apis.authentication.v1.TokenReviewSpec") - proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.client-go.pkg.apis.authentication.v1.TokenReviewStatus") - proto.RegisterType((*UserInfo)(nil), "k8s.io.client-go.pkg.apis.authentication.v1.UserInfo") -} -func (m ExtraValue) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m) > 0 { - for _, s := range m { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *TokenReview) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TokenReview) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - return i, nil -} - -func (m *TokenReviewSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TokenReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) - i += copy(dAtA[i:], m.Token) - return i, nil -} - -func (m *TokenReviewStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err 
:= m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TokenReviewStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - if m.Authenticated { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.User.Size())) - n4, err := m.User.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) - i += copy(dAtA[i:], m.Error) - return i, nil -} - -func (m *UserInfo) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UserInfo) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) - i += copy(dAtA[i:], m.Username) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - if len(m.Groups) > 0 { - for _, s := range m.Groups { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.Extra) > 0 { - keysForExtra := make([]string, 0, len(m.Extra)) - for k := range m.Extra { - keysForExtra = append(keysForExtra, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - for _, k := range keysForExtra { - dAtA[i] = 0x22 - i++ - v := m.Extra[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n5, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - } - } - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m ExtraValue) Size() (n int) { - var l int - _ = l - if len(m) > 0 { - for _, s := range m { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *TokenReview) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *TokenReviewSpec) Size() (n int) { - var l int - _ = l - l = len(m.Token) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *TokenReviewStatus) 
Size() (n int) { - var l int - _ = l - n += 2 - l = m.User.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Error) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *UserInfo) Size() (n int) { - var l int - _ = l - l = len(m.Username) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.UID) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Groups) > 0 { - for _, s := range m.Groups { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Extra) > 0 { - for k, v := range m.Extra { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *TokenReview) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TokenReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TokenReviewSpec", "TokenReviewSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TokenReviewStatus", "TokenReviewStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *TokenReviewSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TokenReviewSpec{`, - `Token:` + fmt.Sprintf("%v", this.Token) + `,`, - `}`, - }, "") - return s -} -func (this *TokenReviewStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TokenReviewStatus{`, - `Authenticated:` + fmt.Sprintf("%v", this.Authenticated) + `,`, - `User:` + strings.Replace(strings.Replace(this.User.String(), "UserInfo", "UserInfo", 1), `&`, ``, 1) + `,`, - `Error:` + fmt.Sprintf("%v", this.Error) + `,`, - `}`, - }, "") - return s -} -func (this *UserInfo) String() string { - if this == nil { - return "nil" - } - keysForExtra := make([]string, 0, len(this.Extra)) - for k := range this.Extra { - keysForExtra = append(keysForExtra, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - mapStringForExtra := "map[string]ExtraValue{" - for _, k := range keysForExtra { - mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k]) - } - mapStringForExtra += "}" - s := strings.Join([]string{`&UserInfo{`, - `Username:` + fmt.Sprintf("%v", this.Username) + `,`, - `UID:` + fmt.Sprintf("%v", this.UID) + `,`, - `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, - `Extra:` + mapStringForExtra + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *ExtraValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExtraValue: 
wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExtraValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - *m = append(*m, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TokenReview) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TokenReview: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TokenReview: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TokenReviewSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TokenReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Token = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TokenReviewStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TokenReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Authenticated", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Authenticated = bool(v != 0) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { 
- break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Error = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UserInfo) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UserInfo: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UserInfo: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Username = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := 
int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Extra == nil { - m.Extra = make(map[string]ExtraValue) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &ExtraValue{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Extra[mapkey] = *mapvalue - } else { - var mapvalue ExtraValue - m.Extra[mapkey] = mapvalue - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/client-go/pkg/apis/authentication/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 640 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0xcd, 0x6e, 0xd3, 0x40, - 0x10, 0xb6, 0xf3, 0x53, 0x25, 0x1b, 0x0a, 0x65, 0x25, 0xa4, 0x28, 0x12, 0x4e, 0x14, 0x2e, 0x39, - 0x94, 0x35, 0x29, 0xa8, 0x54, 0x05, 0x84, 0x6a, 0x51, 0xa1, 0x1e, 0x00, 0x69, 0xa1, 0x08, 0x71, - 0x81, 0x8d, 0x33, 0x75, 0x96, 0xd4, 0x3f, 0x5a, 0xaf, 0x03, 0xbd, 0xf5, 0x11, 0x38, 0x72, 0xe4, - 0x35, 0x78, 0x83, 0xde, 0xe8, 0x8d, 0x1e, 0x50, 0x45, 0xcd, 0x8b, 0xa0, 0x5d, 0x2f, 0x4d, 0xda, - 0x52, 0xa1, 0xf6, 0xe6, 0xfd, 0x66, 0xbe, 0x6f, 0xbe, 0x99, 0xf1, 0xa0, 0x87, 0xe3, 0x95, 0x94, - 0xf0, 0xd8, 0x1d, 0x67, 0x03, 0x10, 0x11, 0x48, 0x48, 0xdd, 0x64, 0x1c, 0xb8, 0x2c, 0xe1, 0xa9, - 0xcb, 0x32, 0x39, 0x82, 0x48, 0x72, 0x9f, 0x49, 0x1e, 0x47, 0xee, 0xa4, 0xef, 0x06, 0x10, 0x81, - 0x60, 0x12, 0x86, 0x24, 0x11, 0xb1, 0x8c, 0xf1, 0x62, 0xc1, 0x26, 0x53, 0x36, 0x49, 0xc6, 0x01, - 0x51, 0x6c, 0x72, 0x92, 0x4d, 0x26, 0xfd, 0xd6, 0xed, 0x80, 0xcb, 0x51, 0x36, 0x20, 0x7e, 0x1c, - 0xba, 0x41, 0x1c, 0xc4, 0xae, 0x16, 0x19, 0x64, 0x5b, 0xfa, 0xa5, 0x1f, 0xfa, 0xab, 0x10, 0x6f, - 0xdd, 0x33, 0xd6, 0x58, 0xc2, 0x43, 0xe6, 0x8f, 0x78, 0x04, 0x62, 0x67, 0x6a, 0x2e, 0x04, 0xc9, - 0xfe, 0x61, 0xa9, 0xe5, 0x9e, 0xc7, 0x12, 0x59, 0x24, 0x79, 0x08, 0x67, 0x08, 0xcb, 0xff, 0x23, - 0xa4, 0xfe, 0x08, 0x42, 0x76, 0x86, 0x77, 0xf7, 0x3c, 0x5e, 0x26, 0xf9, 0xb6, 0xcb, 0x23, 0x99, - 0x4a, 0x71, 0x9a, 0xd4, 0xbd, 0x8f, 0xd0, 0xfa, 0x27, 0x29, 0xd8, 0x6b, 0xb6, 0x9d, 0x01, 0x6e, - 0xa3, 0x2a, 0x97, 0x10, 0xa6, 0x4d, 0xbb, 0x53, 0xee, 0xd5, 0xbd, 0x7a, 0x7e, 0xd8, 0xae, 0x6e, - 0x28, 0x80, 0x16, 0xf8, 0x6a, 0xed, 0xcb, 0xd7, 0xb6, 0xb5, 0xfb, 0xb3, 0x63, 0x75, 0xbf, 0x95, - 0x50, 0xe3, 0x55, 0x3c, 0x86, 0x88, 0xc2, 0x84, 0xc3, 0x47, 0xfc, 0x1e, 0xd5, 0xd4, 0x04, 0x86, - 0x4c, 0xb2, 0xa6, 0xdd, 0xb1, 0x7b, 0x8d, 0xa5, 0x3b, 0xc4, 0x2c, 0x63, 0xd6, 0xd0, 0x74, 0x1d, - 0x2a, 0x9b, 
0x4c, 0xfa, 0xe4, 0xc5, 0xe0, 0x03, 0xf8, 0xf2, 0x19, 0x48, 0xe6, 0xe1, 0xbd, 0xc3, - 0xb6, 0x95, 0x1f, 0xb6, 0xd1, 0x14, 0xa3, 0xc7, 0xaa, 0xf8, 0x1d, 0xaa, 0xa4, 0x09, 0xf8, 0xcd, - 0x92, 0x56, 0x7f, 0x44, 0x2e, 0xb2, 0x6a, 0x32, 0x63, 0xf5, 0x65, 0x02, 0xbe, 0x77, 0xc5, 0x94, - 0xaa, 0xa8, 0x17, 0xd5, 0xc2, 0x38, 0x40, 0x73, 0xa9, 0x64, 0x32, 0x4b, 0x9b, 0x65, 0x5d, 0xe2, - 0xf1, 0xe5, 0x4b, 0x68, 0x19, 0xef, 0xaa, 0x29, 0x32, 0x57, 0xbc, 0xa9, 0x91, 0xef, 0x2e, 0xa3, - 0x6b, 0xa7, 0xfc, 0xe0, 0x5b, 0xa8, 0x2a, 0x15, 0xa4, 0x67, 0x57, 0xf7, 0xe6, 0x0d, 0xb3, 0x5a, - 0xe4, 0x15, 0xb1, 0xee, 0x77, 0x1b, 0x5d, 0x3f, 0x53, 0x05, 0x3f, 0x40, 0xf3, 0x33, 0x66, 0x60, - 0xa8, 0x25, 0x6a, 0xde, 0x0d, 0x23, 0x31, 0xbf, 0x36, 0x1b, 0xa4, 0x27, 0x73, 0xf1, 0x1b, 0x54, - 0xc9, 0x52, 0x10, 0x66, 0xa8, 0xcb, 0x17, 0xeb, 0x78, 0x33, 0x05, 0xb1, 0x11, 0x6d, 0xc5, 0xd3, - 0x69, 0x2a, 0x84, 0x6a, 0x45, 0xd5, 0x11, 0x08, 0x11, 0x0b, 0x3d, 0xcc, 0x99, 0x8e, 0xd6, 0x15, - 0x48, 0x8b, 0x58, 0xf7, 0x47, 0x09, 0xd5, 0xfe, 0xaa, 0xe0, 0x45, 0x54, 0x53, 0xcc, 0x88, 0x85, - 0x60, 0xc6, 0xb0, 0x60, 0x48, 0x3a, 0x47, 0xe1, 0xf4, 0x38, 0x03, 0xdf, 0x44, 0xe5, 0x8c, 0x0f, - 0xb5, 0xf1, 0xba, 0xd7, 0x30, 0x89, 0xe5, 0xcd, 0x8d, 0x27, 0x54, 0xe1, 0xb8, 0x8b, 0xe6, 0x02, - 0x11, 0x67, 0x89, 0x5a, 0xa6, 0xfa, 0x97, 0x91, 0xda, 0xc3, 0x53, 0x8d, 0x50, 0x13, 0xc1, 0x5b, - 0xa8, 0x0a, 0xea, 0xe7, 0x6f, 0x56, 0x3a, 0xe5, 0x5e, 0x63, 0x69, 0xed, 0x72, 0xdd, 0x13, 0x7d, - 0x40, 0xeb, 0x91, 0x14, 0x3b, 0x33, 0x5d, 0x2a, 0x8c, 0x16, 0xf2, 0x2d, 0x61, 0x8e, 0x4c, 0xe7, - 0xe0, 0x05, 0x54, 0x1e, 0xc3, 0x4e, 0xd1, 0x21, 0x55, 0x9f, 0xf8, 0x39, 0xaa, 0x4e, 0xd4, 0xfd, - 0x99, 0x2d, 0xac, 0x5c, 0xcc, 0xc7, 0xf4, 0x7e, 0x69, 0x21, 0xb3, 0x5a, 0x5a, 0xb1, 0xbd, 0xde, - 0xde, 0x91, 0x63, 0xed, 0x1f, 0x39, 0xd6, 0xc1, 0x91, 0x63, 0xed, 0xe6, 0x8e, 0xbd, 0x97, 0x3b, - 0xf6, 0x7e, 0xee, 0xd8, 0x07, 0xb9, 0x63, 0xff, 0xca, 0x1d, 0xfb, 0xf3, 0x6f, 0xc7, 0x7a, 0x5b, - 0x9a, 0xf4, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x01, 0xcb, 0xf3, 0xc9, 0x72, 0x05, 0x00, 0x00, -} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/generated.proto b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/generated.proto deleted file mode 100644 index 411065848a..0000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/generated.proto +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.client_go.pkg.apis.authentication.v1; - -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; -import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". 
-option go_package = "v1"; - -// ExtraValue masks the value so protobuf can generate -// +protobuf.nullable=true -// +protobuf.options.(gogoproto.goproto_stringer)=false -message ExtraValue { - // items, if empty, will result in an empty slice - - repeated string items = 1; -} - -// TokenReview attempts to authenticate a token to a known user. -// Note: TokenReview requests may be cached by the webhook token authenticator -// plugin in the kube-apiserver. -message TokenReview { - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec holds information about the request being evaluated - optional TokenReviewSpec spec = 2; - - // Status is filled in by the server and indicates whether the request can be authenticated. - // +optional - optional TokenReviewStatus status = 3; -} - -// TokenReviewSpec is a description of the token authentication request. -message TokenReviewSpec { - // Token is the opaque bearer token. - // +optional - optional string token = 1; -} - -// TokenReviewStatus is the result of the token authentication request. -message TokenReviewStatus { - // Authenticated indicates that the token was associated with a known user. - // +optional - optional bool authenticated = 1; - - // User is the UserInfo associated with the provided token. - // +optional - optional UserInfo user = 2; - - // Error indicates that the token couldn't be checked - // +optional - optional string error = 3; -} - -// UserInfo holds the information about the user needed to implement the -// user.Info interface. -message UserInfo { - // The name that uniquely identifies this user among all active users. - // +optional - optional string username = 1; - - // A unique value that identifies this user across time. If this user is - // deleted and another user by the same name is added, they will have - // different UIDs. - // +optional - optional string uid = 2; - - // The names of groups this user is a part of. - // +optional - repeated string groups = 3; - - // Any additional information provided by the authenticator. - // +optional - map extra = 4; -} - diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/register.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/register.go deleted file mode 100644 index b4ab6519c0..0000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/register.go +++ /dev/null @@ -1,58 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "authentication.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder runtime.SchemeBuilder - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -func init() { - // We only register manually written functions here. The registration of the - // generated functions takes place in the generated files. The separation - // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs, addConversionFuncs) -} - -// Adds the list of known types to api.Scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &TokenReview{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/types.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/types.go deleted file mode 100644 index 21051f137b..0000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/types.go +++ /dev/null @@ -1,106 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "fmt" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - // ImpersonateUserHeader is used to impersonate a particular user during an API server request - ImpersonateUserHeader = "Impersonate-User" - - // ImpersonateGroupHeader is used to impersonate a particular group during an API server request. - // It can be repeated multiplied times for multiple groups. - ImpersonateGroupHeader = "Impersonate-Group" - - // ImpersonateUserExtraHeaderPrefix is a prefix for any header used to impersonate an entry in the - // extra map[string][]string for user.Info. The key will be every after the prefix. - // It can be repeated multiplied times for multiple map keys and the same key can be repeated multiple - // times to have multiple elements in the slice under a single key - ImpersonateUserExtraHeaderPrefix = "Impersonate-Extra-" -) - -// +genclient=true -// +nonNamespaced=true -// +noMethods=true - -// TokenReview attempts to authenticate a token to a known user. -// Note: TokenReview requests may be cached by the webhook token authenticator -// plugin in the kube-apiserver. 
-type TokenReview struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec holds information about the request being evaluated - Spec TokenReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - - // Status is filled in by the server and indicates whether the request can be authenticated. - // +optional - Status TokenReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// TokenReviewSpec is a description of the token authentication request. -type TokenReviewSpec struct { - // Token is the opaque bearer token. - // +optional - Token string `json:"token,omitempty" protobuf:"bytes,1,opt,name=token"` -} - -// TokenReviewStatus is the result of the token authentication request. -type TokenReviewStatus struct { - // Authenticated indicates that the token was associated with a known user. - // +optional - Authenticated bool `json:"authenticated,omitempty" protobuf:"varint,1,opt,name=authenticated"` - // User is the UserInfo associated with the provided token. - // +optional - User UserInfo `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"` - // Error indicates that the token couldn't be checked - // +optional - Error string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` -} - -// UserInfo holds the information about the user needed to implement the -// user.Info interface. -type UserInfo struct { - // The name that uniquely identifies this user among all active users. - // +optional - Username string `json:"username,omitempty" protobuf:"bytes,1,opt,name=username"` - // A unique value that identifies this user across time. If this user is - // deleted and another user by the same name is added, they will have - // different UIDs. - // +optional - UID string `json:"uid,omitempty" protobuf:"bytes,2,opt,name=uid"` - // The names of groups this user is a part of. - // +optional - Groups []string `json:"groups,omitempty" protobuf:"bytes,3,rep,name=groups"` - // Any additional information provided by the authenticator. - // +optional - Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,4,rep,name=extra"` -} - -// ExtraValue masks the value so protobuf can generate -// +protobuf.nullable=true -// +protobuf.options.(gogoproto.goproto_stringer)=false -type ExtraValue []string - -func (t ExtraValue) String() string { - return fmt.Sprintf("%v", []string(t)) -} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/types_swagger_doc_generated.go deleted file mode 100644 index bb235e4eaa..0000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/types_swagger_doc_generated.go +++ /dev/null @@ -1,72 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. 
Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE -var map_TokenReview = map[string]string{ - "": "TokenReview attempts to authenticate a token to a known user. Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver.", - "spec": "Spec holds information about the request being evaluated", - "status": "Status is filled in by the server and indicates whether the request can be authenticated.", -} - -func (TokenReview) SwaggerDoc() map[string]string { - return map_TokenReview -} - -var map_TokenReviewSpec = map[string]string{ - "": "TokenReviewSpec is a description of the token authentication request.", - "token": "Token is the opaque bearer token.", -} - -func (TokenReviewSpec) SwaggerDoc() map[string]string { - return map_TokenReviewSpec -} - -var map_TokenReviewStatus = map[string]string{ - "": "TokenReviewStatus is the result of the token authentication request.", - "authenticated": "Authenticated indicates that the token was associated with a known user.", - "user": "User is the UserInfo associated with the provided token.", - "error": "Error indicates that the token couldn't be checked", -} - -func (TokenReviewStatus) SwaggerDoc() map[string]string { - return map_TokenReviewStatus -} - -var map_UserInfo = map[string]string{ - "": "UserInfo holds the information about the user needed to implement the user.Info interface.", - "username": "The name that uniquely identifies this user among all active users.", - "uid": "A unique value that identifies this user across time. If this user is deleted and another user by the same name is added, they will have different UIDs.", - "groups": "The names of groups this user is a part of.", - "extra": "Any additional information provided by the authenticator.", -} - -func (UserInfo) SwaggerDoc() map[string]string { - return map_UserInfo -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.conversion.go deleted file mode 100644 index 3a4923cc20..0000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.conversion.go +++ /dev/null @@ -1,153 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! 
- -package v1 - -import ( - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - authentication "k8s.io/client-go/pkg/apis/authentication" - unsafe "unsafe" -) - -func init() { - SchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. -func RegisterConversions(scheme *runtime.Scheme) error { - return scheme.AddGeneratedConversionFuncs( - Convert_v1_TokenReview_To_authentication_TokenReview, - Convert_authentication_TokenReview_To_v1_TokenReview, - Convert_v1_TokenReviewSpec_To_authentication_TokenReviewSpec, - Convert_authentication_TokenReviewSpec_To_v1_TokenReviewSpec, - Convert_v1_TokenReviewStatus_To_authentication_TokenReviewStatus, - Convert_authentication_TokenReviewStatus_To_v1_TokenReviewStatus, - Convert_v1_UserInfo_To_authentication_UserInfo, - Convert_authentication_UserInfo_To_v1_UserInfo, - ) -} - -func autoConvert_v1_TokenReview_To_authentication_TokenReview(in *TokenReview, out *authentication.TokenReview, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_TokenReviewSpec_To_authentication_TokenReviewSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_TokenReviewStatus_To_authentication_TokenReviewStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1_TokenReview_To_authentication_TokenReview is an autogenerated conversion function. -func Convert_v1_TokenReview_To_authentication_TokenReview(in *TokenReview, out *authentication.TokenReview, s conversion.Scope) error { - return autoConvert_v1_TokenReview_To_authentication_TokenReview(in, out, s) -} - -func autoConvert_authentication_TokenReview_To_v1_TokenReview(in *authentication.TokenReview, out *TokenReview, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_authentication_TokenReviewSpec_To_v1_TokenReviewSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_authentication_TokenReviewStatus_To_v1_TokenReviewStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_authentication_TokenReview_To_v1_TokenReview is an autogenerated conversion function. -func Convert_authentication_TokenReview_To_v1_TokenReview(in *authentication.TokenReview, out *TokenReview, s conversion.Scope) error { - return autoConvert_authentication_TokenReview_To_v1_TokenReview(in, out, s) -} - -func autoConvert_v1_TokenReviewSpec_To_authentication_TokenReviewSpec(in *TokenReviewSpec, out *authentication.TokenReviewSpec, s conversion.Scope) error { - out.Token = in.Token - return nil -} - -// Convert_v1_TokenReviewSpec_To_authentication_TokenReviewSpec is an autogenerated conversion function. -func Convert_v1_TokenReviewSpec_To_authentication_TokenReviewSpec(in *TokenReviewSpec, out *authentication.TokenReviewSpec, s conversion.Scope) error { - return autoConvert_v1_TokenReviewSpec_To_authentication_TokenReviewSpec(in, out, s) -} - -func autoConvert_authentication_TokenReviewSpec_To_v1_TokenReviewSpec(in *authentication.TokenReviewSpec, out *TokenReviewSpec, s conversion.Scope) error { - out.Token = in.Token - return nil -} - -// Convert_authentication_TokenReviewSpec_To_v1_TokenReviewSpec is an autogenerated conversion function. 
-func Convert_authentication_TokenReviewSpec_To_v1_TokenReviewSpec(in *authentication.TokenReviewSpec, out *TokenReviewSpec, s conversion.Scope) error { - return autoConvert_authentication_TokenReviewSpec_To_v1_TokenReviewSpec(in, out, s) -} - -func autoConvert_v1_TokenReviewStatus_To_authentication_TokenReviewStatus(in *TokenReviewStatus, out *authentication.TokenReviewStatus, s conversion.Scope) error { - out.Authenticated = in.Authenticated - if err := Convert_v1_UserInfo_To_authentication_UserInfo(&in.User, &out.User, s); err != nil { - return err - } - out.Error = in.Error - return nil -} - -// Convert_v1_TokenReviewStatus_To_authentication_TokenReviewStatus is an autogenerated conversion function. -func Convert_v1_TokenReviewStatus_To_authentication_TokenReviewStatus(in *TokenReviewStatus, out *authentication.TokenReviewStatus, s conversion.Scope) error { - return autoConvert_v1_TokenReviewStatus_To_authentication_TokenReviewStatus(in, out, s) -} - -func autoConvert_authentication_TokenReviewStatus_To_v1_TokenReviewStatus(in *authentication.TokenReviewStatus, out *TokenReviewStatus, s conversion.Scope) error { - out.Authenticated = in.Authenticated - if err := Convert_authentication_UserInfo_To_v1_UserInfo(&in.User, &out.User, s); err != nil { - return err - } - out.Error = in.Error - return nil -} - -// Convert_authentication_TokenReviewStatus_To_v1_TokenReviewStatus is an autogenerated conversion function. -func Convert_authentication_TokenReviewStatus_To_v1_TokenReviewStatus(in *authentication.TokenReviewStatus, out *TokenReviewStatus, s conversion.Scope) error { - return autoConvert_authentication_TokenReviewStatus_To_v1_TokenReviewStatus(in, out, s) -} - -func autoConvert_v1_UserInfo_To_authentication_UserInfo(in *UserInfo, out *authentication.UserInfo, s conversion.Scope) error { - out.Username = in.Username - out.UID = in.UID - out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) - out.Extra = *(*map[string]authentication.ExtraValue)(unsafe.Pointer(&in.Extra)) - return nil -} - -// Convert_v1_UserInfo_To_authentication_UserInfo is an autogenerated conversion function. -func Convert_v1_UserInfo_To_authentication_UserInfo(in *UserInfo, out *authentication.UserInfo, s conversion.Scope) error { - return autoConvert_v1_UserInfo_To_authentication_UserInfo(in, out, s) -} - -func autoConvert_authentication_UserInfo_To_v1_UserInfo(in *authentication.UserInfo, out *UserInfo, s conversion.Scope) error { - out.Username = in.Username - out.UID = in.UID - out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) - out.Extra = *(*map[string]ExtraValue)(unsafe.Pointer(&in.Extra)) - return nil -} - -// Convert_authentication_UserInfo_To_v1_UserInfo is an autogenerated conversion function. -func Convert_authentication_UserInfo_To_v1_UserInfo(in *authentication.UserInfo, out *UserInfo, s conversion.Scope) error { - return autoConvert_authentication_UserInfo_To_v1_UserInfo(in, out, s) -} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.deepcopy.go deleted file mode 100644 index 929c85fcab..0000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,110 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package v1 - -import ( - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_TokenReview, InType: reflect.TypeOf(&TokenReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_TokenReviewSpec, InType: reflect.TypeOf(&TokenReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_TokenReviewStatus, InType: reflect.TypeOf(&TokenReviewStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_UserInfo, InType: reflect.TypeOf(&UserInfo{})}, - ) -} - -// DeepCopy_v1_TokenReview is an autogenerated deepcopy function. -func DeepCopy_v1_TokenReview(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*TokenReview) - out := out.(*TokenReview) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) - } - if err := DeepCopy_v1_TokenReviewStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -// DeepCopy_v1_TokenReviewSpec is an autogenerated deepcopy function. -func DeepCopy_v1_TokenReviewSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*TokenReviewSpec) - out := out.(*TokenReviewSpec) - *out = *in - return nil - } -} - -// DeepCopy_v1_TokenReviewStatus is an autogenerated deepcopy function. -func DeepCopy_v1_TokenReviewStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*TokenReviewStatus) - out := out.(*TokenReviewStatus) - *out = *in - if err := DeepCopy_v1_UserInfo(&in.User, &out.User, c); err != nil { - return err - } - return nil - } -} - -// DeepCopy_v1_UserInfo is an autogenerated deepcopy function. -func DeepCopy_v1_UserInfo(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*UserInfo) - out := out.(*UserInfo) - *out = *in - if in.Groups != nil { - in, out := &in.Groups, &out.Groups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Extra != nil { - in, out := &in.Extra, &out.Extra - *out = make(map[string]ExtraValue) - for key, val := range *in { - if newVal, err := c.DeepCopy(&val); err != nil { - return err - } else { - (*out)[key] = *newVal.(*ExtraValue) - } - } - } - return nil - } -} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.defaults.go deleted file mode 100644 index 6df448eb9f..0000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.defaults.go +++ /dev/null @@ -1,32 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by defaulter-gen. Do not edit it manually! - -package v1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// RegisterDefaults adds defaulters functions to the given scheme. -// Public to allow building arbitrary schemes. -// All generated defaulters are covering - they call all nested defaulters. -func RegisterDefaults(scheme *runtime.Scheme) error { - return nil -} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/authentication/zz_generated.deepcopy.go deleted file mode 100644 index f3b47e8402..0000000000 --- a/vendor/k8s.io/client-go/pkg/apis/authentication/zz_generated.deepcopy.go +++ /dev/null @@ -1,110 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package authentication - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authentication_TokenReview, InType: reflect.TypeOf(&TokenReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authentication_TokenReviewSpec, InType: reflect.TypeOf(&TokenReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authentication_TokenReviewStatus, InType: reflect.TypeOf(&TokenReviewStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authentication_UserInfo, InType: reflect.TypeOf(&UserInfo{})}, - ) -} - -// DeepCopy_authentication_TokenReview is an autogenerated deepcopy function. -func DeepCopy_authentication_TokenReview(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*TokenReview) - out := out.(*TokenReview) - *out = *in - if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { - return err - } else { - out.ObjectMeta = *newVal.(*v1.ObjectMeta) - } - if err := DeepCopy_authentication_TokenReviewStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -// DeepCopy_authentication_TokenReviewSpec is an autogenerated deepcopy function. 
-func DeepCopy_authentication_TokenReviewSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*TokenReviewSpec) - out := out.(*TokenReviewSpec) - *out = *in - return nil - } -} - -// DeepCopy_authentication_TokenReviewStatus is an autogenerated deepcopy function. -func DeepCopy_authentication_TokenReviewStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*TokenReviewStatus) - out := out.(*TokenReviewStatus) - *out = *in - if err := DeepCopy_authentication_UserInfo(&in.User, &out.User, c); err != nil { - return err - } - return nil - } -} - -// DeepCopy_authentication_UserInfo is an autogenerated deepcopy function. -func DeepCopy_authentication_UserInfo(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*UserInfo) - out := out.(*UserInfo) - *out = *in - if in.Groups != nil { - in, out := &in.Groups, &out.Groups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Extra != nil { - in, out := &in.Extra, &out.Extra - *out = make(map[string]ExtraValue) - for key, val := range *in { - if newVal, err := c.DeepCopy(&val); err != nil { - return err - } else { - (*out)[key] = *newVal.(*ExtraValue) - } - } - } - return nil - } -} diff --git a/vendor/k8s.io/apimachinery/LICENSE b/vendor/k8s.io/kube-openapi/LICENSE similarity index 100% rename from vendor/k8s.io/apimachinery/LICENSE rename to vendor/k8s.io/kube-openapi/LICENSE diff --git a/vendor/k8s.io/apimachinery/pkg/openapi/common.go b/vendor/k8s.io/kube-openapi/pkg/common/common.go similarity index 85% rename from vendor/k8s.io/apimachinery/pkg/openapi/common.go rename to vendor/k8s.io/kube-openapi/pkg/common/common.go index bfab64a1c2..fbe01cabb3 100644 --- a/vendor/k8s.io/apimachinery/pkg/openapi/common.go +++ b/vendor/k8s.io/kube-openapi/pkg/common/common.go @@ -14,12 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ -package openapi +package common import ( + "net/http" + "strings" + "github.com/emicklei/go-restful" "github.com/go-openapi/spec" - "strings" ) // OpenAPIDefinition describes single type. Normally these definitions are auto-generated using gen-openapi. @@ -41,6 +43,10 @@ type OpenAPIDefinitionGetter interface { OpenAPIDefinition() *OpenAPIDefinition } +type PathHandler interface { + Handle(path string, handler http.Handler) +} + // Config is set of configuration for openAPI spec generation. type Config struct { // List of supported protocols such as https, http, etc. @@ -83,6 +89,30 @@ type Config struct { DefaultSecurity []map[string][]string } +var schemaTypeFormatMap = map[string][]string{ + "uint": {"integer", "int32"}, + "uint8": {"integer", "byte"}, + "uint16": {"integer", "int32"}, + "uint32": {"integer", "int64"}, + "uint64": {"integer", "int64"}, + "int": {"integer", "int32"}, + "int8": {"integer", "byte"}, + "int16": {"integer", "int32"}, + "int32": {"integer", "int32"}, + "int64": {"integer", "int64"}, + "byte": {"integer", "byte"}, + "float64": {"number", "double"}, + "float32": {"number", "float"}, + "bool": {"boolean", ""}, + "time.Time": {"string", "date-time"}, + "string": {"string", ""}, + "integer": {"integer", ""}, + "number": {"number", ""}, + "boolean": {"boolean", ""}, + "[]byte": {"string", "byte"}, // base64 encoded characters + "interface{}": {"object", ""}, +} + // This function is a reference for converting go (or any custom type) to a simple open API type,format pair. There are // two ways to customize spec for a type. 
If you add it here, a type will be converted to a simple type and the type // comment (the comment that is added before type definition) will be lost. The spec will still have the property @@ -123,29 +153,6 @@ type Config struct { // } // func GetOpenAPITypeFormat(typeName string) (string, string) { - schemaTypeFormatMap := map[string][]string{ - "uint": {"integer", "int32"}, - "uint8": {"integer", "byte"}, - "uint16": {"integer", "int32"}, - "uint32": {"integer", "int64"}, - "uint64": {"integer", "int64"}, - "int": {"integer", "int32"}, - "int8": {"integer", "byte"}, - "int16": {"integer", "int32"}, - "int32": {"integer", "int32"}, - "int64": {"integer", "int64"}, - "byte": {"integer", "byte"}, - "float64": {"number", "double"}, - "float32": {"number", "float"}, - "bool": {"boolean", ""}, - "time.Time": {"string", "date-time"}, - "string": {"string", ""}, - "integer": {"integer", ""}, - "number": {"number", ""}, - "boolean": {"boolean", ""}, - "[]byte": {"string", "byte"}, // base64 encoded characters - "interface{}": {"object", ""}, - } mapped, ok := schemaTypeFormatMap[typeName] if !ok { return "", "" diff --git a/vendor/k8s.io/apimachinery/pkg/openapi/doc.go b/vendor/k8s.io/kube-openapi/pkg/common/doc.go similarity index 83% rename from vendor/k8s.io/apimachinery/pkg/openapi/doc.go rename to vendor/k8s.io/kube-openapi/pkg/common/doc.go index 5ed572cc13..2ba6d247b3 100644 --- a/vendor/k8s.io/apimachinery/pkg/openapi/doc.go +++ b/vendor/k8s.io/kube-openapi/pkg/common/doc.go @@ -14,5 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// package openapi holds shared codes and types between open API code generator and spec generator. -package openapi +// package common holds shared code and types between open API code +// generator and spec generator. 
+package common diff --git a/vendor/vendor.json b/vendor/vendor.json index 2889bf1a53..ce00f12565 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -21,80 +21,80 @@ { "checksumSHA1": "AH7jcN7pvaPDU6UjHdpT081DDGk=", "path": "cloud.google.com/go/compute/metadata", - "revision": "0b124d8375f8cf5cb1004fa21571940a58c8a0e9", - "revisionTime": "2017-09-15T18:33:45Z" + "revision": "837004fdf983457dd0f2db2b4d798886d2a321be", + "revisionTime": "2017-10-26T23:50:15Z" }, { "checksumSHA1": "/ixPd+hSgsbAjBI/fPqmHtTFRM8=", "path": "cloud.google.com/go/iam", - "revision": "0b124d8375f8cf5cb1004fa21571940a58c8a0e9", - "revisionTime": "2017-09-15T18:33:45Z" + "revision": "837004fdf983457dd0f2db2b4d798886d2a321be", + "revisionTime": "2017-10-26T23:50:15Z" }, { "checksumSHA1": "vXPMGAHxvqWSpSFqqUfZBZS1dBo=", "path": "cloud.google.com/go/internal", - "revision": "0b124d8375f8cf5cb1004fa21571940a58c8a0e9", - "revisionTime": "2017-09-15T18:33:45Z" + "revision": "837004fdf983457dd0f2db2b4d798886d2a321be", + "revisionTime": "2017-10-26T23:50:15Z" }, { "checksumSHA1": "MCns2LLZtUZEx6JWyYBrcbSuTXg=", "path": "cloud.google.com/go/internal/optional", - "revision": "0b124d8375f8cf5cb1004fa21571940a58c8a0e9", - "revisionTime": "2017-09-15T18:33:45Z" + "revision": "837004fdf983457dd0f2db2b4d798886d2a321be", + "revisionTime": "2017-10-26T23:50:15Z" }, { - "checksumSHA1": "P0DmgQq10tIKBxGrPwdOGXEGk0A=", + "checksumSHA1": "QXE70x1YpmwfX8bqcncO5LxjeEA=", "path": "cloud.google.com/go/internal/version", - "revision": "0b124d8375f8cf5cb1004fa21571940a58c8a0e9", - "revisionTime": "2017-09-15T18:33:45Z" + "revision": "837004fdf983457dd0f2db2b4d798886d2a321be", + "revisionTime": "2017-10-26T23:50:15Z" }, { - "checksumSHA1": "QHbGgo+9M+l/RmOV1EA2p/miarI=", + "checksumSHA1": "c3zsw7IgrkgaX9KacOB3suvjRIE=", "path": "cloud.google.com/go/storage", - "revision": "0b124d8375f8cf5cb1004fa21571940a58c8a0e9", - "revisionTime": "2017-09-15T18:33:45Z" + "revision": "837004fdf983457dd0f2db2b4d798886d2a321be", + "revisionTime": "2017-10-26T23:50:15Z" }, { - "checksumSHA1": "ayQzpLsnDmiaVeKWQeySiIvAjiA=", + "checksumSHA1": "KzijvZt+5kIVOsdv8GJkUK9njpc=", "path": "github.com/Azure/azure-sdk-for-go/storage", - "revision": "df4dd90d076ebbf6e87d08d3f00bfac8ff4bde1a", - "revisionTime": "2017-09-06T21:46:31Z" + "revision": "699ac40079a6709355d1f555ca22fa30660bf519", + "revisionTime": "2017-10-26T22:25:37Z" }, { - "checksumSHA1": "9WLmKV9jbHzGscteXIHXJxrNgic=", + "checksumSHA1": "9NFR6RG8H2fNyKHscGmuGLQhRm4=", "path": "github.com/Azure/go-ansiterm", - "revision": "19f72df4d05d31cbe1c56bfc8045c96babff6c7e", - "revisionTime": "2017-06-29T20:46:27Z" + "revision": "d6e3b3328b783f23731bc4d058875b0371ff8109", + "revisionTime": "2017-09-29T23:40:23Z" }, { - "checksumSHA1": "J6OhSKiGRYT0ymRvvu1T6YnKSWo=", + "checksumSHA1": "3/UphB+6Hbx5otA4PjFjvObT+L4=", "path": "github.com/Azure/go-ansiterm/winterm", - "revision": "19f72df4d05d31cbe1c56bfc8045c96babff6c7e", - "revisionTime": "2017-06-29T20:46:27Z" + "revision": "d6e3b3328b783f23731bc4d058875b0371ff8109", + "revisionTime": "2017-09-29T23:40:23Z" }, { - "checksumSHA1": "+4d+Y67AMKKuyR1EO33Zdt+RVx0=", + "checksumSHA1": "Xd5xJ9A8KynuExnf30qfukN3RR0=", "path": "github.com/Azure/go-autorest/autorest", - "revision": "f6be1abbb5abd0517522f850dd785990d373da7e", - "revisionTime": "2017-09-13T22:19:17Z" + "revision": "c0eb859387e57a164bf64171da307e2ef8168b58", + "revisionTime": "2017-10-20T21:14:44Z" }, { - "checksumSHA1": "7G4HgRaIT25bgz/hPtXG6Kv8Fho=", + "checksumSHA1": "Ktj3H1WpOqxnC9kdAA+F7Ol7/RQ=", 
"path": "github.com/Azure/go-autorest/autorest/adal", - "revision": "f6be1abbb5abd0517522f850dd785990d373da7e", - "revisionTime": "2017-09-13T22:19:17Z" + "revision": "c0eb859387e57a164bf64171da307e2ef8168b58", + "revisionTime": "2017-10-20T21:14:44Z" }, { - "checksumSHA1": "2KdBFgT4qY+fMOkBTa5vA9V0AiM=", + "checksumSHA1": "Pt0rzDhmiWpNeGvXT73kra89guI=", "path": "github.com/Azure/go-autorest/autorest/azure", - "revision": "f6be1abbb5abd0517522f850dd785990d373da7e", - "revisionTime": "2017-09-13T22:19:17Z" + "revision": "c0eb859387e57a164bf64171da307e2ef8168b58", + "revisionTime": "2017-10-20T21:14:44Z" }, { - "checksumSHA1": "LSF/pNrjhIxl6jiS6bKooBFCOxI=", + "checksumSHA1": "9nXCi9qQsYjxCeajJKWttxgEt0I=", "path": "github.com/Azure/go-autorest/autorest/date", - "revision": "f6be1abbb5abd0517522f850dd785990d373da7e", - "revisionTime": "2017-09-13T22:19:17Z" + "revision": "c0eb859387e57a164bf64171da307e2ef8168b58", + "revisionTime": "2017-10-20T21:14:44Z" }, { "checksumSHA1": "mYXO8s/vu7+guTlxQxhDHsh/E/c=", @@ -103,10 +103,10 @@ "revisionTime": "2017-09-05T09:56:32Z" }, { - "checksumSHA1": "J47s5lV7x01fXcBuLVY+Tzm9KnM=", + "checksumSHA1": "0wdHgfg/Zj50H6FMbN2MnuR6YXA=", "path": "github.com/Jeffail/gabs", - "revision": "44ad915e1baa36caddb7db85303c3406042f7187", - "revisionTime": "2017-09-12T13:33:11Z" + "revision": "44cbc27138518b15305cb3eef220d04f2d641b9b", + "revisionTime": "2017-10-15T11:14:30Z" }, { "checksumSHA1": "o/3cn04KAiwC7NqNVvmfVTD+hgA=", @@ -121,54 +121,52 @@ "revisionTime": "2012-06-04T00:48:16Z" }, { - "checksumSHA1": "NX4v3cbkXAJxFlrncqT9yEUBuoA=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/github.com/PuerkitoBio/purell", + "checksumSHA1": "nJrp/CKnvpO+vzTfOeR8qmzRZ4c=", "path": "github.com/PuerkitoBio/purell", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" + "revision": "fd18e053af8a4ff11039269006e8037ff374ce0e", + "revisionTime": "2017-09-17T14:39:11Z" }, { "checksumSHA1": "/jQPcsccvsC9GVM9pV6fESxWOUk=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/github.com/PuerkitoBio/urlesc", "path": "github.com/PuerkitoBio/urlesc", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" + "revision": "de5bf2ad457846296e2031421a34e2568e304e35", + "revisionTime": "2017-08-10T14:37:23Z" }, { - "checksumSHA1": "g8C6yI8XhyDLnCFwvfYe8CT96ZU=", + "checksumSHA1": "GhYRloF/vX2lEl+fh5NXpr5DJ9o=", "path": "github.com/SAP/go-hdb/driver", - "revision": "cbb0eccdfff1bdf6454418056a33b4407215e4a2", - "revisionTime": "2017-08-02T08:23:16Z" + "revision": "5155b87edff9ebf661f8102079fa3463ac83b7af", + "revisionTime": "2017-10-09T11:13:23Z" }, { "checksumSHA1": "NMOfkY6oRCxCXzM1LVgCyGCLp0Y=", "path": "github.com/SAP/go-hdb/driver/sqltrace", - "revision": "cbb0eccdfff1bdf6454418056a33b4407215e4a2", - "revisionTime": "2017-08-02T08:23:16Z" + "revision": "5155b87edff9ebf661f8102079fa3463ac83b7af", + "revisionTime": "2017-10-09T11:13:23Z" }, { "checksumSHA1": "977xfUN5PjvUvvJl5aTIyu+pILg=", "path": "github.com/SAP/go-hdb/internal/bufio", - "revision": "cbb0eccdfff1bdf6454418056a33b4407215e4a2", - "revisionTime": "2017-08-02T08:23:16Z" + "revision": "5155b87edff9ebf661f8102079fa3463ac83b7af", + "revisionTime": "2017-10-09T11:13:23Z" }, { "checksumSHA1": "xL3ef789fS4+MSxb59Apj0D1BlA=", "path": "github.com/SAP/go-hdb/internal/protocol", - "revision": "cbb0eccdfff1bdf6454418056a33b4407215e4a2", - "revisionTime": "2017-08-02T08:23:16Z" + "revision": 
"5155b87edff9ebf661f8102079fa3463ac83b7af", + "revisionTime": "2017-10-09T11:13:23Z" }, { "checksumSHA1": "KK2U+DbcpWBexg9ZMEKGRIaqFtU=", "path": "github.com/SAP/go-hdb/internal/unicode", - "revision": "cbb0eccdfff1bdf6454418056a33b4407215e4a2", - "revisionTime": "2017-08-02T08:23:16Z" + "revision": "5155b87edff9ebf661f8102079fa3463ac83b7af", + "revisionTime": "2017-10-09T11:13:23Z" }, { "checksumSHA1": "fb432MWzL2ONS0e6HyGv3P2oG1I=", "path": "github.com/SAP/go-hdb/internal/unicode/cesu8", - "revision": "cbb0eccdfff1bdf6454418056a33b4407215e4a2", - "revisionTime": "2017-08-02T08:23:16Z" + "revision": "5155b87edff9ebf661f8102079fa3463ac83b7af", + "revisionTime": "2017-10-09T11:13:23Z" }, { "checksumSHA1": "t+uej2kiyqRyQYguygI8t9nJH2w=", @@ -195,29 +193,22 @@ "revisionTime": "2016-12-05T22:51:55Z" }, { - "checksumSHA1": "CujWu7+PWlZSX5+zAPJH91O5AVQ=", - "origin": "github.com/docker/docker/vendor/github.com/Sirupsen/logrus", - "path": "github.com/Sirupsen/logrus", - "revision": "8bb8ac7f788b258ee17473d9f2779341506aed0a", - "revisionTime": "2017-01-23T16:49:28Z" - }, - { - "checksumSHA1": "0et4hA6AYqZCgYiY+c6Z17t3k3k=", + "checksumSHA1": "xp/2s4XclLL17DThGBI7jXZ4Crs=", "path": "github.com/armon/go-metrics", - "revision": "0a12dc6f6b9da6da644031a1b9b5a85478c5ee27", - "revisionTime": "2017-09-13T18:48:37Z" + "revision": "9a4b6e10bed6220a1665955aa2b75afc91eb10b3", + "revisionTime": "2017-10-02T18:27:31Z" }, { "checksumSHA1": "xCsGGM9TKBogZDfSN536KtQdLko=", "path": "github.com/armon/go-metrics/circonus", - "revision": "0a12dc6f6b9da6da644031a1b9b5a85478c5ee27", - "revisionTime": "2017-09-13T18:48:37Z" + "revision": "9a4b6e10bed6220a1665955aa2b75afc91eb10b3", + "revisionTime": "2017-10-02T18:27:31Z" }, { "checksumSHA1": "Dt0n1sSivvvdZQdzc4Hu/yOG+T0=", "path": "github.com/armon/go-metrics/datadog", - "revision": "0a12dc6f6b9da6da644031a1b9b5a85478c5ee27", - "revisionTime": "2017-09-13T18:48:37Z" + "revision": "9a4b6e10bed6220a1665955aa2b75afc91eb10b3", + "revisionTime": "2017-10-02T18:27:31Z" }, { "checksumSHA1": "MzSim/5A5kPO2q0n3aKq8H5qJvU=", @@ -232,202 +223,202 @@ "revisionTime": "2017-07-27T15:54:43Z" }, { - "checksumSHA1": "NCiuP2GF1WDY7N3kH5d6NO/cQoY=", + "checksumSHA1": "oV+bancgD0IUbMZzY3nyLTMNjRM=", "path": "github.com/asaskevich/govalidator", - "revision": "73945b6115bfbbcc57d89b7316e28109364124e1", - "revisionTime": "2017-09-03T09:52:15Z" + "revision": "ddb193b4c7bfa1d1c1923ae05a1ee8fb0cd45cf3", + "revisionTime": "2017-10-20T21:07:42Z" }, { - "checksumSHA1": "fXhvoy6YMNQ+MzKIp0AyYTik9MA=", + "checksumSHA1": "YhARllgSKHk/9SL4UuyM7kpXFnk=", "path": "github.com/aws/aws-sdk-go/aws", - "revision": "8d2831dd9188c8f7a1c9719564d23f879bba0179", - "revisionTime": "2017-09-15T23:47:43Z" + "revision": "8feb05fb5bffe503a94fb9a687a01a9eca0dcf37", + "revisionTime": "2017-10-27T17:14:03Z" }, { "checksumSHA1": "Y9W+4GimK4Fuxq+vyIskVYFRnX4=", "path": "github.com/aws/aws-sdk-go/aws/awserr", - "revision": "8d2831dd9188c8f7a1c9719564d23f879bba0179", - "revisionTime": "2017-09-15T23:47:43Z" + "revision": "8feb05fb5bffe503a94fb9a687a01a9eca0dcf37", + "revisionTime": "2017-10-27T17:14:03Z" }, { "checksumSHA1": "yyYr41HZ1Aq0hWc3J5ijXwYEcac=", "path": "github.com/aws/aws-sdk-go/aws/awsutil", - "revision": "8d2831dd9188c8f7a1c9719564d23f879bba0179", - "revisionTime": "2017-09-15T23:47:43Z" + "revision": "8feb05fb5bffe503a94fb9a687a01a9eca0dcf37", + "revisionTime": "2017-10-27T17:14:03Z" }, { "checksumSHA1": "n98FANpNeRT5kf6pizdpI7nm6Sw=", "path": "github.com/aws/aws-sdk-go/aws/client", - "revision": 
"8d2831dd9188c8f7a1c9719564d23f879bba0179", - "revisionTime": "2017-09-15T23:47:43Z" + "revision": "8feb05fb5bffe503a94fb9a687a01a9eca0dcf37", + "revisionTime": "2017-10-27T17:14:03Z" }, { "checksumSHA1": "ieAJ+Cvp/PKv1LpUEnUXpc3OI6E=", "path": "github.com/aws/aws-sdk-go/aws/client/metadata", - "revision": "8d2831dd9188c8f7a1c9719564d23f879bba0179", - "revisionTime": "2017-09-15T23:47:43Z" + "revision": "8feb05fb5bffe503a94fb9a687a01a9eca0dcf37", + "revisionTime": "2017-10-27T17:14:03Z" }, { "checksumSHA1": "7/8j/q0TWtOgXyvEcv4B2Dhl00o=", "path": "github.com/aws/aws-sdk-go/aws/corehandlers", - "revision": "8d2831dd9188c8f7a1c9719564d23f879bba0179", - "revisionTime": "2017-09-15T23:47:43Z" + "revision": "8feb05fb5bffe503a94fb9a687a01a9eca0dcf37", + "revisionTime": "2017-10-27T17:14:03Z" }, { "checksumSHA1": "Y+cPwQL0dZMyqp3wI+KJWmA9KQ8=", "path": "github.com/aws/aws-sdk-go/aws/credentials", - "revision": "8d2831dd9188c8f7a1c9719564d23f879bba0179", - "revisionTime": "2017-09-15T23:47:43Z" + "revision": "8feb05fb5bffe503a94fb9a687a01a9eca0dcf37", + "revisionTime": "2017-10-27T17:14:03Z" }, { "checksumSHA1": "u3GOAJLmdvbuNUeUEcZSEAOeL/0=", "path": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds", - "revision": "8d2831dd9188c8f7a1c9719564d23f879bba0179", - "revisionTime": "2017-09-15T23:47:43Z" + "revision": "8feb05fb5bffe503a94fb9a687a01a9eca0dcf37", + "revisionTime": "2017-10-27T17:14:03Z" }, { "checksumSHA1": "NUJUTWlc1sV8b7WjfiYc4JZbXl0=", "path": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds", - "revision": "8d2831dd9188c8f7a1c9719564d23f879bba0179", - "revisionTime": "2017-09-15T23:47:43Z" + "revision": "8feb05fb5bffe503a94fb9a687a01a9eca0dcf37", + "revisionTime": "2017-10-27T17:14:03Z" }, { "checksumSHA1": "JEYqmF83O5n5bHkupAzA6STm0no=", "path": "github.com/aws/aws-sdk-go/aws/credentials/stscreds", - "revision": "8d2831dd9188c8f7a1c9719564d23f879bba0179", - "revisionTime": "2017-09-15T23:47:43Z" + "revision": "8feb05fb5bffe503a94fb9a687a01a9eca0dcf37", + "revisionTime": "2017-10-27T17:14:03Z" }, { "checksumSHA1": "ZdtYh3ZHSgP/WEIaqwJHTEhpkbs=", "path": "github.com/aws/aws-sdk-go/aws/defaults", - "revision": "8d2831dd9188c8f7a1c9719564d23f879bba0179", - "revisionTime": "2017-09-15T23:47:43Z" + "revision": "8feb05fb5bffe503a94fb9a687a01a9eca0dcf37", + "revisionTime": "2017-10-27T17:14:03Z" }, { "checksumSHA1": "/EXbk/z2TWjWc1Hvb4QYs3Wmhb8=", "path": "github.com/aws/aws-sdk-go/aws/ec2metadata", - "revision": "8d2831dd9188c8f7a1c9719564d23f879bba0179", - "revisionTime": "2017-09-15T23:47:43Z" + "revision": "8feb05fb5bffe503a94fb9a687a01a9eca0dcf37", + "revisionTime": "2017-10-27T17:14:03Z" }, { - "checksumSHA1": "PinnnEkhXyxcdFrOB+L/PxtVhYI=", + "checksumSHA1": "28TXzVpyhT3JFRT3vLo/GTKc3Wc=", "path": "github.com/aws/aws-sdk-go/aws/endpoints", - "revision": "8d2831dd9188c8f7a1c9719564d23f879bba0179", - "revisionTime": "2017-09-15T23:47:43Z" + "revision": "8feb05fb5bffe503a94fb9a687a01a9eca0dcf37", + "revisionTime": "2017-10-27T17:14:03Z" }, { "checksumSHA1": "n/tgGgh0wICYu+VDYSqlsRy4w9s=", "path": "github.com/aws/aws-sdk-go/aws/request", - "revision": "8d2831dd9188c8f7a1c9719564d23f879bba0179", - "revisionTime": "2017-09-15T23:47:43Z" + "revision": "8feb05fb5bffe503a94fb9a687a01a9eca0dcf37", + "revisionTime": "2017-10-27T17:14:03Z" }, { - "checksumSHA1": "SK5Mn4Ga9+equOQTYA1DTSb3LWY=", + "checksumSHA1": "HcGL4e6Uep4/80eCUI5xkcWjpQ0=", "path": "github.com/aws/aws-sdk-go/aws/session", - "revision": "8d2831dd9188c8f7a1c9719564d23f879bba0179", - "revisionTime": 
"2017-09-15T23:47:43Z" + "revision": "8feb05fb5bffe503a94fb9a687a01a9eca0dcf37", + "revisionTime": "2017-10-27T17:14:03Z" }, { "checksumSHA1": "iywvraxbXf3A/FOzFWjKfBBEQRA=", "path": "github.com/aws/aws-sdk-go/aws/signer/v4", - "revision": "8d2831dd9188c8f7a1c9719564d23f879bba0179", - "revisionTime": "2017-09-15T23:47:43Z" + "revision": "8feb05fb5bffe503a94fb9a687a01a9eca0dcf37", + "revisionTime": "2017-10-27T17:14:03Z" }, { "checksumSHA1": "04ypv4x12l4q0TksA1zEVsmgpvw=", "path": "github.com/aws/aws-sdk-go/internal/shareddefaults", - "revision": "8d2831dd9188c8f7a1c9719564d23f879bba0179", - "revisionTime": "2017-09-15T23:47:43Z" + "revision": "8feb05fb5bffe503a94fb9a687a01a9eca0dcf37", + "revisionTime": "2017-10-27T17:14:03Z" }, { "checksumSHA1": "wk7EyvDaHwb5qqoOP/4d3cV0708=", "path": "github.com/aws/aws-sdk-go/private/protocol", - "revision": "8d2831dd9188c8f7a1c9719564d23f879bba0179", - "revisionTime": "2017-09-15T23:47:43Z" + "revision": "8feb05fb5bffe503a94fb9a687a01a9eca0dcf37", + "revisionTime": "2017-10-27T17:14:03Z" }, { "checksumSHA1": "1QmQ3FqV37w0Zi44qv8pA1GeR0A=", "path": "github.com/aws/aws-sdk-go/private/protocol/ec2query", - "revision": "8d2831dd9188c8f7a1c9719564d23f879bba0179", - "revisionTime": "2017-09-15T23:47:43Z" + "revision": "8feb05fb5bffe503a94fb9a687a01a9eca0dcf37", + "revisionTime": "2017-10-27T17:14:03Z" }, { "checksumSHA1": "O6hcK24yI6w7FA+g4Pbr+eQ7pys=", "path": "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil", - "revision": "8d2831dd9188c8f7a1c9719564d23f879bba0179", - "revisionTime": "2017-09-15T23:47:43Z" + "revision": "8feb05fb5bffe503a94fb9a687a01a9eca0dcf37", + "revisionTime": "2017-10-27T17:14:03Z" }, { "checksumSHA1": "R00RL5jJXRYq1iiK1+PGvMfvXyM=", "path": "github.com/aws/aws-sdk-go/private/protocol/jsonrpc", - "revision": "8d2831dd9188c8f7a1c9719564d23f879bba0179", - "revisionTime": "2017-09-15T23:47:43Z" + "revision": "8feb05fb5bffe503a94fb9a687a01a9eca0dcf37", + "revisionTime": "2017-10-27T17:14:03Z" }, { "checksumSHA1": "ZqY5RWavBLWTo6j9xqdyBEaNFRk=", "path": "github.com/aws/aws-sdk-go/private/protocol/query", - "revision": "8d2831dd9188c8f7a1c9719564d23f879bba0179", - "revisionTime": "2017-09-15T23:47:43Z" + "revision": "8feb05fb5bffe503a94fb9a687a01a9eca0dcf37", + "revisionTime": "2017-10-27T17:14:03Z" }, { - "checksumSHA1": "Drt1JfLMa0DQEZLWrnMlTWaIcC8=", + "checksumSHA1": "9V1PvtFQ9MObZTc3sa86WcuOtOU=", "path": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil", - "revision": "8d2831dd9188c8f7a1c9719564d23f879bba0179", - "revisionTime": "2017-09-15T23:47:43Z" + "revision": "8feb05fb5bffe503a94fb9a687a01a9eca0dcf37", + "revisionTime": "2017-10-27T17:14:03Z" }, { "checksumSHA1": "VCTh+dEaqqhog5ncy/WTt9+/gFM=", "path": "github.com/aws/aws-sdk-go/private/protocol/rest", - "revision": "8d2831dd9188c8f7a1c9719564d23f879bba0179", - "revisionTime": "2017-09-15T23:47:43Z" + "revision": "8feb05fb5bffe503a94fb9a687a01a9eca0dcf37", + "revisionTime": "2017-10-27T17:14:03Z" }, { "checksumSHA1": "ODo+ko8D6unAxZuN1jGzMcN4QCc=", "path": "github.com/aws/aws-sdk-go/private/protocol/restxml", - "revision": "8d2831dd9188c8f7a1c9719564d23f879bba0179", - "revisionTime": "2017-09-15T23:47:43Z" + "revision": "8feb05fb5bffe503a94fb9a687a01a9eca0dcf37", + "revisionTime": "2017-10-27T17:14:03Z" }, { "checksumSHA1": "0qYPUga28aQVkxZgBR3Z86AbGUQ=", "path": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", - "revision": "8d2831dd9188c8f7a1c9719564d23f879bba0179", - "revisionTime": "2017-09-15T23:47:43Z" + "revision": 
"8feb05fb5bffe503a94fb9a687a01a9eca0dcf37", + "revisionTime": "2017-10-27T17:14:03Z" }, { - "checksumSHA1": "Qk98eGaCDibYSf0u3E2q7fAGnbY=", + "checksumSHA1": "ycd0mxQjtWfbtlk+W2N7N6QdxFw=", "path": "github.com/aws/aws-sdk-go/service/dynamodb", - "revision": "8d2831dd9188c8f7a1c9719564d23f879bba0179", - "revisionTime": "2017-09-15T23:47:43Z" + "revision": "8feb05fb5bffe503a94fb9a687a01a9eca0dcf37", + "revisionTime": "2017-10-27T17:14:03Z" }, { - "checksumSHA1": "jUnnkPjga3FkOTMYHoYl+kQ8tZU=", + "checksumSHA1": "/I6I2nR59isqKtSpEnTfLRWZ8Mc=", "path": "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute", - "revision": "8d2831dd9188c8f7a1c9719564d23f879bba0179", - "revisionTime": "2017-09-15T23:47:43Z" + "revision": "8feb05fb5bffe503a94fb9a687a01a9eca0dcf37", + "revisionTime": "2017-10-27T17:14:03Z" }, { - "checksumSHA1": "E3UN74VVqzHAgT0VUmfKX+d+PJ0=", + "checksumSHA1": "vU/6dpCdnPMy35VFUju8+Dxn4sA=", "path": "github.com/aws/aws-sdk-go/service/ec2", - "revision": "8d2831dd9188c8f7a1c9719564d23f879bba0179", - "revisionTime": "2017-09-15T23:47:43Z" + "revision": "8feb05fb5bffe503a94fb9a687a01a9eca0dcf37", + "revisionTime": "2017-10-27T17:14:03Z" }, { - "checksumSHA1": "H41YeQoLzuR1gQvBytlaL/gql+A=", + "checksumSHA1": "a+tKLYUEM3ZftY52P2ywtIxlCxE=", "path": "github.com/aws/aws-sdk-go/service/iam", - "revision": "8d2831dd9188c8f7a1c9719564d23f879bba0179", - "revisionTime": "2017-09-15T23:47:43Z" + "revision": "8feb05fb5bffe503a94fb9a687a01a9eca0dcf37", + "revisionTime": "2017-10-27T17:14:03Z" }, { - "checksumSHA1": "SEKg+cGyOj6dKdK5ltUHsoL4R4Y=", + "checksumSHA1": "QPxEB1a4X55kNCF/GlH18mOrHMA=", "path": "github.com/aws/aws-sdk-go/service/s3", - "revision": "8d2831dd9188c8f7a1c9719564d23f879bba0179", - "revisionTime": "2017-09-15T23:47:43Z" + "revision": "8feb05fb5bffe503a94fb9a687a01a9eca0dcf37", + "revisionTime": "2017-10-27T17:14:03Z" }, { "checksumSHA1": "MerduaV3PxtZAWvOGpgoBIglo38=", "path": "github.com/aws/aws-sdk-go/service/sts", - "revision": "8d2831dd9188c8f7a1c9719564d23f879bba0179", - "revisionTime": "2017-09-15T23:47:43Z" + "revision": "8feb05fb5bffe503a94fb9a687a01a9eca0dcf37", + "revisionTime": "2017-10-27T17:14:03Z" }, { "checksumSHA1": "oTmBS67uxM6OXB/+OJUAG9LK4jw=", @@ -436,28 +427,28 @@ "revisionTime": "2017-04-17T20:07:03Z" }, { - "checksumSHA1": "uz40oH/u0lUXQPH+dMn1COrGr+o=", + "checksumSHA1": "Fnz7xSfgecNEaVi+I5a7HA1cl3I=", "path": "github.com/boombuler/barcode", - "revision": "45e1f5ec9e3e4bb74da34aee9ae6cb6dec7a30c8", - "revisionTime": "2017-09-08T15:22:17Z" + "revision": "3cfea5ab600ae37946be2b763b8ec2c1cf2d272d", + "revisionTime": "2017-09-22T10:33:52Z" }, { "checksumSHA1": "jWsoIeAcg4+QlCJLZ8jXHiJ5a3s=", "path": "github.com/boombuler/barcode/qr", - "revision": "45e1f5ec9e3e4bb74da34aee9ae6cb6dec7a30c8", - "revisionTime": "2017-09-08T15:22:17Z" + "revision": "3cfea5ab600ae37946be2b763b8ec2c1cf2d272d", + "revisionTime": "2017-09-22T10:33:52Z" }, { "checksumSHA1": "axe0OTdOjYa+XKDUYqzOv7FGaWo=", "path": "github.com/boombuler/barcode/utils", - "revision": "45e1f5ec9e3e4bb74da34aee9ae6cb6dec7a30c8", - "revisionTime": "2017-09-08T15:22:17Z" + "revision": "3cfea5ab600ae37946be2b763b8ec2c1cf2d272d", + "revisionTime": "2017-09-22T10:33:52Z" }, { - "checksumSHA1": "u8n5T1RWE2k2jkddsmQ1SMtssms=", + "checksumSHA1": "BkXphDj0I6VmogVzOSg8xfBHXdQ=", "path": "github.com/cenk/backoff", - "revision": "61153c768f31ee5f130071d08fc82b85208528de", - "revisionTime": "2017-07-11T19:02:43Z" + "revision": "309aa717adbf351e92864cbedf9cca0b769a4b5a", + "revisionTime": 
"2017-10-07T11:45:50Z" }, { "checksumSHA1": "sFjc2R+KS9AeXIPMV4KCw+GwX5I=", @@ -466,28 +457,28 @@ "revisionTime": "2017-09-11T15:31:29Z" }, { - "checksumSHA1": "WsB6y1Yd+kDbHGz1Rm7xZ44hyAE=", + "checksumSHA1": "1+wh7HBBExMeHebpcxtDOIrIlCs=", "path": "github.com/circonus-labs/circonus-gometrics", - "revision": "85eb4cccf312bf5868a05cceef09cf75dd18dddd", - "revisionTime": "2017-08-21T15:30:54Z" + "revision": "b25d14eeef390159289ad3e8521eff3162c59685", + "revisionTime": "2017-10-19T13:11:15Z" }, { - "checksumSHA1": "LzcVA+G9EdU+v9qlykOXVUZCIs4=", + "checksumSHA1": "AeoKS438naDrZxFIVU8u8tVGyLg=", "path": "github.com/circonus-labs/circonus-gometrics/api", - "revision": "85eb4cccf312bf5868a05cceef09cf75dd18dddd", - "revisionTime": "2017-08-21T15:30:54Z" + "revision": "b25d14eeef390159289ad3e8521eff3162c59685", + "revisionTime": "2017-10-19T13:11:15Z" }, { "checksumSHA1": "bQhz/fcyZPmuHSH2qwC4ZtATy5c=", "path": "github.com/circonus-labs/circonus-gometrics/api/config", - "revision": "85eb4cccf312bf5868a05cceef09cf75dd18dddd", - "revisionTime": "2017-08-21T15:30:54Z" + "revision": "b25d14eeef390159289ad3e8521eff3162c59685", + "revisionTime": "2017-10-19T13:11:15Z" }, { - "checksumSHA1": "8J8kP2A6I7xr0YbWy22j4+SU1uk=", + "checksumSHA1": "yMRPGEGVO916uQ/YIEhPC14Qqek=", "path": "github.com/circonus-labs/circonus-gometrics/checkmgr", - "revision": "85eb4cccf312bf5868a05cceef09cf75dd18dddd", - "revisionTime": "2017-08-21T15:30:54Z" + "revision": "b25d14eeef390159289ad3e8521eff3162c59685", + "revisionTime": "2017-10-19T13:11:15Z" }, { "checksumSHA1": "wytZe79b+6N7+sa2h4hbM6141KU=", @@ -496,94 +487,94 @@ "revisionTime": "2017-05-25T20:16:49Z" }, { - "checksumSHA1": "iqKEjY4Grc0IAfPagpbwO0pDSeM=", + "checksumSHA1": "5hMJ3VvBuAx6DThEVO0GhZEwPCA=", "path": "github.com/cockroachdb/cockroach-go/crdb", - "revision": "c806b484b8611fc587b6abc07f8bb0f7824a78d6", - "revisionTime": "2017-08-08T22:01:06Z" + "revision": "0d8b4682f140f0fe486ef7e3d2f70665f3066906", + "revisionTime": "2017-10-23T19:07:33Z" }, { "checksumSHA1": "GqIrOttKaO7k6HIaHQLPr3cY7rY=", "path": "github.com/containerd/continuity/pathdriver", - "revision": "35d55c5e8dd23b32037d56cf97174aff3efdfa83", - "revisionTime": "2017-09-13T16:46:42Z" + "revision": "1bed1ecb1dc42d8f4d2ac8c23e5cac64749e82c9", + "revisionTime": "2017-10-04T13:49:16Z" }, { "checksumSHA1": "7BC2/27NId9xaPDB5w3nWN2mn9A=", "path": "github.com/coreos/etcd/auth/authpb", - "revision": "6cf0fd7cb0dc48592f81201c6ed9c48a5bc40170", - "revisionTime": "2017-09-15T01:25:25Z" + "revision": "0fcafcb828f82a0d7cdc2801c0d4667dd4686c06", + "revisionTime": "2017-10-27T18:46:46Z" }, { - "checksumSHA1": "yAb5iB7rjx6mYdRSoBUtnKlA3E8=", + "checksumSHA1": "XtVAwbJWD12FGuZrIkxpe8t9TB8=", "path": "github.com/coreos/etcd/client", - "revision": "6cf0fd7cb0dc48592f81201c6ed9c48a5bc40170", - "revisionTime": "2017-09-15T01:25:25Z" + "revision": "0fcafcb828f82a0d7cdc2801c0d4667dd4686c06", + "revisionTime": "2017-10-27T18:46:46Z" }, { - "checksumSHA1": "ED2nsrSpTfKZ08+V7Ap+qJp735w=", + "checksumSHA1": "4m3qRCZfmTyRSzd4eH8ovMb5OXY=", "path": "github.com/coreos/etcd/clientv3", - "revision": "6cf0fd7cb0dc48592f81201c6ed9c48a5bc40170", - "revisionTime": "2017-09-15T01:25:25Z" + "revision": "0fcafcb828f82a0d7cdc2801c0d4667dd4686c06", + "revisionTime": "2017-10-27T18:46:46Z" }, { "checksumSHA1": "LpOgTec6cz2Tf3zDav7VkqMHmBM=", "path": "github.com/coreos/etcd/clientv3/concurrency", - "revision": "6cf0fd7cb0dc48592f81201c6ed9c48a5bc40170", - "revisionTime": "2017-09-15T01:25:25Z" + "revision": 
"0fcafcb828f82a0d7cdc2801c0d4667dd4686c06", + "revisionTime": "2017-10-27T18:46:46Z" }, { - "checksumSHA1": "IhfdapvN/pqvnHV81gLbiDbP9Gg=", + "checksumSHA1": "VMC9J0rMVk3Fv8r8Bj7qqLlXc3E=", "path": "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes", - "revision": "6cf0fd7cb0dc48592f81201c6ed9c48a5bc40170", - "revisionTime": "2017-09-15T01:25:25Z" + "revision": "0fcafcb828f82a0d7cdc2801c0d4667dd4686c06", + "revisionTime": "2017-10-27T18:46:46Z" }, { "checksumSHA1": "c0ltvGUOnk8qaEshFwc0PDH5nbc=", "path": "github.com/coreos/etcd/etcdserver/etcdserverpb", - "revision": "6cf0fd7cb0dc48592f81201c6ed9c48a5bc40170", - "revisionTime": "2017-09-15T01:25:25Z" + "revision": "0fcafcb828f82a0d7cdc2801c0d4667dd4686c06", + "revisionTime": "2017-10-27T18:46:46Z" }, { "checksumSHA1": "JAkX9DfIBrSe0vUa07xl5cikxVQ=", "path": "github.com/coreos/etcd/mvcc/mvccpb", - "revision": "6cf0fd7cb0dc48592f81201c6ed9c48a5bc40170", - "revisionTime": "2017-09-15T01:25:25Z" + "revision": "0fcafcb828f82a0d7cdc2801c0d4667dd4686c06", + "revisionTime": "2017-10-27T18:46:46Z" }, { "checksumSHA1": "mKIXx1kDwmVmdIpZ3pJtRBuUKso=", "path": "github.com/coreos/etcd/pkg/pathutil", - "revision": "6cf0fd7cb0dc48592f81201c6ed9c48a5bc40170", - "revisionTime": "2017-09-15T01:25:25Z" + "revision": "0fcafcb828f82a0d7cdc2801c0d4667dd4686c06", + "revisionTime": "2017-10-27T18:46:46Z" }, { "checksumSHA1": "z+C4BtPa8wbOUKW5dmHyhNnTulg=", "path": "github.com/coreos/etcd/pkg/srv", - "revision": "6cf0fd7cb0dc48592f81201c6ed9c48a5bc40170", - "revisionTime": "2017-09-15T01:25:25Z" + "revision": "0fcafcb828f82a0d7cdc2801c0d4667dd4686c06", + "revisionTime": "2017-10-27T18:46:46Z" }, { "checksumSHA1": "rMyIh9PsSvPs6Yd+YgKITQzQJx8=", "path": "github.com/coreos/etcd/pkg/tlsutil", - "revision": "6cf0fd7cb0dc48592f81201c6ed9c48a5bc40170", - "revisionTime": "2017-09-15T01:25:25Z" + "revision": "0fcafcb828f82a0d7cdc2801c0d4667dd4686c06", + "revisionTime": "2017-10-27T18:46:46Z" }, { - "checksumSHA1": "J3Ob8OxG+QWqj1JpeHv+CgOAWok=", + "checksumSHA1": "agofzi+YZ7VYbxCldLaHYHAtlpc=", "path": "github.com/coreos/etcd/pkg/transport", - "revision": "6cf0fd7cb0dc48592f81201c6ed9c48a5bc40170", - "revisionTime": "2017-09-15T01:25:25Z" + "revision": "0fcafcb828f82a0d7cdc2801c0d4667dd4686c06", + "revisionTime": "2017-10-27T18:46:46Z" }, { "checksumSHA1": "gx1gJIMU6T0UNQ0bPZ/drQ8cpCI=", "path": "github.com/coreos/etcd/pkg/types", - "revision": "6cf0fd7cb0dc48592f81201c6ed9c48a5bc40170", - "revisionTime": "2017-09-15T01:25:25Z" + "revision": "0fcafcb828f82a0d7cdc2801c0d4667dd4686c06", + "revisionTime": "2017-10-27T18:46:46Z" }, { "checksumSHA1": "sp2FkEyaIGiQFOEZCTDkBZgyHOs=", "path": "github.com/coreos/etcd/version", - "revision": "6cf0fd7cb0dc48592f81201c6ed9c48a5bc40170", - "revisionTime": "2017-09-15T01:25:25Z" + "revision": "0fcafcb828f82a0d7cdc2801c0d4667dd4686c06", + "revisionTime": "2017-10-27T18:46:46Z" }, { "checksumSHA1": "97BsbXOiZ8+Kr+LIuZkQFtSj7H4=", @@ -592,178 +583,166 @@ "revisionTime": "2017-06-13T09:22:38Z" }, { - "checksumSHA1": "/UK1meOFsZCen8j5oys86YGZigk=", + "checksumSHA1": "w0tKAADAfc6jNZR/sSqjSXsw3Hg=", "path": "github.com/denisenkom/go-mssqldb", - "revision": "b49fae7d39756354262c5f385d4cf04f9ac6618b", - "revisionTime": "2017-09-14T13:05:12Z" + "revision": "88555645b640cc621e32f8693d7586a1aa1575f4", + "revisionTime": "2017-10-06T17:24:03Z" }, { - "checksumSHA1": "GXOurDGgsLmJs0wounpdWZZRSGw=", + "checksumSHA1": "+TKtBzv23ywvmmqRiGEjUba4YmI=", "path": "github.com/dgrijalva/jwt-go", - "revision": "a539ee1a749a2b895533f979515ac7e6e0f5b650", - 
"revisionTime": "2017-06-08T00:51:49Z" + "revision": "dbeaa9332f19a944acb5736b4456cfcc02140e29", + "revisionTime": "2017-10-19T21:57:19Z" }, { - "checksumSHA1": "P2SUyZzCzHMkb3tfts/32MzCM6A=", + "checksumSHA1": "jJYb3/z8SQUqOQK1VVfG1k8ODco=", "path": "github.com/docker/docker/api/types", - "revision": "a5f9783c930834b8e6035fb0ad9c22fd4bbfc355", - "revisionTime": "2017-09-15T23:49:48Z" + "revision": "ecf4125b85e0faa57d2739348e0d453c1d24d10c", + "revisionTime": "2017-10-27T13:10:09Z" }, { "checksumSHA1": "jVJDbe0IcyjoKc2xbohwzQr+FF0=", "path": "github.com/docker/docker/api/types/blkiodev", - "revision": "a5f9783c930834b8e6035fb0ad9c22fd4bbfc355", - "revisionTime": "2017-09-15T23:49:48Z" + "revision": "ecf4125b85e0faa57d2739348e0d453c1d24d10c", + "revisionTime": "2017-10-27T13:10:09Z" }, { "checksumSHA1": "wyg5aht8yEIRsgW/PzDvy9yIF7U=", "path": "github.com/docker/docker/api/types/container", - "revision": "a5f9783c930834b8e6035fb0ad9c22fd4bbfc355", - "revisionTime": "2017-09-15T23:49:48Z" + "revision": "ecf4125b85e0faa57d2739348e0d453c1d24d10c", + "revisionTime": "2017-10-27T13:10:09Z" }, { "checksumSHA1": "S4SWOa0XduRd8ene8Alwih2Nwcw=", "path": "github.com/docker/docker/api/types/filters", - "revision": "a5f9783c930834b8e6035fb0ad9c22fd4bbfc355", - "revisionTime": "2017-09-15T23:49:48Z" + "revision": "ecf4125b85e0faa57d2739348e0d453c1d24d10c", + "revisionTime": "2017-10-27T13:10:09Z" }, { "checksumSHA1": "uJeLBKpHZXP+bWhXP4HhpyUTWYI=", "path": "github.com/docker/docker/api/types/mount", - "revision": "a5f9783c930834b8e6035fb0ad9c22fd4bbfc355", - "revisionTime": "2017-09-15T23:49:48Z" + "revision": "ecf4125b85e0faa57d2739348e0d453c1d24d10c", + "revisionTime": "2017-10-27T13:10:09Z" }, { "checksumSHA1": "Gskp+nvbVe8Gk1xPLHylZvNmqTg=", "path": "github.com/docker/docker/api/types/network", - "revision": "a5f9783c930834b8e6035fb0ad9c22fd4bbfc355", - "revisionTime": "2017-09-15T23:49:48Z" + "revision": "ecf4125b85e0faa57d2739348e0d453c1d24d10c", + "revisionTime": "2017-10-27T13:10:09Z" }, { "checksumSHA1": "r2vWq7Uc3ExKzMqYgH0b4AKjLKY=", "path": "github.com/docker/docker/api/types/registry", - "revision": "a5f9783c930834b8e6035fb0ad9c22fd4bbfc355", - "revisionTime": "2017-09-15T23:49:48Z" + "revision": "ecf4125b85e0faa57d2739348e0d453c1d24d10c", + "revisionTime": "2017-10-27T13:10:09Z" }, { "checksumSHA1": "VTxWyFud/RedrpllGdQonVtGM/A=", "path": "github.com/docker/docker/api/types/strslice", - "revision": "a5f9783c930834b8e6035fb0ad9c22fd4bbfc355", - "revisionTime": "2017-09-15T23:49:48Z" + "revision": "ecf4125b85e0faa57d2739348e0d453c1d24d10c", + "revisionTime": "2017-10-27T13:10:09Z" }, { "checksumSHA1": "WNhyKx+2cJ5Gx3jdCeDr0J43F3Y=", "path": "github.com/docker/docker/api/types/swarm", - "revision": "a5f9783c930834b8e6035fb0ad9c22fd4bbfc355", - "revisionTime": "2017-09-15T23:49:48Z" + "revision": "ecf4125b85e0faa57d2739348e0d453c1d24d10c", + "revisionTime": "2017-10-27T13:10:09Z" }, { "checksumSHA1": "mi8EDCDjtrZEONRXPG7VHJosDwY=", "path": "github.com/docker/docker/api/types/swarm/runtime", - "revision": "a5f9783c930834b8e6035fb0ad9c22fd4bbfc355", - "revisionTime": "2017-09-15T23:49:48Z" + "revision": "ecf4125b85e0faa57d2739348e0d453c1d24d10c", + "revisionTime": "2017-10-27T13:10:09Z" }, { "checksumSHA1": "uDPQ3nHsrvGQc9tg/J9OSC4N5dQ=", "path": "github.com/docker/docker/api/types/versions", - "revision": "a5f9783c930834b8e6035fb0ad9c22fd4bbfc355", - "revisionTime": "2017-09-15T23:49:48Z" + "revision": "ecf4125b85e0faa57d2739348e0d453c1d24d10c", + "revisionTime": "2017-10-27T13:10:09Z" }, { - 
"checksumSHA1": "mZ6S+W3VaotYJZnuJim6PNV04C8=", + "checksumSHA1": "qY6OxHo29P8r5E8DWe2xJBhQ+D0=", "path": "github.com/docker/docker/opts", - "revision": "a5f9783c930834b8e6035fb0ad9c22fd4bbfc355", - "revisionTime": "2017-09-15T23:49:48Z" + "revision": "ecf4125b85e0faa57d2739348e0d453c1d24d10c", + "revisionTime": "2017-10-27T13:10:09Z" }, { - "checksumSHA1": "UVWAKMlz7uJkCwDBka0kfL3fLJc=", + "checksumSHA1": "fzHqlNW/7ENrADaVp301GWHKGm0=", "path": "github.com/docker/docker/pkg/archive", - "revision": "a5f9783c930834b8e6035fb0ad9c22fd4bbfc355", - "revisionTime": "2017-09-15T23:49:48Z" + "revision": "ecf4125b85e0faa57d2739348e0d453c1d24d10c", + "revisionTime": "2017-10-27T13:10:09Z" }, { - "checksumSHA1": "/rhDgzb51QH29Dn6v9lr/NQrLbs=", + "checksumSHA1": "cHtl1iwPIEobY8Hj9Ww6vJeDlu8=", "path": "github.com/docker/docker/pkg/fileutils", - "revision": "a5f9783c930834b8e6035fb0ad9c22fd4bbfc355", - "revisionTime": "2017-09-15T23:49:48Z" + "revision": "ecf4125b85e0faa57d2739348e0d453c1d24d10c", + "revisionTime": "2017-10-27T13:10:09Z" }, { "checksumSHA1": "OSFbrnYeBqJzNJ5CsHzQpfCzpR0=", "path": "github.com/docker/docker/pkg/homedir", - "revision": "a5f9783c930834b8e6035fb0ad9c22fd4bbfc355", - "revisionTime": "2017-09-15T23:49:48Z" + "revision": "ecf4125b85e0faa57d2739348e0d453c1d24d10c", + "revisionTime": "2017-10-27T13:10:09Z" }, { - "checksumSHA1": "5UI7YJZQYPQhSj3HTUT6h5j7SeE=", + "checksumSHA1": "yOye880NsHuCiDUcLakROyBKZOI=", "path": "github.com/docker/docker/pkg/idtools", - "revision": "a5f9783c930834b8e6035fb0ad9c22fd4bbfc355", - "revisionTime": "2017-09-15T23:49:48Z" + "revision": "ecf4125b85e0faa57d2739348e0d453c1d24d10c", + "revisionTime": "2017-10-27T13:10:09Z" }, { "checksumSHA1": "jsjQr20W2W6Gewf8Un3D8IKu2I8=", "path": "github.com/docker/docker/pkg/ioutils", - "revision": "a5f9783c930834b8e6035fb0ad9c22fd4bbfc355", - "revisionTime": "2017-09-15T23:49:48Z" + "revision": "ecf4125b85e0faa57d2739348e0d453c1d24d10c", + "revisionTime": "2017-10-27T13:10:09Z" }, { - "checksumSHA1": "kp20bhjkvJ06uW6DRfVIZbCj8SY=", - "path": "github.com/docker/docker/pkg/jsonlog", - "revision": "a5f9783c930834b8e6035fb0ad9c22fd4bbfc355", - "revisionTime": "2017-09-15T23:49:48Z" - }, - { - "checksumSHA1": "d7KXgDaJPBbQ8LFKwdwNU2yZz3k=", + "checksumSHA1": "g3RZfMeU7DDreHT4gQcdc3Bf784=", "path": "github.com/docker/docker/pkg/jsonmessage", - "revision": "a5f9783c930834b8e6035fb0ad9c22fd4bbfc355", - "revisionTime": "2017-09-15T23:49:48Z" + "revision": "ecf4125b85e0faa57d2739348e0d453c1d24d10c", + "revisionTime": "2017-10-27T13:10:09Z" }, { "checksumSHA1": "ndnAFCfsGC3upNQ6jAEwzxcurww=", "path": "github.com/docker/docker/pkg/longpath", - "revision": "a5f9783c930834b8e6035fb0ad9c22fd4bbfc355", - "revisionTime": "2017-09-15T23:49:48Z" + "revision": "ecf4125b85e0faa57d2739348e0d453c1d24d10c", + "revisionTime": "2017-10-27T13:10:09Z" }, { - "checksumSHA1": "w8pyOX/u8XwXRmPWlpKpBBwwYUg=", + "checksumSHA1": "FZu68xyweD/3NeOOBPub4fN8bm0=", "path": "github.com/docker/docker/pkg/mount", - "revision": "a5f9783c930834b8e6035fb0ad9c22fd4bbfc355", - "revisionTime": "2017-09-15T23:49:48Z" + "revision": "ecf4125b85e0faa57d2739348e0d453c1d24d10c", + "revisionTime": "2017-10-27T13:10:09Z" }, { "checksumSHA1": "cS0+jrjme0j9GX8LLcioQ7ZOBsQ=", "path": "github.com/docker/docker/pkg/pools", - "revision": "a5f9783c930834b8e6035fb0ad9c22fd4bbfc355", - "revisionTime": "2017-09-15T23:49:48Z" - }, - { - "checksumSHA1": "txf3EORYff4hO6PEvwBm2lyh1MU=", - "path": "github.com/docker/docker/pkg/promise", - "revision": 
"a5f9783c930834b8e6035fb0ad9c22fd4bbfc355", - "revisionTime": "2017-09-15T23:49:48Z" + "revision": "ecf4125b85e0faa57d2739348e0d453c1d24d10c", + "revisionTime": "2017-10-27T13:10:09Z" }, { "checksumSHA1": "H1rrbVmeE1z2TnkF7tSrfh+qUOY=", "path": "github.com/docker/docker/pkg/stdcopy", - "revision": "a5f9783c930834b8e6035fb0ad9c22fd4bbfc355", - "revisionTime": "2017-09-15T23:49:48Z" + "revision": "ecf4125b85e0faa57d2739348e0d453c1d24d10c", + "revisionTime": "2017-10-27T13:10:09Z" }, { - "checksumSHA1": "UEMAKQqAyL9hs6RWxesQuYMQ3+I=", + "checksumSHA1": "H2cIcBmojOYqzXs/9gMJg1uH5nI=", "path": "github.com/docker/docker/pkg/system", - "revision": "a5f9783c930834b8e6035fb0ad9c22fd4bbfc355", - "revisionTime": "2017-09-15T23:49:48Z" + "revision": "ecf4125b85e0faa57d2739348e0d453c1d24d10c", + "revisionTime": "2017-10-27T13:10:09Z" }, { - "checksumSHA1": "dLlr8QsbOA1BkgdKXNWdqrYsx3E=", + "checksumSHA1": "wgZDlSK1R+oVFZEfFbxIBl/qGfc=", "path": "github.com/docker/docker/pkg/term", - "revision": "a5f9783c930834b8e6035fb0ad9c22fd4bbfc355", - "revisionTime": "2017-09-15T23:49:48Z" + "revision": "ecf4125b85e0faa57d2739348e0d453c1d24d10c", + "revisionTime": "2017-10-27T13:10:09Z" }, { "checksumSHA1": "NcljihOPc95QOyQAdufyy3eqKSU=", "path": "github.com/docker/docker/pkg/term/windows", - "revision": "a5f9783c930834b8e6035fb0ad9c22fd4bbfc355", - "revisionTime": "2017-09-15T23:49:48Z" + "revision": "ecf4125b85e0faa57d2739348e0d453c1d24d10c", + "revisionTime": "2017-10-27T13:10:09Z" }, { "checksumSHA1": "JbiWTzH699Sqz25XmDlsARpMN9w=", @@ -790,30 +769,28 @@ "revisionTime": "2016-06-27T13:04:34Z" }, { - "checksumSHA1": "g3z4plpw9F/ho3hdJb+X/bN/OgE=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/github.com/emicklei/go-restful", + "checksumSHA1": "9If/IfPOApLO9VKwvbfu/TzAr/0=", "path": "github.com/emicklei/go-restful", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" + "revision": "dc0f94ee75de39d6420e5446b0222490264bb90f", + "revisionTime": "2017-10-05T04:51:49Z" }, { "checksumSHA1": "rmsBHtFpV3osid71XnTZBo/b3bU=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/github.com/emicklei/go-restful/log", "path": "github.com/emicklei/go-restful/log", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" + "revision": "dc0f94ee75de39d6420e5446b0222490264bb90f", + "revisionTime": "2017-10-05T04:51:49Z" }, { - "checksumSHA1": "40Ns85VYa4smQPcewZ7SOdfLnKU=", + "checksumSHA1": "Z2LEpah3ZMTYNpRy7Rd+tI+T+W0=", "path": "github.com/fatih/structs", - "revision": "7e5a8eef611ee84dd359503f3969f80df4c50723", - "revisionTime": "2017-06-12T12:55:44Z" + "revision": "f5faa72e73092639913f5833b75e1ac1d6bc7a63", + "revisionTime": "2017-10-20T06:48:19Z" }, { - "checksumSHA1": "/zZhbeh4tViFE/MZLgMDT9kgjjc=", + "checksumSHA1": "gV+ZxXUKi8RW1EjP+LZllNS2ScY=", "path": "github.com/fsouza/go-dockerclient", - "revision": "98edf3edfae6a6500fecc69d2bcccf1302544004", - "revisionTime": "2017-08-30T18:11:06Z" + "revision": "de3ca5806469df077d946342784e525cc7d078ed", + "revisionTime": "2017-10-27T13:47:50Z" }, { "checksumSHA1": "BjjcPf2i7KfBnVazHZCAe9xn6jY=", @@ -834,130 +811,124 @@ "revisionTime": "2016-12-05T22:32:45Z" }, { - "checksumSHA1": "87LEfpY9cOk9CP7pWyIbmQ/6enU=", + "checksumSHA1": "Y/t3trtei7dkADWEVv8WiPuYv60=", "path": "github.com/go-ini/ini", - "revision": "c787282c39ac1fc618827141a1f762240def08a3", - "revisionTime": "2017-08-13T05:22:30Z" + "revision": "f280b3ba517bf5fc98922624f21fb0e7a92adaec", + 
"revisionTime": "2017-10-26T18:46:43Z" }, { - "checksumSHA1": "U/LyuZbzAdQo0WG1KPc+Nv65FKc=", + "checksumSHA1": "1AP23CNIbIxDH/auM59029H0CGY=", "path": "github.com/go-ldap/ldap", - "revision": "95ede1266b237bf8e9aa5dce0b3250e51bfefe66", - "revisionTime": "2017-08-24T17:36:02Z" + "revision": "0ae9f2495c4a9e5d436bc9a2b13a71a2fb06ddf3", + "revisionTime": "2017-09-29T19:17:40Z" }, { "checksumSHA1": "6dTGC5A1Y1xnv+JSi9z8S6JfnH0=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/github.com/go-openapi/jsonpointer", "path": "github.com/go-openapi/jsonpointer", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" + "revision": "779f45308c19820f1a69e9a4cd965f496e0da10f", + "revisionTime": "2017-01-02T17:42:23Z" }, { "checksumSHA1": "YMNc0I/ifBw9TsnF13NTpIN9yu4=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/github.com/go-openapi/jsonreference", "path": "github.com/go-openapi/jsonreference", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" + "revision": "36d33bfe519efae5632669801b180bf1a245da3b", + "revisionTime": "2016-11-05T16:21:50Z" }, { - "checksumSHA1": "x1yvWanIW3DJS8I5HMuN6CdZXDg=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/github.com/go-openapi/spec", + "checksumSHA1": "txgFRCgsTgslzMi8kzfq5xGqsJ4=", "path": "github.com/go-openapi/spec", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" + "revision": "84b5bee7bcb76f3d17bcbaf421bac44bd5709ca6", + "revisionTime": "2017-10-19T17:48:23Z" }, { "checksumSHA1": "Wz6dE9E9ZwCK4oWRr5lNazhgMY0=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/github.com/go-openapi/swag", "path": "github.com/go-openapi/swag", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" + "revision": "f3f9494671f93fcff853e3c6e9e948b3eb71e590", + "revisionTime": "2017-06-06T14:27:51Z" }, { - "checksumSHA1": "TYcDSEYn/Fj7Bx6TD7j+bx6zNfc=", + "checksumSHA1": "os4jdoOUjr86qvOwri8Ut1rXDrg=", "path": "github.com/go-sql-driver/mysql", - "revision": "1548d61ac1b48a30add201183a992009bc02af20", - "revisionTime": "2017-09-15T05:15:32Z" + "revision": "fade21009797158e7b79e04c340118a9220c6f9e", + "revisionTime": "2017-10-17T18:16:16Z" }, { - "checksumSHA1": "zOYbwp6iTI4XXz1u6w8Yp4xeM3U=", + "checksumSHA1": "Gk4WdAKNJ1F28JTy4aXUAIHrLZE=", "path": "github.com/gocql/gocql", - "revision": "afe4fee3ded9a8e20b174f88e3d9b6ed30ad4d66", - "revisionTime": "2017-09-13T17:07:25Z" + "revision": "7fabcab850b7eade1d0e0ac5ac0b6b66d6801a75", + "revisionTime": "2017-10-26T22:00:49Z" }, { "checksumSHA1": "7RlYIbPYgPkxDDCSEuE6bvYEEeU=", "path": "github.com/gocql/gocql/internal/lru", - "revision": "afe4fee3ded9a8e20b174f88e3d9b6ed30ad4d66", - "revisionTime": "2017-09-13T17:07:25Z" + "revision": "7fabcab850b7eade1d0e0ac5ac0b6b66d6801a75", + "revisionTime": "2017-10-26T22:00:49Z" }, { "checksumSHA1": "ctK9mwZKnt/8dHxx2Ef6nZTljZs=", "path": "github.com/gocql/gocql/internal/murmur", - "revision": "afe4fee3ded9a8e20b174f88e3d9b6ed30ad4d66", - "revisionTime": "2017-09-13T17:07:25Z" + "revision": "7fabcab850b7eade1d0e0ac5ac0b6b66d6801a75", + "revisionTime": "2017-10-26T22:00:49Z" }, { "checksumSHA1": "tZQDfMMTKrYMXqen0zjJWLtOf1A=", "path": "github.com/gocql/gocql/internal/streams", - "revision": "afe4fee3ded9a8e20b174f88e3d9b6ed30ad4d66", - "revisionTime": "2017-09-13T17:07:25Z" + "revision": "7fabcab850b7eade1d0e0ac5ac0b6b66d6801a75", 
+ "revisionTime": "2017-10-26T22:00:49Z" }, { "checksumSHA1": "wn2shNJMwRZpvuvkf1s7h0wvqHI=", "path": "github.com/gogo/protobuf/proto", - "revision": "2adc21fd136931e0388e278825291678e1d98309", - "revisionTime": "2017-08-31T18:35:40Z" + "revision": "117892bf1866fbaa2318c03e50e40564c8845457", + "revisionTime": "2017-10-18T11:19:13Z" }, { "checksumSHA1": "HPVQZu059/Rfw2bAWM538bVTcUc=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/github.com/gogo/protobuf/sortkeys", "path": "github.com/gogo/protobuf/sortkeys", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" + "revision": "117892bf1866fbaa2318c03e50e40564c8845457", + "revisionTime": "2017-10-18T11:19:13Z" }, { "checksumSHA1": "HmbftipkadrLlCfzzVQ+iFHbl6g=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/github.com/golang/glog", "path": "github.com/golang/glog", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" + "revision": "23def4e6c14b4da8ac2ed8007337bc5eb5007998", + "revisionTime": "2016-01-25T20:49:56Z" }, { "checksumSHA1": "yqF125xVSkmfLpIVGrLlfE05IUk=", "path": "github.com/golang/protobuf/proto", - "revision": "11b8df160996e00fd4b55cbaafb3d84ec6d50fa8", - "revisionTime": "2017-09-14T23:13:19Z" + "revision": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9", + "revisionTime": "2017-10-21T04:39:52Z" }, { - "checksumSHA1": "haP5YgoPcGgm3e0QHByz89T7rGI=", + "checksumSHA1": "XNHQiRltA7NQJV0RvUroY+cf+zg=", "path": "github.com/golang/protobuf/protoc-gen-go/descriptor", - "revision": "11b8df160996e00fd4b55cbaafb3d84ec6d50fa8", - "revisionTime": "2017-09-14T23:13:19Z" + "revision": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9", + "revisionTime": "2017-10-21T04:39:52Z" }, { "checksumSHA1": "VfkiItDBFFkZluaAMAzJipDXNBY=", "path": "github.com/golang/protobuf/ptypes", - "revision": "11b8df160996e00fd4b55cbaafb3d84ec6d50fa8", - "revisionTime": "2017-09-14T23:13:19Z" + "revision": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9", + "revisionTime": "2017-10-21T04:39:52Z" }, { - "checksumSHA1": "CUN+8uU3mWzv9JNB2RXTefAuBb4=", + "checksumSHA1": "UB9scpDxeFjQe5tEthuR4zCLRu4=", "path": "github.com/golang/protobuf/ptypes/any", - "revision": "11b8df160996e00fd4b55cbaafb3d84ec6d50fa8", - "revisionTime": "2017-09-14T23:13:19Z" + "revision": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9", + "revisionTime": "2017-10-21T04:39:52Z" }, { "checksumSHA1": "hUjAj0dheFVDl84BAnSWj9qy2iY=", "path": "github.com/golang/protobuf/ptypes/duration", - "revision": "11b8df160996e00fd4b55cbaafb3d84ec6d50fa8", - "revisionTime": "2017-09-14T23:13:19Z" + "revision": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9", + "revisionTime": "2017-10-21T04:39:52Z" }, { "checksumSHA1": "O2ItP5rmfrgxPufhjJXbFlXuyL8=", "path": "github.com/golang/protobuf/ptypes/timestamp", - "revision": "11b8df160996e00fd4b55cbaafb3d84ec6d50fa8", - "revisionTime": "2017-09-14T23:13:19Z" + "revision": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9", + "revisionTime": "2017-10-21T04:39:52Z" }, { "checksumSHA1": "p/8vSviYF91gFflhrt5vkyksroo=", @@ -966,10 +937,10 @@ "revisionTime": "2017-02-15T23:32:05Z" }, { - "checksumSHA1": "Y1SmfKIwMTCDF+S9Y/TKRjbz9wI=", + "checksumSHA1": "T6Nu7fdIUq0/xJ3NkjTWP1CL9xc=", "path": "github.com/google/go-github/github", - "revision": "12126fbfe000a09047ebdeb2b5160be3af88ab9e", - "revisionTime": "2017-09-07T20:09:01Z" + "revision": "dd25daf12b8e763b650e4839dfb8e8094de22cc4", + "revisionTime": "2017-10-26T22:12:31Z" }, { "checksumSHA1": "p3IB18uJRs4dL2K5yx24MrLYE9A=", @@ 
-979,10 +950,9 @@ }, { "checksumSHA1": "PFtXkXPO7pwRtykVUUXtc07wc7U=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/github.com/google/gofuzz", "path": "github.com/google/gofuzz", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" + "revision": "24818f796faf91cd76ec7bddd72458fbced7a6c1", + "revisionTime": "2017-06-12T17:47:53Z" }, { "checksumSHA1": "y1/eOdw+BOXCuT83J7mP3ReXaf8=", @@ -997,16 +967,16 @@ "revisionTime": "2016-01-25T11:53:50Z" }, { - "checksumSHA1": "HjNeYKKxpwCxOWa53xMQgNZYg4Y=", + "checksumSHA1": "4hc6jGp9/1m7dp5ACRcGnspjO5E=", "path": "github.com/hashicorp/consul/api", - "revision": "d84c0b1a01b4252f85b71a4ade2543652a206024", - "revisionTime": "2017-09-14T04:22:53Z" + "revision": "610f3c86a089817b5bd5729a3b8c2db33a9ae2b0", + "revisionTime": "2017-10-26T17:59:57Z" }, { - "checksumSHA1": "6DmSB2tmQYZnFLHJIGSLUAS6wcc=", + "checksumSHA1": "tU6mtqrRxFn0gqqRC35JhD0y2rE=", "path": "github.com/hashicorp/consul/lib", - "revision": "d84c0b1a01b4252f85b71a4ade2543652a206024", - "revisionTime": "2017-09-14T04:22:53Z" + "revision": "610f3c86a089817b5bd5729a3b8c2db33a9ae2b0", + "revisionTime": "2017-10-26T17:59:57Z" }, { "checksumSHA1": "cdOCt0Yb+hdErz8NAQqayxPmRsY=", @@ -1021,10 +991,10 @@ "revisionTime": "2017-02-11T01:34:15Z" }, { - "checksumSHA1": "0OUXdKhaE6TzpHevY0VFlAA5YJ8=", + "checksumSHA1": "AA0aYmdg4pb5gPCUSXg8iPzxLag=", "path": "github.com/hashicorp/go-hclog", - "revision": "8105cc0a3736cc153a2025f5d0d91b80045fc9ff", - "revisionTime": "2017-09-03T16:32:58Z" + "revision": "ca137eb4b4389c9bc6f1a6d887f056bf16c00510", + "revisionTime": "2017-10-05T15:17:51Z" }, { "checksumSHA1": "Cas2nprG6pWzf05A2F/OlnjUu2Y=", @@ -1093,94 +1063,88 @@ "revisionTime": "2016-08-13T22:13:03Z" }, { - "checksumSHA1": "Hp4nitbU8nyDAi+q3WwMqpOjFQw=", + "checksumSHA1": "HtpYAWHvd9mq+mHkpo7z8PGzMik=", "path": "github.com/hashicorp/hcl", - "revision": "68e816d1c783414e79bc65b3994d9ab6b0a722ab", - "revisionTime": "2017-09-14T15:46:24Z" + "revision": "23c074d0eceb2b8a5bfdbb271ab780cde70f05a8", + "revisionTime": "2017-10-17T18:19:29Z" }, { "checksumSHA1": "XQmjDva9JCGGkIecOgwtBEMCJhU=", "path": "github.com/hashicorp/hcl/hcl/ast", - "revision": "68e816d1c783414e79bc65b3994d9ab6b0a722ab", - "revisionTime": "2017-09-14T15:46:24Z" + "revision": "23c074d0eceb2b8a5bfdbb271ab780cde70f05a8", + "revisionTime": "2017-10-17T18:19:29Z" }, { "checksumSHA1": "/15SVLnCDzxICSatuYbfctrcpSM=", "path": "github.com/hashicorp/hcl/hcl/parser", - "revision": "68e816d1c783414e79bc65b3994d9ab6b0a722ab", - "revisionTime": "2017-09-14T15:46:24Z" + "revision": "23c074d0eceb2b8a5bfdbb271ab780cde70f05a8", + "revisionTime": "2017-10-17T18:19:29Z" }, { - "checksumSHA1": "z6wdP4mRw4GVjShkNHDaOWkbxS0=", + "checksumSHA1": "PYDzRc61T0pbwWuLNHgBRp/gJII=", "path": "github.com/hashicorp/hcl/hcl/scanner", - "revision": "68e816d1c783414e79bc65b3994d9ab6b0a722ab", - "revisionTime": "2017-09-14T15:46:24Z" + "revision": "23c074d0eceb2b8a5bfdbb271ab780cde70f05a8", + "revisionTime": "2017-10-17T18:19:29Z" }, { "checksumSHA1": "oS3SCN9Wd6D8/LG0Yx1fu84a7gI=", "path": "github.com/hashicorp/hcl/hcl/strconv", - "revision": "68e816d1c783414e79bc65b3994d9ab6b0a722ab", - "revisionTime": "2017-09-14T15:46:24Z" + "revision": "23c074d0eceb2b8a5bfdbb271ab780cde70f05a8", + "revisionTime": "2017-10-17T18:19:29Z" }, { "checksumSHA1": "c6yprzj06ASwCo18TtbbNNBHljA=", "path": "github.com/hashicorp/hcl/hcl/token", - "revision": "68e816d1c783414e79bc65b3994d9ab6b0a722ab", - "revisionTime": 
"2017-09-14T15:46:24Z" + "revision": "23c074d0eceb2b8a5bfdbb271ab780cde70f05a8", + "revisionTime": "2017-10-17T18:19:29Z" }, { "checksumSHA1": "PwlfXt7mFS8UYzWxOK5DOq0yxS0=", "path": "github.com/hashicorp/hcl/json/parser", - "revision": "68e816d1c783414e79bc65b3994d9ab6b0a722ab", - "revisionTime": "2017-09-14T15:46:24Z" + "revision": "23c074d0eceb2b8a5bfdbb271ab780cde70f05a8", + "revisionTime": "2017-10-17T18:19:29Z" }, { - "checksumSHA1": "YdvFsNOMSWMLnY6fcliWQa0O5Fw=", + "checksumSHA1": "afrZ8VmAwfTdDAYVgNSXbxa4GsA=", "path": "github.com/hashicorp/hcl/json/scanner", - "revision": "68e816d1c783414e79bc65b3994d9ab6b0a722ab", - "revisionTime": "2017-09-14T15:46:24Z" + "revision": "23c074d0eceb2b8a5bfdbb271ab780cde70f05a8", + "revisionTime": "2017-10-17T18:19:29Z" }, { "checksumSHA1": "fNlXQCQEnb+B3k5UDL/r15xtSJY=", "path": "github.com/hashicorp/hcl/json/token", - "revision": "68e816d1c783414e79bc65b3994d9ab6b0a722ab", - "revisionTime": "2017-09-14T15:46:24Z" + "revision": "23c074d0eceb2b8a5bfdbb271ab780cde70f05a8", + "revisionTime": "2017-10-17T18:19:29Z" }, { - "checksumSHA1": "/oss17GO4hXGM7QnUdI3VzcAHzA=", + "checksumSHA1": "mS15CkImPzXYsgNwl3Mt9Gh3Vb0=", "path": "github.com/hashicorp/serf/coordinate", - "revision": "555c6fa2a11d1f8e08ac9964015e864a3f61e7c3", - "revisionTime": "2017-09-02T04:25:43Z" + "revision": "c20a0b1b1ea9eb8168bcdec0116688fa9254e449", + "revisionTime": "2017-10-22T02:00:50Z" }, { - "checksumSHA1": "/11y5HSVYFHHjBUHUIt35qi6D/g=", - "path": "github.com/hashicorp/vault-plugin-auth-gcp", - "revision": "440e5e20278c115840fab1f27b7a9f99d405ebbd", - "revisionTime": "2017-10-05T00:02:52Z" - }, - { - "checksumSHA1": "a/6XqbRHzvZ1ngOoGASIIIcmjwM=", + "checksumSHA1": "of/8Lz7af5X3b1tdoVQSzOpmxMo=", "path": "github.com/hashicorp/vault-plugin-auth-gcp/plugin", - "revision": "440e5e20278c115840fab1f27b7a9f99d405ebbd", - "revisionTime": "2017-10-05T00:02:52Z" + "revision": "4febf0d5513b41e1a3b46036e8e2d03469235205", + "revisionTime": "2017-10-09T12:38:52Z" }, { - "checksumSHA1": "nrNcGdv/8Ut8ScFy3tQoY3dpQvs=", + "checksumSHA1": "ffJQvzbQvmCG/PdaElGSfGnDgNM=", "path": "github.com/hashicorp/vault-plugin-auth-gcp/plugin/util", - "revision": "a807a8507e636e40403455258ed25954ec254cad", - "revisionTime": "2017-09-15T19:03:59Z" + "revision": "4febf0d5513b41e1a3b46036e8e2d03469235205", + "revisionTime": "2017-10-09T12:38:52Z" }, { - "checksumSHA1": "/xje1EITZoa0tj7KqPof+3FG+og=", + "checksumSHA1": "dy0XIP1KUS6QA0HEL8nN+9YIghU=", "path": "github.com/hashicorp/vault-plugin-auth-kubernetes", - "revision": "7b79e81da4f5d56811fb7ddda078a96f7f950814", - "revisionTime": "2017-10-05T00:02:34Z" + "revision": "bf36d311cbb89cfb09ffe1cfdbe70c10eb861cdc", + "revisionTime": "2017-10-05T23:21:19Z" }, { "checksumSHA1": "ZhK6IO2XN81Y+3RAjTcVm1Ic7oU=", "path": "github.com/hashicorp/yamux", - "revision": "d1caa6c97c9fc1cc9e83bbe34d0603f9ff0ce8bd", - "revisionTime": "2016-07-20T23:31:40Z" + "revision": "f5742cb6b85602e7fa834e9d5d91a7d7fa850824", + "revisionTime": "2017-10-05T17:02:12Z" }, { "checksumSHA1": "cIinEjB62s8j5cpY1u7sxtg4akg=", @@ -1197,113 +1161,110 @@ { "checksumSHA1": "VJk3rOWfxEV9Ilig5lgzH1qg8Ss=", "path": "github.com/keybase/go-crypto/brainpool", - "revision": "433e2f3d43ef1bd31387582a899389b2fbe2005e", - "revisionTime": "2017-06-28T15:29:38Z" + "revision": "f63716704117f5bd34d8f0f068f7e8369d20d4ab", + "revisionTime": "2017-10-10T11:00:49Z" }, { "checksumSHA1": "rnRjEJs5luF+DIXp2J6LFcQk8Gg=", "path": "github.com/keybase/go-crypto/cast5", - "revision": 
"433e2f3d43ef1bd31387582a899389b2fbe2005e", - "revisionTime": "2017-06-28T15:29:38Z" + "revision": "f63716704117f5bd34d8f0f068f7e8369d20d4ab", + "revisionTime": "2017-10-10T11:00:49Z" }, { "checksumSHA1": "F5++ZQS5Vt7hd6lxPCKTffvph1A=", "path": "github.com/keybase/go-crypto/curve25519", - "revision": "433e2f3d43ef1bd31387582a899389b2fbe2005e", - "revisionTime": "2017-06-28T15:29:38Z" + "revision": "f63716704117f5bd34d8f0f068f7e8369d20d4ab", + "revisionTime": "2017-10-10T11:00:49Z" }, { "checksumSHA1": "IvrDXwIixB5yPPbo6tq1/1cSn78=", "path": "github.com/keybase/go-crypto/ed25519", - "revision": "433e2f3d43ef1bd31387582a899389b2fbe2005e", - "revisionTime": "2017-06-28T15:29:38Z" + "revision": "f63716704117f5bd34d8f0f068f7e8369d20d4ab", + "revisionTime": "2017-10-10T11:00:49Z" }, { "checksumSHA1": "4+fslB6pCbplNq4viy6CrOkkY6Y=", "path": "github.com/keybase/go-crypto/ed25519/internal/edwards25519", - "revision": "433e2f3d43ef1bd31387582a899389b2fbe2005e", - "revisionTime": "2017-06-28T15:29:38Z" + "revision": "f63716704117f5bd34d8f0f068f7e8369d20d4ab", + "revisionTime": "2017-10-10T11:00:49Z" }, { "checksumSHA1": "fgFlkfkaotUjBVhJik2979oCeJw=", "path": "github.com/keybase/go-crypto/openpgp", - "revision": "433e2f3d43ef1bd31387582a899389b2fbe2005e", - "revisionTime": "2017-06-28T15:29:38Z" + "revision": "f63716704117f5bd34d8f0f068f7e8369d20d4ab", + "revisionTime": "2017-10-10T11:00:49Z" }, { - "checksumSHA1": "+spfcEChljh3yeIg4K/xHOQ2pVM=", + "checksumSHA1": "cdgDXvGPDDsu5OwRLxYeHRdb8hI=", "path": "github.com/keybase/go-crypto/openpgp/armor", - "revision": "433e2f3d43ef1bd31387582a899389b2fbe2005e", - "revisionTime": "2017-06-28T15:29:38Z" + "revision": "f63716704117f5bd34d8f0f068f7e8369d20d4ab", + "revisionTime": "2017-10-10T11:00:49Z" }, { "checksumSHA1": "nWhmwjBJqPSvkCWqaap2Z9EiS1k=", "path": "github.com/keybase/go-crypto/openpgp/ecdh", - "revision": "433e2f3d43ef1bd31387582a899389b2fbe2005e", - "revisionTime": "2017-06-28T15:29:38Z" + "revision": "f63716704117f5bd34d8f0f068f7e8369d20d4ab", + "revisionTime": "2017-10-10T11:00:49Z" }, { "checksumSHA1": "uxXG9IC/XF8jwwvZUbW65+x8/+M=", "path": "github.com/keybase/go-crypto/openpgp/elgamal", - "revision": "433e2f3d43ef1bd31387582a899389b2fbe2005e", - "revisionTime": "2017-06-28T15:29:38Z" + "revision": "f63716704117f5bd34d8f0f068f7e8369d20d4ab", + "revisionTime": "2017-10-10T11:00:49Z" }, { "checksumSHA1": "EyUf82Yknzc75m8RcA21CNQINw0=", "path": "github.com/keybase/go-crypto/openpgp/errors", - "revision": "433e2f3d43ef1bd31387582a899389b2fbe2005e", - "revisionTime": "2017-06-28T15:29:38Z" + "revision": "f63716704117f5bd34d8f0f068f7e8369d20d4ab", + "revisionTime": "2017-10-10T11:00:49Z" }, { "checksumSHA1": "tw0BkvixAuw9Ai80hHzFy6W5mnk=", "path": "github.com/keybase/go-crypto/openpgp/packet", - "revision": "433e2f3d43ef1bd31387582a899389b2fbe2005e", - "revisionTime": "2017-06-28T15:29:38Z" + "revision": "f63716704117f5bd34d8f0f068f7e8369d20d4ab", + "revisionTime": "2017-10-10T11:00:49Z" }, { "checksumSHA1": "BGDxg1Xtsz0DSPzdQGJLLQqfYc8=", "path": "github.com/keybase/go-crypto/openpgp/s2k", - "revision": "433e2f3d43ef1bd31387582a899389b2fbe2005e", - "revisionTime": "2017-06-28T15:29:38Z" + "revision": "f63716704117f5bd34d8f0f068f7e8369d20d4ab", + "revisionTime": "2017-10-10T11:00:49Z" }, { "checksumSHA1": "rE3pp7b3gfcmBregzpIvN5IdFhY=", "path": "github.com/keybase/go-crypto/rsa", - "revision": "433e2f3d43ef1bd31387582a899389b2fbe2005e", - "revisionTime": "2017-06-28T15:29:38Z" + "revision": "f63716704117f5bd34d8f0f068f7e8369d20d4ab", + 
"revisionTime": "2017-10-10T11:00:49Z" }, { - "checksumSHA1": "A3ymiEhz0WaT+sfK1aJhlqhf41o=", + "checksumSHA1": "3HVfwgLpCDH8JX211UWdrSi/GU4=", "path": "github.com/lib/pq", - "revision": "e42267488fe361b9dc034be7a6bffef5b195bceb", - "revisionTime": "2017-08-10T06:12:20Z" + "revision": "b609790bd85edf8e9ab7e0f8912750a786177bcf", + "revisionTime": "2017-10-22T19:20:43Z" }, { - "checksumSHA1": "q5SZBWFVC3wOIzftf+l/h5WLG1k=", + "checksumSHA1": "AU3fA8Sm33Vj9PBoRPSeYfxLRuE=", "path": "github.com/lib/pq/oid", - "revision": "e42267488fe361b9dc034be7a6bffef5b195bceb", - "revisionTime": "2017-08-10T06:12:20Z" + "revision": "b609790bd85edf8e9ab7e0f8912750a786177bcf", + "revisionTime": "2017-10-22T19:20:43Z" }, { "checksumSHA1": "T8soMJArSZrYnhmdpAnq1bVxQ6Q=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/github.com/mailru/easyjson/buffer", "path": "github.com/mailru/easyjson/buffer", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" + "revision": "4d347d79dea0067c945f374f990601decb08abb5", + "revisionTime": "2017-10-22T17:32:15Z" }, { - "checksumSHA1": "Xw5HIdQ2vlr5wkKl+8ANM9xp5Zs=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/github.com/mailru/easyjson/jlexer", + "checksumSHA1": "QA+9yav6Xzq7LmuZUREeOX1hfjk=", "path": "github.com/mailru/easyjson/jlexer", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" + "revision": "4d347d79dea0067c945f374f990601decb08abb5", + "revisionTime": "2017-10-22T17:32:15Z" }, { - "checksumSHA1": "tMeVsfYgDW9K0spDZkOBlvZYssw=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/github.com/mailru/easyjson/jwriter", + "checksumSHA1": "SEJUieuUW7Mj0adqvjTkes1ILXs=", "path": "github.com/mailru/easyjson/jwriter", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" + "revision": "4d347d79dea0067c945f374f990601decb08abb5", + "revisionTime": "2017-10-22T17:32:15Z" }, { "checksumSHA1": "cTDA66oZUy18cIzJsU1diKq+9CE=", @@ -1312,10 +1273,10 @@ "revisionTime": "2017-08-16T03:18:13Z" }, { - "checksumSHA1": "U6lX43KDDlNOn+Z0Yyww+ZzHfFo=", + "checksumSHA1": "y/A5iuvwjytQE2CqVuphQRXR2nI=", "path": "github.com/mattn/go-isatty", - "revision": "fc9e8d8ef48496124e79ae0df75490096eccf6fe", - "revisionTime": "2017-03-22T23:44:13Z" + "revision": "a5cdd64afdee435007ee3e9f6ed4684af949d568", + "revisionTime": "2017-09-25T05:49:04Z" }, { "checksumSHA1": "CIK3BBNX3nuUQCmNqTQydNfMNKI=", @@ -1354,16 +1315,16 @@ "revisionTime": "2016-12-03T19:45:07Z" }, { - "checksumSHA1": "6TBW88DSxRHf4WvOC9K5ilBZx/8=", + "checksumSHA1": "bDdhmDk8q6utWrccBhEOa6IoGkE=", "path": "github.com/mitchellh/go-testing-interface", - "revision": "7bf6f6eaf1bed2fd3c6c63114b18cb64facb9de2", - "revisionTime": "2017-09-01T15:50:38Z" + "revision": "a61a99592b77c9ba629d254a693acffaeb4b7e28", + "revisionTime": "2017-10-04T22:19:16Z" }, { - "checksumSHA1": "EHjhpHipgm+XGccrRAms9AW3Ewk=", + "checksumSHA1": "gILp4IL+xwXLH6tJtRLrnZ56F24=", "path": "github.com/mitchellh/mapstructure", - "revision": "d0303fe809921458f417bcf828397a65db30a7e4", - "revisionTime": "2017-05-23T03:00:23Z" + "revision": "06020f85339e21b2478f756a78e295255ffa4d6a", + "revisionTime": "2017-10-17T17:18:08Z" }, { "checksumSHA1": "AMU63CNOg4XmIhVR/S/Xttt1/f0=", @@ -1372,10 +1333,10 @@ "revisionTime": "2017-07-26T20:21:17Z" }, { - "checksumSHA1": "95Xtmhc8LANC3Y+3gn8yPVCt1uI=", + "checksumSHA1": "ClTd8kZFg6XnGO/UK6ZDekUCsWI=", "path": "github.com/ncw/swift", - 
"revision": "9d3f812e23d270d1c66a9a01e20af1005061cdc4", - "revisionTime": "2017-08-02T07:39:15Z" + "revision": "c95c6e5c2d1a3d37fc44c8c6dc9e231c7500667d", + "revisionTime": "2017-10-19T11:44:56Z" }, { "checksumSHA1": "OFNit1Qx2DdWhotfREKodDNUwCM=", @@ -1386,26 +1347,26 @@ { "checksumSHA1": "ZGlIwSRjdLYCUII7JLE++N4w7Xc=", "path": "github.com/opencontainers/image-spec/specs-go", - "revision": "ebd93fd0782379ca3d821f0fa74f0651a9347a3e", - "revisionTime": "2017-09-13T09:08:22Z" + "revision": "7c889fafd04a893f5c5f50b7ab9963d5d64e5242", + "revisionTime": "2017-09-29T21:48:53Z" }, { "checksumSHA1": "jdbXRRzeu0njLE9/nCEZG+Yg/Jk=", "path": "github.com/opencontainers/image-spec/specs-go/v1", - "revision": "ebd93fd0782379ca3d821f0fa74f0651a9347a3e", - "revisionTime": "2017-09-13T09:08:22Z" + "revision": "7c889fafd04a893f5c5f50b7ab9963d5d64e5242", + "revisionTime": "2017-09-29T21:48:53Z" }, { - "checksumSHA1": "Yd7vPcBsfWE12ZWNK0KuBiRNAJY=", + "checksumSHA1": "lTrral6+RK0PSpHl3Tf7/hETqd4=", "path": "github.com/opencontainers/runc/libcontainer/system", - "revision": "593914b8bd5448a93f7c3e4902a03408b6d5c0ce", - "revisionTime": "2017-09-12T19:22:00Z" + "revision": "9a1186d128c4c4df4b2c6722976b49898cbe2e03", + "revisionTime": "2017-10-25T22:23:19Z" }, { "checksumSHA1": "6R1LMwH2YzUvD6hL6PXzV48jb30=", "path": "github.com/opencontainers/runc/libcontainer/user", - "revision": "593914b8bd5448a93f7c3e4902a03408b6d5c0ce", - "revisionTime": "2017-09-12T19:22:00Z" + "revision": "9a1186d128c4c4df4b2c6722976b49898cbe2e03", + "revisionTime": "2017-10-25T22:23:19Z" }, { "checksumSHA1": "wJWRH5ORhyIO29LxvA/Sug1skF0=", @@ -1428,8 +1389,8 @@ { "checksumSHA1": "rJab1YdNhQooDiBWNnt7TLWPyBU=", "path": "github.com/pkg/errors", - "revision": "2b3a18b5f0fb6b4f9190549597d3f962c02bc5eb", - "revisionTime": "2017-09-10T13:46:14Z" + "revision": "f15c970de5b76fac0b59abb32d62c17cc7bed265", + "revisionTime": "2017-10-18T19:55:50Z" }, { "checksumSHA1": "rTNABfFJ9wtLQRH8uYNkEZGQOrY=", @@ -1486,10 +1447,10 @@ "revisionTime": "2017-01-28T01:21:29Z" }, { - "checksumSHA1": "Vo8anQz2gbx3FnURv/wpv57zWP4=", + "checksumSHA1": "U4ypvRxOj4YYX1YlHyeCPaFq5Xg=", "path": "github.com/samuel/go-zookeeper/zk", - "revision": "e6b59f6144beb8570562539c1898a0b1fea34b41", - "revisionTime": "2017-08-15T20:11:39Z" + "revision": "9a96098268ef555eb1f04d8b1ee813d0a87e5089", + "revisionTime": "2017-10-27T00:15:00Z" }, { "checksumSHA1": "iqUXcP3VA+G1/gVLRpQpBUt/BuA=", @@ -1500,8 +1461,8 @@ { "checksumSHA1": "YXWAUFGaKLSE8BIaCUIxAEW87s4=", "path": "github.com/sethgrid/pester", - "revision": "a86a2d88f4dc3c7dbf3a6a6bbbfb095690b834b6", - "revisionTime": "2017-08-16T16:42:08Z" + "revision": "0af5bab1e1ea2860c5aef8e77427bab011d774d8", + "revisionTime": "2017-09-19T13:57:42Z" }, { "checksumSHA1": "BYvROBsiyAXK4sq6yhDe8RgT4LM=", @@ -1510,462 +1471,484 @@ "revisionTime": "2017-08-22T13:27:46Z" }, { - "checksumSHA1": "STxYqRb4gnlSr3mRpT+Igfdz/kM=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/github.com/spf13/pflag", + "checksumSHA1": "Kby2HCu2WOsAHbS7x6zgWo5Ic4Q=", "path": "github.com/spf13/pflag", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" + "revision": "97afa5e7ca8a08a383cb259e06636b5e2cc7897f", + "revisionTime": "2017-10-20T11:06:17Z" }, { - "checksumSHA1": "0FCBLTqGOxgifAKfS0360iLTZR4=", + "checksumSHA1": "2xcr/mhxBFlDjpxe/Mc2Wb4RGR8=", + "path": "github.com/tv42/httpunix", + "revision": "b75d8614f926c077e48d85f1f8f7885b758c6225", + "revisionTime": "2015-04-27T01:28:21Z" + }, + { + 
"checksumSHA1": "gH74EO4Gd6AOhsGt3mYmRFCkk54=", "path": "github.com/ugorji/go/codec", - "revision": "8c0409fcbb70099c748d71f714529204975f6c3f", - "revisionTime": "2017-08-26T15:59:43Z" + "revision": "459bba837a9de7a4d9c58fa5a1171a2a3c96d0d0", + "revisionTime": "2017-10-27T08:51:27Z" }, { "checksumSHA1": "UWjVYmoHlIfHzVIskELHiJQtMOI=", "path": "golang.org/x/crypto/bcrypt", - "revision": "b0c9c05bfe149df95eb1d25642162cca051e0466", - "revisionTime": "2017-09-15T01:37:37Z" + "revision": "2509b142fb2b797aa7587dad548f113b2c0f20ce", + "revisionTime": "2017-10-23T14:45:55Z" }, { "checksumSHA1": "oVPHWesOmZ02vLq2fglGvf+AMgk=", "path": "golang.org/x/crypto/blowfish", - "revision": "b0c9c05bfe149df95eb1d25642162cca051e0466", - "revisionTime": "2017-09-15T01:37:37Z" + "revision": "2509b142fb2b797aa7587dad548f113b2c0f20ce", + "revisionTime": "2017-10-23T14:45:55Z" }, { "checksumSHA1": "IQkUIOnvlf0tYloFx9mLaXSvXWQ=", "path": "golang.org/x/crypto/curve25519", - "revision": "b0c9c05bfe149df95eb1d25642162cca051e0466", - "revisionTime": "2017-09-15T01:37:37Z" + "revision": "2509b142fb2b797aa7587dad548f113b2c0f20ce", + "revisionTime": "2017-10-23T14:45:55Z" }, { "checksumSHA1": "X6Q8nYb+KXh+64AKHwWOOcyijHQ=", "path": "golang.org/x/crypto/ed25519", - "revision": "b0c9c05bfe149df95eb1d25642162cca051e0466", - "revisionTime": "2017-09-15T01:37:37Z" + "revision": "2509b142fb2b797aa7587dad548f113b2c0f20ce", + "revisionTime": "2017-10-23T14:45:55Z" }, { "checksumSHA1": "LXFcVx8I587SnWmKycSDEq9yvK8=", "path": "golang.org/x/crypto/ed25519/internal/edwards25519", - "revision": "b0c9c05bfe149df95eb1d25642162cca051e0466", - "revisionTime": "2017-09-15T01:37:37Z" + "revision": "2509b142fb2b797aa7587dad548f113b2c0f20ce", + "revisionTime": "2017-10-23T14:45:55Z" }, { "checksumSHA1": "4D8hxMIaSDEW5pCQk22Xj4DcDh4=", "path": "golang.org/x/crypto/hkdf", - "revision": "b0c9c05bfe149df95eb1d25642162cca051e0466", - "revisionTime": "2017-09-15T01:37:37Z" + "revision": "2509b142fb2b797aa7587dad548f113b2c0f20ce", + "revisionTime": "2017-10-23T14:45:55Z" }, { "checksumSHA1": "MCeXr2RNeiG1XG6V+er1OR0qyeo=", "path": "golang.org/x/crypto/md4", - "revision": "b0c9c05bfe149df95eb1d25642162cca051e0466", - "revisionTime": "2017-09-15T01:37:37Z" + "revision": "2509b142fb2b797aa7587dad548f113b2c0f20ce", + "revisionTime": "2017-10-23T14:45:55Z" }, { - "checksumSHA1": "yPCMw2UX8Jad4GNWSwo8QgZS6TY=", + "checksumSHA1": "EGImhmIP401D+CChQfTscz2RuGE=", "path": "golang.org/x/crypto/ssh", - "revision": "b0c9c05bfe149df95eb1d25642162cca051e0466", - "revisionTime": "2017-09-15T01:37:37Z" + "revision": "2509b142fb2b797aa7587dad548f113b2c0f20ce", + "revisionTime": "2017-10-23T14:45:55Z" }, { "checksumSHA1": "ujKeyWHFOYmXm5IgAxfyFCGefsY=", "path": "golang.org/x/crypto/ssh/agent", - "revision": "b0c9c05bfe149df95eb1d25642162cca051e0466", - "revisionTime": "2017-09-15T01:37:37Z" + "revision": "2509b142fb2b797aa7587dad548f113b2c0f20ce", + "revisionTime": "2017-10-23T14:45:55Z" }, { "checksumSHA1": "nqWNlnMmVpt628zzvyo6Yv2CX5Q=", "path": "golang.org/x/crypto/ssh/terminal", - "revision": "b0c9c05bfe149df95eb1d25642162cca051e0466", - "revisionTime": "2017-09-15T01:37:37Z" + "revision": "2509b142fb2b797aa7587dad548f113b2c0f20ce", + "revisionTime": "2017-10-23T14:45:55Z" }, { - "checksumSHA1": "dr5+PfIRzXeN+l1VG+s0lea9qz8=", + "checksumSHA1": "GtamqiJoL7PGHsN454AoffBFMa8=", "path": "golang.org/x/net/context", - "revision": "8351a756f30f1297fe94bbf4b767ec589c6ea6d0", - "revisionTime": "2017-09-15T01:39:56Z" + "revision": "c73622c77280266305273cb545f54516ced95b93", 
+ "revisionTime": "2017-06-11T01:16:46Z" }, { "checksumSHA1": "WHc3uByvGaMcnSoI21fhzYgbOgg=", "path": "golang.org/x/net/context/ctxhttp", - "revision": "8351a756f30f1297fe94bbf4b767ec589c6ea6d0", - "revisionTime": "2017-09-15T01:39:56Z" + "revision": "c73622c77280266305273cb545f54516ced95b93", + "revisionTime": "2017-06-11T01:16:46Z" }, { - "checksumSHA1": "cY4u3LCdJxKaS2GbftZjfrOSnNE=", + "checksumSHA1": "SHTyxlWxNjRwA7o3AiBM87PawSA=", "path": "golang.org/x/net/http2", - "revision": "8351a756f30f1297fe94bbf4b767ec589c6ea6d0", - "revisionTime": "2017-09-15T01:39:56Z" + "revision": "c73622c77280266305273cb545f54516ced95b93", + "revisionTime": "2017-06-11T01:16:46Z" }, { "checksumSHA1": "ezWhc7n/FtqkLDQKeU2JbW+80tE=", "path": "golang.org/x/net/http2/hpack", - "revision": "8351a756f30f1297fe94bbf4b767ec589c6ea6d0", - "revisionTime": "2017-09-15T01:39:56Z" + "revision": "c73622c77280266305273cb545f54516ced95b93", + "revisionTime": "2017-06-11T01:16:46Z" }, { - "checksumSHA1": "1osdKBIU5mNqyQqiGmnutoTzdJA=", + "checksumSHA1": "H181RWl7GTS90aCWW6v4atV3pAg=", "path": "golang.org/x/net/idna", - "revision": "8351a756f30f1297fe94bbf4b767ec589c6ea6d0", - "revisionTime": "2017-09-15T01:39:56Z" + "revision": "c73622c77280266305273cb545f54516ced95b93", + "revisionTime": "2017-06-11T01:16:46Z" }, { "checksumSHA1": "UxahDzW2v4mf/+aFxruuupaoIwo=", "path": "golang.org/x/net/internal/timeseries", - "revision": "8351a756f30f1297fe94bbf4b767ec589c6ea6d0", - "revisionTime": "2017-09-15T01:39:56Z" + "revision": "c73622c77280266305273cb545f54516ced95b93", + "revisionTime": "2017-06-11T01:16:46Z" }, { "checksumSHA1": "3xyuaSNmClqG4YWC7g0isQIbUTc=", "path": "golang.org/x/net/lex/httplex", - "revision": "8351a756f30f1297fe94bbf4b767ec589c6ea6d0", - "revisionTime": "2017-09-15T01:39:56Z" + "revision": "c73622c77280266305273cb545f54516ced95b93", + "revisionTime": "2017-06-11T01:16:46Z" }, { "checksumSHA1": "u/r66lwYfgg682u5hZG7/E7+VCY=", "path": "golang.org/x/net/trace", - "revision": "8351a756f30f1297fe94bbf4b767ec589c6ea6d0", - "revisionTime": "2017-09-15T01:39:56Z" + "revision": "c73622c77280266305273cb545f54516ced95b93", + "revisionTime": "2017-06-11T01:16:46Z" }, { "checksumSHA1": "HmVJmSDDCwsPJQrp7ml2gXb2szg=", "path": "golang.org/x/oauth2", - "revision": "13449ad91cb26cb47661c1b080790392170385fd", - "revisionTime": "2017-06-22T15:12:08Z" + "revision": "bb50c06baba3d0c76f9d125c0719093e315b5b44", + "revisionTime": "2017-09-28T00:25:42Z" }, { "checksumSHA1": "JTBn9MQUhwHtjwv7rC9Zg4KRN7g=", "path": "golang.org/x/oauth2/google", - "revision": "13449ad91cb26cb47661c1b080790392170385fd", - "revisionTime": "2017-06-22T15:12:08Z" + "revision": "bb50c06baba3d0c76f9d125c0719093e315b5b44", + "revisionTime": "2017-09-28T00:25:42Z" }, { - "checksumSHA1": "vryfsqFS63avRRqF2dR9K40oX2A=", + "checksumSHA1": "4XCNzl3cgGtdd0SrUZHSBdsW+m4=", "path": "golang.org/x/oauth2/internal", - "revision": "13449ad91cb26cb47661c1b080790392170385fd", - "revisionTime": "2017-06-22T15:12:08Z" + "revision": "bb50c06baba3d0c76f9d125c0719093e315b5b44", + "revisionTime": "2017-09-28T00:25:42Z" }, { "checksumSHA1": "huVltYnXdRFDJLgp/ZP9IALzG7g=", "path": "golang.org/x/oauth2/jws", - "revision": "13449ad91cb26cb47661c1b080790392170385fd", - "revisionTime": "2017-06-22T15:12:08Z" + "revision": "bb50c06baba3d0c76f9d125c0719093e315b5b44", + "revisionTime": "2017-09-28T00:25:42Z" }, { "checksumSHA1": "/eV4E08BY+f1ZikiR7OOMJAj3m0=", "path": "golang.org/x/oauth2/jwt", - "revision": "13449ad91cb26cb47661c1b080790392170385fd", - "revisionTime": 
"2017-06-22T15:12:08Z" + "revision": "bb50c06baba3d0c76f9d125c0719093e315b5b44", + "revisionTime": "2017-09-28T00:25:42Z" }, { - "checksumSHA1": "A+9lemYS6e/BXlyFTsm9CeK5dKU=", + "checksumSHA1": "J+0Np8tuhF3iFQzW2nyul/8P2uQ=", "path": "golang.org/x/sys/unix", - "revision": "062cd7e4e68206d8bab9b18396626e855c992658", - "revisionTime": "2017-09-12T16:19:26Z" + "revision": "b98136db334ff9cb24f28a68e3be3cb6608f7630", + "revisionTime": "2017-10-27T08:05:40Z" }, { - "checksumSHA1": "pBPFzDGt3AVSRffB7ffiUnruFUk=", + "checksumSHA1": "8BcMOi8XTSigDtV2npDc8vMrS60=", "path": "golang.org/x/sys/windows", - "revision": "062cd7e4e68206d8bab9b18396626e855c992658", - "revisionTime": "2017-09-12T16:19:26Z" + "revision": "b98136db334ff9cb24f28a68e3be3cb6608f7630", + "revisionTime": "2017-10-27T08:05:40Z" }, { "checksumSHA1": "tltivJ/uj/lqLk05IqGfCv2F/E8=", "path": "golang.org/x/text/secure/bidirule", - "revision": "1cbadb444a806fd9430d14ad08967ed91da4fa0a", - "revisionTime": "2017-09-13T19:45:57Z" + "revision": "6eab0e8f74e86c598ec3b6fad4888e0c11482d48", + "revisionTime": "2017-09-27T13:34:20Z" }, { "checksumSHA1": "ziMb9+ANGRJSSIuxYdRbA+cDRBQ=", "path": "golang.org/x/text/transform", - "revision": "1cbadb444a806fd9430d14ad08967ed91da4fa0a", - "revisionTime": "2017-09-13T19:45:57Z" + "revision": "6eab0e8f74e86c598ec3b6fad4888e0c11482d48", + "revisionTime": "2017-09-27T13:34:20Z" }, { "checksumSHA1": "iB6/RoQIzBaZxVi+t7tzbkwZTlo=", "path": "golang.org/x/text/unicode/bidi", - "revision": "1cbadb444a806fd9430d14ad08967ed91da4fa0a", - "revisionTime": "2017-09-13T19:45:57Z" + "revision": "6eab0e8f74e86c598ec3b6fad4888e0c11482d48", + "revisionTime": "2017-09-27T13:34:20Z" }, { "checksumSHA1": "km/8bLtOpIP7sua4MnEmiSDYTAE=", "path": "golang.org/x/text/unicode/norm", - "revision": "1cbadb444a806fd9430d14ad08967ed91da4fa0a", - "revisionTime": "2017-09-13T19:45:57Z" + "revision": "6eab0e8f74e86c598ec3b6fad4888e0c11482d48", + "revisionTime": "2017-09-27T13:34:20Z" }, { - "checksumSHA1": "U1OTBlgTRUe9ZdMsbISL1E+eMm8=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/golang.org/x/text/width", + "checksumSHA1": "vqpfIpBlmIbvehshHVEITE0v0F4=", "path": "golang.org/x/text/width", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" + "revision": "6eab0e8f74e86c598ec3b6fad4888e0c11482d48", + "revisionTime": "2017-09-27T13:34:20Z" }, { - "checksumSHA1": "dzy9lJFRFg4ewapupNs5pbhJQxQ=", + "checksumSHA1": "ouLYuajCRKYxNNhPgu11YGnbahI=", "path": "google.golang.org/api/compute/v1", - "revision": "272693b6005d0c0dc1c7933ce47dd51ee50dbedc", - "revisionTime": "2017-09-16T00:03:21Z" + "revision": "fa8b542fb82f32670060f51653bc8aa2ce55daa5", + "revisionTime": "2017-10-27T00:03:20Z" }, { "checksumSHA1": "/y0saWnM+kTnSvZrNlvoNOgj0Uo=", "path": "google.golang.org/api/gensupport", - "revision": "272693b6005d0c0dc1c7933ce47dd51ee50dbedc", - "revisionTime": "2017-09-16T00:03:21Z" + "revision": "fa8b542fb82f32670060f51653bc8aa2ce55daa5", + "revisionTime": "2017-10-27T00:03:20Z" }, { "checksumSHA1": "BWKmb7kGYbfbvXO6E7tCpTh9zKE=", "path": "google.golang.org/api/googleapi", - "revision": "272693b6005d0c0dc1c7933ce47dd51ee50dbedc", - "revisionTime": "2017-09-16T00:03:21Z" + "revision": "fa8b542fb82f32670060f51653bc8aa2ce55daa5", + "revisionTime": "2017-10-27T00:03:20Z" }, { "checksumSHA1": "1K0JxrUfDqAB3MyRiU1LKjfHyf4=", "path": "google.golang.org/api/googleapi/internal/uritemplates", - "revision": "272693b6005d0c0dc1c7933ce47dd51ee50dbedc", - "revisionTime": 
"2017-09-16T00:03:21Z" + "revision": "fa8b542fb82f32670060f51653bc8aa2ce55daa5", + "revisionTime": "2017-10-27T00:03:20Z" }, { "checksumSHA1": "Mr2fXhMRzlQCgANFm91s536pG7E=", "path": "google.golang.org/api/googleapi/transport", - "revision": "272693b6005d0c0dc1c7933ce47dd51ee50dbedc", - "revisionTime": "2017-09-16T00:03:21Z" + "revision": "fa8b542fb82f32670060f51653bc8aa2ce55daa5", + "revisionTime": "2017-10-27T00:03:20Z" }, { - "checksumSHA1": "g9xWra2wrCn40g/IWToJpNDN/i0=", + "checksumSHA1": "vksLwUoFeLILZ83yi70y+eOFdeY=", "path": "google.golang.org/api/iam/v1", - "revision": "272693b6005d0c0dc1c7933ce47dd51ee50dbedc", - "revisionTime": "2017-09-16T00:03:21Z" + "revision": "fa8b542fb82f32670060f51653bc8aa2ce55daa5", + "revisionTime": "2017-10-27T00:03:20Z" }, { - "checksumSHA1": "dENAVft6XToomTHrm5J2zFt4hgU=", + "checksumSHA1": "CpjSGeyQJbLLPxVl/CWs5o9p+jU=", "path": "google.golang.org/api/internal", - "revision": "272693b6005d0c0dc1c7933ce47dd51ee50dbedc", - "revisionTime": "2017-09-16T00:03:21Z" + "revision": "fa8b542fb82f32670060f51653bc8aa2ce55daa5", + "revisionTime": "2017-10-27T00:03:20Z" }, { "checksumSHA1": "slcGOTGSdukEPPSN81Q5WZGmhog=", "path": "google.golang.org/api/iterator", - "revision": "272693b6005d0c0dc1c7933ce47dd51ee50dbedc", - "revisionTime": "2017-09-16T00:03:21Z" + "revision": "fa8b542fb82f32670060f51653bc8aa2ce55daa5", + "revisionTime": "2017-10-27T00:03:20Z" }, { - "checksumSHA1": "QYFBVWO0ZiQQ74rzTsrvQccfzx4=", + "checksumSHA1": "1D3XnZy4TWLBstH2IzOHZHLS9HA=", "path": "google.golang.org/api/oauth2/v2", - "revision": "272693b6005d0c0dc1c7933ce47dd51ee50dbedc", - "revisionTime": "2017-09-16T00:03:21Z" + "revision": "fa8b542fb82f32670060f51653bc8aa2ce55daa5", + "revisionTime": "2017-10-27T00:03:20Z" }, { - "checksumSHA1": "Y3CG3ZFIYfF6AhvpiBMBAGcZMV4=", + "checksumSHA1": "Z9LQvCPO0WV9PdjgIXlfVOGZRlM=", "path": "google.golang.org/api/option", - "revision": "272693b6005d0c0dc1c7933ce47dd51ee50dbedc", - "revisionTime": "2017-09-16T00:03:21Z" + "revision": "fa8b542fb82f32670060f51653bc8aa2ce55daa5", + "revisionTime": "2017-10-27T00:03:20Z" }, { - "checksumSHA1": "TGfdoT48AYtv72z9lOewZzzYwKk=", + "checksumSHA1": "Zd7ojgrWPn3j7fLx9HB6/Oub8lE=", "path": "google.golang.org/api/storage/v1", - "revision": "272693b6005d0c0dc1c7933ce47dd51ee50dbedc", - "revisionTime": "2017-09-16T00:03:21Z" + "revision": "fa8b542fb82f32670060f51653bc8aa2ce55daa5", + "revisionTime": "2017-10-27T00:03:20Z" }, { - "checksumSHA1": "a4x7iUjDZxTMoskp8RoBnt4bRrc=", + "checksumSHA1": "ykzqoYJiMCS6LGBq/zszKFbxGeA=", "path": "google.golang.org/api/transport/http", - "revision": "272693b6005d0c0dc1c7933ce47dd51ee50dbedc", - "revisionTime": "2017-09-16T00:03:21Z" + "revision": "fa8b542fb82f32670060f51653bc8aa2ce55daa5", + "revisionTime": "2017-10-27T00:03:20Z" }, { "checksumSHA1": "WPEbk80NB3Esdh4Yk0PXr2K7xVU=", "path": "google.golang.org/appengine", - "revision": "d9a072cfa7b9736e44311ef77b3e09d804bfa599", - "revisionTime": "2017-08-14T19:09:42Z" + "revision": "a2e0dc829727a4f957a7428b1f322805cfc1f362", + "revisionTime": "2017-10-11T21:50:12Z" }, { "checksumSHA1": "4o2JkeR2LyUfZ7BQIzHUejyqKno=", "path": "google.golang.org/appengine/internal", - "revision": "d9a072cfa7b9736e44311ef77b3e09d804bfa599", - "revisionTime": "2017-08-14T19:09:42Z" + "revision": "a2e0dc829727a4f957a7428b1f322805cfc1f362", + "revisionTime": "2017-10-11T21:50:12Z" }, { "checksumSHA1": "x6Thdfyasqd68dWZWqzWWeIfAfI=", "path": "google.golang.org/appengine/internal/app_identity", - "revision": "d9a072cfa7b9736e44311ef77b3e09d804bfa599", 
- "revisionTime": "2017-08-14T19:09:42Z" + "revision": "a2e0dc829727a4f957a7428b1f322805cfc1f362", + "revisionTime": "2017-10-11T21:50:12Z" }, { "checksumSHA1": "TsNO8P0xUlLNyh3Ic/tzSp/fDWM=", "path": "google.golang.org/appengine/internal/base", - "revision": "d9a072cfa7b9736e44311ef77b3e09d804bfa599", - "revisionTime": "2017-08-14T19:09:42Z" + "revision": "a2e0dc829727a4f957a7428b1f322805cfc1f362", + "revisionTime": "2017-10-11T21:50:12Z" }, { "checksumSHA1": "5QsV5oLGSfKZqTCVXP6NRz5T4Tw=", "path": "google.golang.org/appengine/internal/datastore", - "revision": "d9a072cfa7b9736e44311ef77b3e09d804bfa599", - "revisionTime": "2017-08-14T19:09:42Z" + "revision": "a2e0dc829727a4f957a7428b1f322805cfc1f362", + "revisionTime": "2017-10-11T21:50:12Z" }, { "checksumSHA1": "Gep2T9zmVYV8qZfK2gu3zrmG6QE=", "path": "google.golang.org/appengine/internal/log", - "revision": "d9a072cfa7b9736e44311ef77b3e09d804bfa599", - "revisionTime": "2017-08-14T19:09:42Z" + "revision": "a2e0dc829727a4f957a7428b1f322805cfc1f362", + "revisionTime": "2017-10-11T21:50:12Z" }, { "checksumSHA1": "eLZVX1EHLclFtQnjDIszsdyWRHo=", "path": "google.golang.org/appengine/internal/modules", - "revision": "d9a072cfa7b9736e44311ef77b3e09d804bfa599", - "revisionTime": "2017-08-14T19:09:42Z" + "revision": "a2e0dc829727a4f957a7428b1f322805cfc1f362", + "revisionTime": "2017-10-11T21:50:12Z" }, { "checksumSHA1": "a1XY7rz3BieOVqVI2Et6rKiwQCk=", "path": "google.golang.org/appengine/internal/remote_api", - "revision": "d9a072cfa7b9736e44311ef77b3e09d804bfa599", - "revisionTime": "2017-08-14T19:09:42Z" + "revision": "a2e0dc829727a4f957a7428b1f322805cfc1f362", + "revisionTime": "2017-10-11T21:50:12Z" }, { "checksumSHA1": "QtAbHtHmDzcf6vOV9eqlCpKgjiw=", "path": "google.golang.org/appengine/internal/urlfetch", - "revision": "d9a072cfa7b9736e44311ef77b3e09d804bfa599", - "revisionTime": "2017-08-14T19:09:42Z" + "revision": "a2e0dc829727a4f957a7428b1f322805cfc1f362", + "revisionTime": "2017-10-11T21:50:12Z" }, { "checksumSHA1": "akOV9pYnCbcPA8wJUutSQVibdyg=", "path": "google.golang.org/appengine/urlfetch", - "revision": "d9a072cfa7b9736e44311ef77b3e09d804bfa599", - "revisionTime": "2017-08-14T19:09:42Z" + "revision": "a2e0dc829727a4f957a7428b1f322805cfc1f362", + "revisionTime": "2017-10-11T21:50:12Z" }, { "checksumSHA1": "B22iMMY2vi1Q9kseWb/ZznpW8lQ=", "path": "google.golang.org/genproto/googleapis/api/annotations", - "revision": "595979c8a7bf586b2d293fb42246bf91a0b893d9", - "revisionTime": "2017-09-04T05:01:39Z" + "revision": "f676e0f3ac6395ff1a529ae59a6670878a8371a6", + "revisionTime": "2017-10-02T23:26:14Z" }, { "checksumSHA1": "m5IWVQJ4fVYc3b+5OrZ7BdNlvkA=", "path": "google.golang.org/genproto/googleapis/iam/v1", - "revision": "595979c8a7bf586b2d293fb42246bf91a0b893d9", - "revisionTime": "2017-09-04T05:01:39Z" + "revision": "f676e0f3ac6395ff1a529ae59a6670878a8371a6", + "revisionTime": "2017-10-02T23:26:14Z" }, { - "checksumSHA1": "AvVpgwhxhJgjoSledwDtYrEKVE4=", + "checksumSHA1": "Tc3BU26zThLzcyqbVtiSEp7EpU8=", "path": "google.golang.org/genproto/googleapis/rpc/status", - "revision": "595979c8a7bf586b2d293fb42246bf91a0b893d9", - "revisionTime": "2017-09-04T05:01:39Z" + "revision": "f676e0f3ac6395ff1a529ae59a6670878a8371a6", + "revisionTime": "2017-10-02T23:26:14Z" }, { - "checksumSHA1": "f6mUuyE+Und9bzlmSwp11fztD7s=", + "checksumSHA1": "oEv2CEQW7EqPB5d1cISRHirSS88=", "path": "google.golang.org/grpc", - "revision": "bb78878767b96d411e740439ac820f118e95ae2f", - "revisionTime": "2017-09-15T00:40:38Z" + "revision": 
"a4ff4e29c4bb071c28d9a81571c0d8bbdc644f9f", + "revisionTime": "2017-10-26T23:03:44Z" }, { - "checksumSHA1": "AS14kO6FrzuQ/8wP+a/n55SNW6U=", + "checksumSHA1": "HoJvHF9RxOinJPAAbAhfZSNUxBY=", "path": "google.golang.org/grpc/balancer", - "revision": "bb78878767b96d411e740439ac820f118e95ae2f", - "revisionTime": "2017-09-15T00:40:38Z" + "revision": "a4ff4e29c4bb071c28d9a81571c0d8bbdc644f9f", + "revisionTime": "2017-10-26T23:03:44Z" + }, + { + "checksumSHA1": "os98urLvZVriKRbHhIsipJfcT7Q=", + "path": "google.golang.org/grpc/balancer/roundrobin", + "revision": "a4ff4e29c4bb071c28d9a81571c0d8bbdc644f9f", + "revisionTime": "2017-10-26T23:03:44Z" }, { "checksumSHA1": "Dkjgw1HasWvqct0IuiZdjbD7O0c=", "path": "google.golang.org/grpc/codes", - "revision": "bb78878767b96d411e740439ac820f118e95ae2f", - "revisionTime": "2017-09-15T00:40:38Z" + "revision": "a4ff4e29c4bb071c28d9a81571c0d8bbdc644f9f", + "revisionTime": "2017-10-26T23:03:44Z" }, { "checksumSHA1": "XH2WYcDNwVO47zYShREJjcYXm0Y=", "path": "google.golang.org/grpc/connectivity", - "revision": "bb78878767b96d411e740439ac820f118e95ae2f", - "revisionTime": "2017-09-15T00:40:38Z" + "revision": "a4ff4e29c4bb071c28d9a81571c0d8bbdc644f9f", + "revisionTime": "2017-10-26T23:03:44Z" }, { - "checksumSHA1": "5ylThBvJnIcyWhL17AC9+Sdbw2E=", + "checksumSHA1": "EEpFv96tNVcG9Z42ehroNbG/VKI=", "path": "google.golang.org/grpc/credentials", - "revision": "bb78878767b96d411e740439ac820f118e95ae2f", - "revisionTime": "2017-09-15T00:40:38Z" + "revision": "a4ff4e29c4bb071c28d9a81571c0d8bbdc644f9f", + "revisionTime": "2017-10-26T23:03:44Z" }, { - "checksumSHA1": "WxP3QV0Y4fIx5NsT0dwBp6JsrJE=", + "checksumSHA1": "H7SuPUqbPcdbNqgl+k3ohuwMAwE=", "path": "google.golang.org/grpc/grpclb/grpc_lb_v1/messages", - "revision": "bb78878767b96d411e740439ac820f118e95ae2f", - "revisionTime": "2017-09-15T00:40:38Z" + "revision": "a4ff4e29c4bb071c28d9a81571c0d8bbdc644f9f", + "revisionTime": "2017-10-26T23:03:44Z" }, { "checksumSHA1": "ntHev01vgZgeIh5VFRmbLx/BSTo=", "path": "google.golang.org/grpc/grpclog", - "revision": "bb78878767b96d411e740439ac820f118e95ae2f", - "revisionTime": "2017-09-15T00:40:38Z" + "revision": "a4ff4e29c4bb071c28d9a81571c0d8bbdc644f9f", + "revisionTime": "2017-10-26T23:03:44Z" }, { "checksumSHA1": "/M6Lug7Dj22dZNu4X6bZDVa5mkQ=", "path": "google.golang.org/grpc/health", - "revision": "bb78878767b96d411e740439ac820f118e95ae2f", - "revisionTime": "2017-09-15T00:40:38Z" + "revision": "a4ff4e29c4bb071c28d9a81571c0d8bbdc644f9f", + "revisionTime": "2017-10-26T23:03:44Z" }, { "checksumSHA1": "6vY7tYjV84pnr3sDctzx53Bs8b0=", "path": "google.golang.org/grpc/health/grpc_health_v1", - "revision": "bb78878767b96d411e740439ac820f118e95ae2f", - "revisionTime": "2017-09-15T00:40:38Z" + "revision": "a4ff4e29c4bb071c28d9a81571c0d8bbdc644f9f", + "revisionTime": "2017-10-26T23:03:44Z" }, { "checksumSHA1": "U9vDe05/tQrvFBojOQX8Xk12W9I=", "path": "google.golang.org/grpc/internal", - "revision": "bb78878767b96d411e740439ac820f118e95ae2f", - "revisionTime": "2017-09-15T00:40:38Z" + "revision": "a4ff4e29c4bb071c28d9a81571c0d8bbdc644f9f", + "revisionTime": "2017-10-26T23:03:44Z" }, { "checksumSHA1": "hcuHgKp8W0wIzoCnNfKI8NUss5o=", "path": "google.golang.org/grpc/keepalive", - "revision": "bb78878767b96d411e740439ac820f118e95ae2f", - "revisionTime": "2017-09-15T00:40:38Z" + "revision": "a4ff4e29c4bb071c28d9a81571c0d8bbdc644f9f", + "revisionTime": "2017-10-26T23:03:44Z" }, { "checksumSHA1": "KeUmTZV+2X46C49cKyjp+xM7fvw=", "path": "google.golang.org/grpc/metadata", - "revision": 
"bb78878767b96d411e740439ac820f118e95ae2f", - "revisionTime": "2017-09-15T00:40:38Z" + "revision": "a4ff4e29c4bb071c28d9a81571c0d8bbdc644f9f", + "revisionTime": "2017-10-26T23:03:44Z" }, { "checksumSHA1": "dgwdT20kXe4ZbXBOFbTwVQt8rmA=", "path": "google.golang.org/grpc/naming", - "revision": "bb78878767b96d411e740439ac820f118e95ae2f", - "revisionTime": "2017-09-15T00:40:38Z" + "revision": "a4ff4e29c4bb071c28d9a81571c0d8bbdc644f9f", + "revisionTime": "2017-10-26T23:03:44Z" }, { "checksumSHA1": "n5EgDdBqFMa2KQFhtl+FF/4gIFo=", "path": "google.golang.org/grpc/peer", - "revision": "bb78878767b96d411e740439ac820f118e95ae2f", - "revisionTime": "2017-09-15T00:40:38Z" + "revision": "a4ff4e29c4bb071c28d9a81571c0d8bbdc644f9f", + "revisionTime": "2017-10-26T23:03:44Z" }, { - "checksumSHA1": "bJJaQUbZqEpTlnXTXgGwOZWijs4=", + "checksumSHA1": "H7VyP18nJ9MmoB5r9+I7EKVEeVM=", "path": "google.golang.org/grpc/resolver", - "revision": "bb78878767b96d411e740439ac820f118e95ae2f", - "revisionTime": "2017-09-15T00:40:38Z" + "revision": "a4ff4e29c4bb071c28d9a81571c0d8bbdc644f9f", + "revisionTime": "2017-10-26T23:03:44Z" }, { - "checksumSHA1": "mN8hVA1AgNCqERfkX/4l/EM6srY=", + "checksumSHA1": "WpWF+bDzObsHf+bjoGpb/abeFxo=", + "path": "google.golang.org/grpc/resolver/dns", + "revision": "a4ff4e29c4bb071c28d9a81571c0d8bbdc644f9f", + "revisionTime": "2017-10-26T23:03:44Z" + }, + { + "checksumSHA1": "zs9M4xE8Lyg4wvuYvR00XoBxmuw=", + "path": "google.golang.org/grpc/resolver/passthrough", + "revision": "a4ff4e29c4bb071c28d9a81571c0d8bbdc644f9f", + "revisionTime": "2017-10-26T23:03:44Z" + }, + { + "checksumSHA1": "G9lgXNi7qClo5sM2s6TbTHLFR3g=", "path": "google.golang.org/grpc/stats", - "revision": "bb78878767b96d411e740439ac820f118e95ae2f", - "revisionTime": "2017-09-15T00:40:38Z" + "revision": "a4ff4e29c4bb071c28d9a81571c0d8bbdc644f9f", + "revisionTime": "2017-10-26T23:03:44Z" }, { "checksumSHA1": "3Dwz4RLstDHMPyDA7BUsYe+JP4w=", "path": "google.golang.org/grpc/status", - "revision": "bb78878767b96d411e740439ac820f118e95ae2f", - "revisionTime": "2017-09-15T00:40:38Z" + "revision": "a4ff4e29c4bb071c28d9a81571c0d8bbdc644f9f", + "revisionTime": "2017-10-26T23:03:44Z" }, { "checksumSHA1": "qvArRhlrww5WvRmbyMF2mUfbJew=", "path": "google.golang.org/grpc/tap", - "revision": "bb78878767b96d411e740439ac820f118e95ae2f", - "revisionTime": "2017-09-15T00:40:38Z" + "revision": "a4ff4e29c4bb071c28d9a81571c0d8bbdc644f9f", + "revisionTime": "2017-10-26T23:03:44Z" }, { - "checksumSHA1": "0x1PeRRZrezdlHZZUK6pNRCKfOc=", + "checksumSHA1": "k7R0aB/gX+jyo9LHBJ5419OFLoA=", "path": "google.golang.org/grpc/transport", - "revision": "bb78878767b96d411e740439ac820f118e95ae2f", - "revisionTime": "2017-09-15T00:40:38Z" + "revision": "a4ff4e29c4bb071c28d9a81571c0d8bbdc644f9f", + "revisionTime": "2017-10-26T23:03:44Z" }, { "checksumSHA1": "xsaHqy6/sonLV6xIxTNh4FfkWbU=", @@ -2052,172 +2035,140 @@ "revisionTime": "2017-08-12T16:00:11Z" }, { - "checksumSHA1": "9Zj/HZTQTW8j2n2mcVKGA875rpo=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/k8s.io/apimachinery/pkg/api/errors", + "checksumSHA1": "iGhGLuzo/RvZapLDylR4A3F2zPg=", "path": "k8s.io/apimachinery/pkg/api/errors", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" + "revision": "d9697193dfed7468f65da0d9d231c20b634425df", + "revisionTime": "2017-10-25T19:24:24Z" }, { - "checksumSHA1": "QqnlOlmLEchB3EtUc3c46eGngUA=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource", + 
"checksumSHA1": "WT/IW4NJz8vg/TYQDOCysJHFnS8=", "path": "k8s.io/apimachinery/pkg/api/resource", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" + "revision": "d9697193dfed7468f65da0d9d231c20b634425df", + "revisionTime": "2017-10-25T19:24:24Z" }, { - "checksumSHA1": "RrwPUbyprtkpsQODF1/BwbVtS8c=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1", + "checksumSHA1": "OfPWJLjBJe0IHDYH0j7Ug82/g1M=", "path": "k8s.io/apimachinery/pkg/apis/meta/v1", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" + "revision": "d9697193dfed7468f65da0d9d231c20b634425df", + "revisionTime": "2017-10-25T19:24:24Z" }, { - "checksumSHA1": "FO6DXFv4jjIqq98S4v0ke9fQSRk=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/k8s.io/apimachinery/pkg/conversion", + "checksumSHA1": "Zv+sRDZkzH9GJBmcagRJhEOBndw=", "path": "k8s.io/apimachinery/pkg/conversion", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" + "revision": "d9697193dfed7468f65da0d9d231c20b634425df", + "revisionTime": "2017-10-25T19:24:24Z" }, { - "checksumSHA1": "Cn/QaiTl+Yioenr4/XvdacapSwg=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/k8s.io/apimachinery/pkg/conversion/queryparams", + "checksumSHA1": "GA82FBmiCzVIXGUnTZtz5z5Er0s=", "path": "k8s.io/apimachinery/pkg/conversion/queryparams", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" + "revision": "d9697193dfed7468f65da0d9d231c20b634425df", + "revisionTime": "2017-10-25T19:24:24Z" }, { - "checksumSHA1": "ntWCLzy4srxUZ2YRTLcDU1TJd2I=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/k8s.io/apimachinery/pkg/fields", + "checksumSHA1": "JKIs3haOjRLmpL0UDXfaz7QX7Sk=", "path": "k8s.io/apimachinery/pkg/fields", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" + "revision": "d9697193dfed7468f65da0d9d231c20b634425df", + "revisionTime": "2017-10-25T19:24:24Z" }, { - "checksumSHA1": "7QvUwGY3z4qW5rpDszLmxL/Xj9g=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/k8s.io/apimachinery/pkg/labels", + "checksumSHA1": "KponW1gm4wYZFKwwlXHYEIn+zhI=", "path": "k8s.io/apimachinery/pkg/labels", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" + "revision": "d9697193dfed7468f65da0d9d231c20b634425df", + "revisionTime": "2017-10-25T19:24:24Z" }, { - "checksumSHA1": "Miw1lcQ2E+AGd7AYOFIHO3+fYww=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/k8s.io/apimachinery/pkg/openapi", - "path": "k8s.io/apimachinery/pkg/openapi", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" - }, - { - "checksumSHA1": "l2K44V84AJTWq4X/fVFPFTbTSF0=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/k8s.io/apimachinery/pkg/runtime", + "checksumSHA1": "CBKogLKthxnboTxi9tY3qJZ8vCw=", "path": "k8s.io/apimachinery/pkg/runtime", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" + "revision": "d9697193dfed7468f65da0d9d231c20b634425df", + "revisionTime": "2017-10-25T19:24:24Z" }, { - "checksumSHA1": "r/9XGSppO8GWoOdsiaivB0A6PYM=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema", + "checksumSHA1": 
"sAumyh8dKLMY2RAIXEDUuv6E8OM=", "path": "k8s.io/apimachinery/pkg/runtime/schema", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" + "revision": "d9697193dfed7468f65da0d9d231c20b634425df", + "revisionTime": "2017-10-25T19:24:24Z" }, { - "checksumSHA1": "f7yWRd6DjlxINbFru0y2INrjmKE=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/k8s.io/apimachinery/pkg/selection", + "checksumSHA1": "jjMq79D66L64Ku50azu5VuL6J+I=", "path": "k8s.io/apimachinery/pkg/selection", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" + "revision": "d9697193dfed7468f65da0d9d231c20b634425df", + "revisionTime": "2017-10-25T19:24:24Z" }, { - "checksumSHA1": "fJG2oyXJMDa3Ocizs3zI8B9Skiw=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/k8s.io/apimachinery/pkg/types", + "checksumSHA1": "bw1ACevKtvhp8qmK/V3A7+JkpAM=", "path": "k8s.io/apimachinery/pkg/types", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" + "revision": "d9697193dfed7468f65da0d9d231c20b634425df", + "revisionTime": "2017-10-25T19:24:24Z" }, { - "checksumSHA1": "QJBvzXcMRCebScbasdgkuCeX45U=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/k8s.io/apimachinery/pkg/util/errors", + "checksumSHA1": "Jd2ZDrgkxdr7R9XohVdZmOWihbw=", "path": "k8s.io/apimachinery/pkg/util/errors", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" + "revision": "d9697193dfed7468f65da0d9d231c20b634425df", + "revisionTime": "2017-10-25T19:24:24Z" }, { - "checksumSHA1": "jfOySk/zLRaEERws6wMQftj+G1A=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr", + "checksumSHA1": "RRDMwpjGIkOTLMyubRLJmapE+Ek=", "path": "k8s.io/apimachinery/pkg/util/intstr", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" + "revision": "d9697193dfed7468f65da0d9d231c20b634425df", + "revisionTime": "2017-10-25T19:24:24Z" }, { - "checksumSHA1": "JrNYZWRvwT5Mbo+jKfYQ8bXmb9M=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/k8s.io/apimachinery/pkg/util/net", + "checksumSHA1": "zz3cUfvgreG2N5eIv1w7VPSHwH0=", "path": "k8s.io/apimachinery/pkg/util/net", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" + "revision": "d9697193dfed7468f65da0d9d231c20b634425df", + "revisionTime": "2017-10-25T19:24:24Z" }, { - "checksumSHA1": "O4AeWEwGL4K0gffhQdsb1DUIF7I=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/k8s.io/apimachinery/pkg/util/runtime", + "checksumSHA1": "zbbftg6fKoFsl9reUMMLrVR76KI=", "path": "k8s.io/apimachinery/pkg/util/runtime", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" + "revision": "d9697193dfed7468f65da0d9d231c20b634425df", + "revisionTime": "2017-10-25T19:24:24Z" }, { - "checksumSHA1": "lcIoSJOwJL5VezuuAaEZ3/wvgO4=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/k8s.io/apimachinery/pkg/util/sets", + "checksumSHA1": "2p066REe+o1932lveLsySRedt9E=", "path": "k8s.io/apimachinery/pkg/util/sets", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" + "revision": "d9697193dfed7468f65da0d9d231c20b634425df", + "revisionTime": "2017-10-25T19:24:24Z" }, { - "checksumSHA1": "/xfWcJuMbCdT6YPsVwHHJwtK1Gc=", - "origin": 
"github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/k8s.io/apimachinery/pkg/util/validation", + "checksumSHA1": "k60MqyoSBev3Ybs/G297aRv8uA0=", "path": "k8s.io/apimachinery/pkg/util/validation", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" + "revision": "d9697193dfed7468f65da0d9d231c20b634425df", + "revisionTime": "2017-10-25T19:24:24Z" }, { - "checksumSHA1": "yqQVeb2wcQUVa0H5L5wF9T9UhAY=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/k8s.io/apimachinery/pkg/util/validation/field", + "checksumSHA1": "QmUaaalnjN+2cYMB5Ycf4ia93C0=", "path": "k8s.io/apimachinery/pkg/util/validation/field", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" + "revision": "d9697193dfed7468f65da0d9d231c20b634425df", + "revisionTime": "2017-10-25T19:24:24Z" }, { - "checksumSHA1": "nKCYXkp430RHeMqmnUSxdyyLCRs=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/k8s.io/apimachinery/pkg/util/wait", + "checksumSHA1": "YudUrts7DUXaHq+Y1euWfIuOwBg=", "path": "k8s.io/apimachinery/pkg/util/wait", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" + "revision": "d9697193dfed7468f65da0d9d231c20b634425df", + "revisionTime": "2017-10-25T19:24:24Z" }, { - "checksumSHA1": "mE8V0g3OteuJ5DsapDfUTiHIYMI=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/k8s.io/apimachinery/pkg/watch", + "checksumSHA1": "6h4r1R/JbcTl0BI55jV7tJFe0dQ=", "path": "k8s.io/apimachinery/pkg/watch", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" + "revision": "d9697193dfed7468f65da0d9d231c20b634425df", + "revisionTime": "2017-10-25T19:24:24Z" }, { - "checksumSHA1": "uQunvnsICiowJl3Rs46eT2OtFqg=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect", + "checksumSHA1": "s35dSpFQP7xMIF/+fEFgJSD0h4o=", "path": "k8s.io/apimachinery/third_party/forked/golang/reflect", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" + "revision": "d9697193dfed7468f65da0d9d231c20b634425df", + "revisionTime": "2017-10-25T19:24:24Z" }, { - "checksumSHA1": "r2FYS8ugdf6QWV8tVI5l5K3I8t4=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/k8s.io/client-go/pkg/apis/authentication", - "path": "k8s.io/client-go/pkg/apis/authentication", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" - }, - { - "checksumSHA1": "/8H0XjCdePGQ5IYpl4lA7b1MLvo=", - "origin": "github.com/hashicorp/vault-plugin-auth-kubernetes/vendor/k8s.io/client-go/pkg/apis/authentication/v1", "path": "k8s.io/client-go/pkg/apis/authentication/v1", - "revision": "e6ff3b4fefe641225a7a81013337b3b62027b3a0", - "revisionTime": "2017-09-19T14:00:28Z" + "revision": "" + }, + { + "checksumSHA1": "/zjulDhlMogVSOhPGM9UlDWyFuo=", + "path": "k8s.io/kube-openapi/pkg/common", + "revision": "61b46af70dfed79c6d24530cd23b41440a7f22a5", + "revisionTime": "2017-10-24T18:23:55Z" }, { "checksumSHA1": "XQ5CfhpL7XdMmELFnc2Yds45cbk=", From d573b4637cb03c615926f9b583b1f7b9d998fbfd Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Fri, 27 Oct 2017 16:12:14 -0400 Subject: [PATCH 040/329] Update kube stuff --- .../vault-plugin-auth-kubernetes/Gopkg.lock | 24 +- .../vault-plugin-auth-kubernetes/Gopkg.toml | 6 +- .../token_review.go | 2 +- vendor/k8s.io/api/LICENSE | 202 +++ 
vendor/k8s.io/api/authentication/v1/BUILD | 46 + vendor/k8s.io/api/authentication/v1/doc.go | 20 + .../api/authentication/v1/generated.pb.go | 1302 +++++++++++++++++ .../api/authentication/v1/generated.proto | 99 ++ .../k8s.io/api/authentication/v1/register.go | 51 + vendor/k8s.io/api/authentication/v1/types.go | 107 ++ .../v1/types_swagger_doc_generated.go | 72 + .../v1/zz_generated.deepcopy.go | 147 ++ vendor/k8s.io/apimachinery/LICENSE | 202 +++ .../apimachinery/pkg/apis/meta/v1/BUILD | 10 + .../pkg/apis/meta/v1/conversion.go | 1 - .../apimachinery/pkg/apis/meta/v1/time.go | 8 +- .../apimachinery/pkg/labels/selector.go | 2 +- .../apimachinery/pkg/util/runtime/runtime.go | 4 +- vendor/vendor.json | 158 +- 19 files changed, 2368 insertions(+), 95 deletions(-) create mode 100644 vendor/k8s.io/api/LICENSE create mode 100644 vendor/k8s.io/api/authentication/v1/BUILD create mode 100644 vendor/k8s.io/api/authentication/v1/doc.go create mode 100644 vendor/k8s.io/api/authentication/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/authentication/v1/generated.proto create mode 100644 vendor/k8s.io/api/authentication/v1/register.go create mode 100644 vendor/k8s.io/api/authentication/v1/types.go create mode 100644 vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/LICENSE diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-kubernetes/Gopkg.lock b/vendor/github.com/hashicorp/vault-plugin-auth-kubernetes/Gopkg.lock index 1afbdfd95c..febc097960 100644 --- a/vendor/github.com/hashicorp/vault-plugin-auth-kubernetes/Gopkg.lock +++ b/vendor/github.com/hashicorp/vault-plugin-auth-kubernetes/Gopkg.lock @@ -254,20 +254,26 @@ revision = "eb3733d160e74a9c7e442f435eb3bea458e1d19f" [[projects]] - branch = "release-1.7" - name = "k8s.io/apimachinery" - packages = ["pkg/api/errors","pkg/api/resource","pkg/apis/meta/v1","pkg/conversion","pkg/conversion/queryparams","pkg/fields","pkg/labels","pkg/openapi","pkg/runtime","pkg/runtime/schema","pkg/selection","pkg/types","pkg/util/errors","pkg/util/intstr","pkg/util/net","pkg/util/runtime","pkg/util/sets","pkg/util/validation","pkg/util/validation/field","pkg/util/wait","pkg/watch","third_party/forked/golang/reflect"] - revision = "8ab5f3d8a330c2e9baaf84e39042db8d49034ae2" + branch = "release-1.8" + name = "k8s.io/api" + packages = ["authentication/v1"] + revision = "6c6dac0277229b9e9578c5ca3f74a4345d35cdc2" [[projects]] - name = "k8s.io/client-go" - packages = ["pkg/apis/authentication","pkg/apis/authentication/v1"] - revision = "d92e8497f71b7b4e0494e5bd204b48d34bd6f254" - version = "v4.0.0" + branch = "release-1.8" + name = "k8s.io/apimachinery" + packages = ["pkg/api/errors","pkg/api/resource","pkg/apis/meta/v1","pkg/conversion","pkg/conversion/queryparams","pkg/fields","pkg/labels","pkg/runtime","pkg/runtime/schema","pkg/selection","pkg/types","pkg/util/errors","pkg/util/intstr","pkg/util/net","pkg/util/runtime","pkg/util/sets","pkg/util/validation","pkg/util/validation/field","pkg/util/wait","pkg/watch","third_party/forked/golang/reflect"] + revision = "019ae5ada31de202164b118aee88ee2d14075c31" + +[[projects]] + branch = "master" + name = "k8s.io/kube-openapi" + packages = ["pkg/common"] + revision = "61b46af70dfed79c6d24530cd23b41440a7f22a5" [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "59a23adc8860feeb0e30da792c2fe8ed5f7c2bcee34c115fe55e93e5f2d9d9b7" + inputs-digest = 
"dfb0ef4889225ea4843fef5ed665c15d889d45d0a0e513a099655000e11a86eb" solver-name = "gps-cdcl" solver-version = 1 diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-kubernetes/Gopkg.toml b/vendor/github.com/hashicorp/vault-plugin-auth-kubernetes/Gopkg.toml index c20ef2ba88..785d177a99 100644 --- a/vendor/github.com/hashicorp/vault-plugin-auth-kubernetes/Gopkg.toml +++ b/vendor/github.com/hashicorp/vault-plugin-auth-kubernetes/Gopkg.toml @@ -46,9 +46,9 @@ name = "github.com/mitchellh/mapstructure" [[constraint]] - branch = "release-1.7" + branch = "release-1.8" name = "k8s.io/apimachinery" [[constraint]] - name = "k8s.io/client-go" - version = "4.0.0" + name = "k8s.io/api" + branch = "release-1.8" diff --git a/vendor/github.com/hashicorp/vault-plugin-auth-kubernetes/token_review.go b/vendor/github.com/hashicorp/vault-plugin-auth-kubernetes/token_review.go index 01704cbb11..d1b24a5825 100644 --- a/vendor/github.com/hashicorp/vault-plugin-auth-kubernetes/token_review.go +++ b/vendor/github.com/hashicorp/vault-plugin-auth-kubernetes/token_review.go @@ -12,11 +12,11 @@ import ( "strings" cleanhttp "github.com/hashicorp/go-cleanhttp" + authv1 "k8s.io/api/authentication/v1" kubeerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - authv1 "k8s.io/client-go/pkg/apis/authentication/v1" ) // This is the result from the token review diff --git a/vendor/k8s.io/api/LICENSE b/vendor/k8s.io/api/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/k8s.io/api/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/k8s.io/api/authentication/v1/BUILD b/vendor/k8s.io/api/authentication/v1/BUILD new file mode 100644 index 0000000000..3a378d34e6 --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1/BUILD @@ -0,0 +1,46 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "generated.pb.go", + "register.go", + "types.go", + "types_swagger_doc_generated.go", + "zz_generated.deepcopy.go", + ], + importpath = "k8s.io/api/authentication/v1", + deps = [ + "//vendor/github.com/gogo/protobuf/proto:go_default_library", + "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) + +filegroup( + name = "go_default_library_protos", + srcs = ["generated.proto"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/api/authentication/v1/doc.go b/vendor/k8s.io/api/authentication/v1/doc.go new file mode 100644 index 0000000000..15b117a4c9 --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package,register +// +groupName=authentication.k8s.io +// +k8s:openapi-gen=true +package v1 // import "k8s.io/api/authentication/v1" diff --git a/vendor/k8s.io/api/authentication/v1/generated.pb.go b/vendor/k8s.io/api/authentication/v1/generated.pb.go new file mode 100644 index 0000000000..e736789398 --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1/generated.pb.go @@ -0,0 +1,1302 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto +// DO NOT EDIT! + +/* + Package v1 is a generated protocol buffer package. 
+ + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto + + It has these top-level messages: + ExtraValue + TokenReview + TokenReviewSpec + TokenReviewStatus + UserInfo +*/ +package v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *TokenReview) Reset() { *m = TokenReview{} } +func (*TokenReview) ProtoMessage() {} +func (*TokenReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } +func (*TokenReviewSpec) ProtoMessage() {} +func (*TokenReviewSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } +func (*TokenReviewStatus) ProtoMessage() {} +func (*TokenReviewStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *UserInfo) Reset() { *m = UserInfo{} } +func (*UserInfo) ProtoMessage() {} +func (*UserInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func init() { + proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authentication.v1.ExtraValue") + proto.RegisterType((*TokenReview)(nil), "k8s.io.api.authentication.v1.TokenReview") + proto.RegisterType((*TokenReviewSpec)(nil), "k8s.io.api.authentication.v1.TokenReviewSpec") + proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.api.authentication.v1.TokenReviewStatus") + proto.RegisterType((*UserInfo)(nil), "k8s.io.api.authentication.v1.UserInfo") +} +func (m ExtraValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *TokenReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TokenReview) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + 
return 0, err + } + i += n2 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *TokenReviewSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TokenReviewSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) + i += copy(dAtA[i:], m.Token) + return i, nil +} + +func (m *TokenReviewStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TokenReviewStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + if m.Authenticated { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.User.Size())) + n4, err := m.User.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) + i += copy(dAtA[i:], m.Error) + return i, nil +} + +func (m *UserInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) + i += copy(dAtA[i:], m.Username) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i += copy(dAtA[i:], m.UID) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Extra) > 0 { + keysForExtra := make([]string, 0, len(m.Extra)) + for k := range m.Extra { + keysForExtra = append(keysForExtra, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + for _, k := range keysForExtra { + dAtA[i] = 0x22 + i++ + v := m.Extra[string(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n5, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + } + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func 
encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m ExtraValue) Size() (n int) { + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *TokenReview) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TokenReviewSpec) Size() (n int) { + var l int + _ = l + l = len(m.Token) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TokenReviewStatus) Size() (n int) { + var l int + _ = l + n += 2 + l = m.User.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Error) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *UserInfo) Size() (n int) { + var l int + _ = l + l = len(m.Username) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Extra) > 0 { + for k, v := range m.Extra { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *TokenReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TokenReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TokenReviewSpec", "TokenReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TokenReviewStatus", "TokenReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *TokenReviewSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TokenReviewSpec{`, + `Token:` + fmt.Sprintf("%v", this.Token) + `,`, + `}`, + }, "") + return s +} +func (this *TokenReviewStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TokenReviewStatus{`, + `Authenticated:` + fmt.Sprintf("%v", this.Authenticated) + `,`, + `User:` + strings.Replace(strings.Replace(this.User.String(), "UserInfo", "UserInfo", 1), `&`, ``, 1) + `,`, + `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `}`, + }, "") + return s +} +func (this *UserInfo) String() string { + if this == nil { + return "nil" + } + keysForExtra := make([]string, 0, len(this.Extra)) + for k := range this.Extra { + keysForExtra = append(keysForExtra, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + mapStringForExtra := "map[string]ExtraValue{" + for _, k := range keysForExtra { + mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k]) + } + mapStringForExtra += "}" + s := strings.Join([]string{`&UserInfo{`, + `Username:` + fmt.Sprintf("%v", this.Username) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `Groups:` + 
fmt.Sprintf("%v", this.Groups) + `,`, + `Extra:` + mapStringForExtra + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ExtraValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExtraValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExtraValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Token = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenReviewStatus: illegal tag %d (wire type 
%d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Authenticated", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Authenticated = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Username = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } 
+ if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Extra == nil { + m.Extra = make(map[string]ExtraValue) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Extra[mapkey] = *mapvalue + } else { + var mapvalue ExtraValue + m.Extra[mapkey] = mapvalue + } + iNdEx = postIndex + default: + 
iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 642 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x4d, 0x6f, 0xd3, 0x40, + 0x10, 0xb5, 0xf3, 0x51, 0x25, 0x1b, 0x0a, 0x65, 0x25, 0xa4, 0x28, 0x02, 0x27, 0x0a, 0x12, 0xca, + 0x81, 0xae, 0x49, 0x41, 0xa5, 0x2a, 0x12, 0x12, 0x16, 0x11, 0xf4, 0x80, 0x2a, 0x2d, 0xb4, 0x48, + 0x9c, 0xd8, 0x38, 0x53, 0xc7, 0xa4, 0xfe, 0xd0, 0x7a, 0x6d, 0xe8, 0xad, 0x3f, 0x81, 0x23, 0xdc, + 0xf8, 0x17, 0x1c, 0xb9, 0xf6, 0xd8, 0x63, 0x0f, 0xa8, 0xa2, 0xe6, 0x8f, 0xa0, 0x5d, 0x2f, 0x4d, + 0xda, 0xd2, 0xd0, 0x9b, 0xf7, 0xcd, 0x7b, 0x6f, 0x67, 0x9e, 0x77, 0xd0, 0x60, 0xb2, 0x96, 0x10, + 0x3f, 0xb2, 0x27, 0xe9, 0x10, 0x78, 0x08, 0x02, 0x12, 0x3b, 0x83, 0x70, 0x14, 0x71, 0x5b, 0x17, + 0x58, 0xec, 0xdb, 0x2c, 0x15, 0x63, 0x08, 0x85, 0xef, 0x32, 0xe1, 0x47, 0xa1, 0x9d, 0xf5, 0x6d, + 0x0f, 0x42, 0xe0, 0x4c, 0xc0, 0x88, 0xc4, 0x3c, 0x12, 0x11, 0xbe, 0x5d, 0xb0, 0x09, 0x8b, 0x7d, + 0x72, 0x96, 0x4d, 0xb2, 0x7e, 0x6b, 0xd9, 0xf3, 0xc5, 0x38, 0x1d, 0x12, 0x37, 0x0a, 0x6c, 0x2f, + 0xf2, 0x22, 0x5b, 0x89, 0x86, 0xe9, 0x8e, 0x3a, 0xa9, 0x83, 0xfa, 0x2a, 0xcc, 
0x5a, 0x8f, 0xa6, + 0x57, 0x07, 0xcc, 0x1d, 0xfb, 0x21, 0xf0, 0x3d, 0x3b, 0x9e, 0x78, 0x12, 0x48, 0xec, 0x00, 0x04, + 0xfb, 0x47, 0x0b, 0x2d, 0xfb, 0x32, 0x15, 0x4f, 0x43, 0xe1, 0x07, 0x70, 0x41, 0xb0, 0xfa, 0x3f, + 0x41, 0xe2, 0x8e, 0x21, 0x60, 0x17, 0x74, 0x0f, 0x2f, 0xd3, 0xa5, 0xc2, 0xdf, 0xb5, 0xfd, 0x50, + 0x24, 0x82, 0x9f, 0x17, 0x75, 0x1f, 0x23, 0x34, 0xf8, 0x24, 0x38, 0xdb, 0x66, 0xbb, 0x29, 0xe0, + 0x36, 0xaa, 0xfa, 0x02, 0x82, 0xa4, 0x69, 0x76, 0xca, 0xbd, 0xba, 0x53, 0xcf, 0x8f, 0xdb, 0xd5, + 0x0d, 0x09, 0xd0, 0x02, 0x5f, 0xaf, 0x7d, 0xf9, 0xd6, 0x36, 0xf6, 0x7f, 0x76, 0x8c, 0xee, 0xd7, + 0x12, 0x6a, 0xbc, 0x89, 0x26, 0x10, 0x52, 0xc8, 0x7c, 0xf8, 0x88, 0xdf, 0xa3, 0x9a, 0x4c, 0x60, + 0xc4, 0x04, 0x6b, 0x9a, 0x1d, 0xb3, 0xd7, 0x58, 0x79, 0x40, 0xa6, 0xe1, 0x9f, 0x36, 0x44, 0xe2, + 0x89, 0x27, 0x81, 0x84, 0x48, 0x36, 0xc9, 0xfa, 0x64, 0x73, 0xf8, 0x01, 0x5c, 0xf1, 0x0a, 0x04, + 0x73, 0xf0, 0xc1, 0x71, 0xdb, 0xc8, 0x8f, 0xdb, 0x68, 0x8a, 0xd1, 0x53, 0x57, 0xbc, 0x89, 0x2a, + 0x49, 0x0c, 0x6e, 0xb3, 0xa4, 0xdc, 0x97, 0xc9, 0xbc, 0x5f, 0x4b, 0x66, 0x5a, 0x7b, 0x1d, 0x83, + 0xeb, 0x5c, 0xd3, 0xd6, 0x15, 0x79, 0xa2, 0xca, 0x08, 0xbf, 0x45, 0x0b, 0x89, 0x60, 0x22, 0x4d, + 0x9a, 0x65, 0x65, 0x69, 0x5f, 0xdd, 0x52, 0xc9, 0x9c, 0xeb, 0xda, 0x74, 0xa1, 0x38, 0x53, 0x6d, + 0xd7, 0x5d, 0x45, 0x37, 0xce, 0xdd, 0x8f, 0xef, 0xa2, 0xaa, 0x90, 0x90, 0xca, 0xa6, 0xee, 0x2c, + 0x6a, 0x65, 0xb5, 0xe0, 0x15, 0xb5, 0xee, 0x0f, 0x13, 0xdd, 0xbc, 0x70, 0x0b, 0x7e, 0x82, 0x16, + 0x67, 0x9a, 0x81, 0x91, 0xb2, 0xa8, 0x39, 0xb7, 0xb4, 0xc5, 0xe2, 0xb3, 0xd9, 0x22, 0x3d, 0xcb, + 0xc5, 0x2f, 0x51, 0x25, 0x4d, 0x80, 0xeb, 0xd0, 0xee, 0xcd, 0x9f, 0x70, 0x2b, 0x01, 0xbe, 0x11, + 0xee, 0x44, 0xd3, 0xb4, 0x24, 0x42, 0x95, 0x83, 0x9c, 0x00, 0x38, 0x8f, 0xb8, 0x0a, 0x6b, 0x66, + 0x82, 0x81, 0x04, 0x69, 0x51, 0xeb, 0x7e, 0x2f, 0xa1, 0xda, 0x5f, 0x17, 0x7c, 0x1f, 0xd5, 0xa4, + 0x32, 0x64, 0x01, 0xe8, 0xb1, 0x97, 0xb4, 0x48, 0x71, 0x24, 0x4e, 0x4f, 0x19, 0xf8, 0x0e, 0x2a, + 0xa7, 0xfe, 0x48, 0x35, 0x5a, 0x77, 0x1a, 0x9a, 0x58, 0xde, 0xda, 0x78, 0x4e, 0x25, 0x8e, 0xbb, + 0x68, 0xc1, 0xe3, 0x51, 0x1a, 0xcb, 0x9f, 0x25, 0xdf, 0x26, 0x92, 0xb9, 0xbf, 0x50, 0x08, 0xd5, + 0x15, 0xbc, 0x8d, 0xaa, 0x20, 0x1f, 0x73, 0xb3, 0xd2, 0x29, 0xf7, 0x1a, 0x2b, 0xfd, 0xab, 0x4d, + 0x4b, 0xd4, 0x02, 0x0c, 0x42, 0xc1, 0xf7, 0x66, 0xa6, 0x92, 0x18, 0x2d, 0xec, 0x5a, 0x43, 0xbd, + 0x24, 0x8a, 0x83, 0x97, 0x50, 0x79, 0x02, 0x7b, 0xc5, 0x44, 0x54, 0x7e, 0xe2, 0xa7, 0xa8, 0x9a, + 0xc9, 0xfd, 0xd1, 0x29, 0xf7, 0xe6, 0xdf, 0x3b, 0xdd, 0x37, 0x5a, 0xc8, 0xd6, 0x4b, 0x6b, 0xa6, + 0xd3, 0x3b, 0x38, 0xb1, 0x8c, 0xc3, 0x13, 0xcb, 0x38, 0x3a, 0xb1, 0x8c, 0xfd, 0xdc, 0x32, 0x0f, + 0x72, 0xcb, 0x3c, 0xcc, 0x2d, 0xf3, 0x28, 0xb7, 0xcc, 0x5f, 0xb9, 0x65, 0x7e, 0xfe, 0x6d, 0x19, + 0xef, 0x4a, 0x59, 0xff, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xeb, 0x3a, 0x3c, 0x31, 0x1b, 0x05, + 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/authentication/v1/generated.proto b/vendor/k8s.io/api/authentication/v1/generated.proto new file mode 100644 index 0000000000..fb7888b632 --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1/generated.proto @@ -0,0 +1,99 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.authentication.v1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message ExtraValue { + // items, if empty, will result in an empty slice + + repeated string items = 1; +} + +// TokenReview attempts to authenticate a token to a known user. +// Note: TokenReview requests may be cached by the webhook token authenticator +// plugin in the kube-apiserver. +message TokenReview { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec holds information about the request being evaluated + optional TokenReviewSpec spec = 2; + + // Status is filled in by the server and indicates whether the request can be authenticated. + // +optional + optional TokenReviewStatus status = 3; +} + +// TokenReviewSpec is a description of the token authentication request. +message TokenReviewSpec { + // Token is the opaque bearer token. + // +optional + optional string token = 1; +} + +// TokenReviewStatus is the result of the token authentication request. +message TokenReviewStatus { + // Authenticated indicates that the token was associated with a known user. + // +optional + optional bool authenticated = 1; + + // User is the UserInfo associated with the provided token. + // +optional + optional UserInfo user = 2; + + // Error indicates that the token couldn't be checked + // +optional + optional string error = 3; +} + +// UserInfo holds the information about the user needed to implement the +// user.Info interface. +message UserInfo { + // The name that uniquely identifies this user among all active users. + // +optional + optional string username = 1; + + // A unique value that identifies this user across time. If this user is + // deleted and another user by the same name is added, they will have + // different UIDs. + // +optional + optional string uid = 2; + + // The names of groups this user is a part of. + // +optional + repeated string groups = 3; + + // Any additional information provided by the authenticator. + // +optional + map extra = 4; +} + diff --git a/vendor/k8s.io/api/authentication/v1/register.go b/vendor/k8s.io/api/authentication/v1/register.go new file mode 100644 index 0000000000..936237c2be --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1/register.go @@ -0,0 +1,51 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "authentication.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &TokenReview{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/authentication/v1/types.go b/vendor/k8s.io/api/authentication/v1/types.go new file mode 100644 index 0000000000..b6d30bbe61 --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1/types.go @@ -0,0 +1,107 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + // ImpersonateUserHeader is used to impersonate a particular user during an API server request + ImpersonateUserHeader = "Impersonate-User" + + // ImpersonateGroupHeader is used to impersonate a particular group during an API server request. + // It can be repeated multiplied times for multiple groups. + ImpersonateGroupHeader = "Impersonate-Group" + + // ImpersonateUserExtraHeaderPrefix is a prefix for any header used to impersonate an entry in the + // extra map[string][]string for user.Info. The key will be every after the prefix. + // It can be repeated multiplied times for multiple map keys and the same key can be repeated multiple + // times to have multiple elements in the slice under a single key + ImpersonateUserExtraHeaderPrefix = "Impersonate-Extra-" +) + +// +genclient +// +genclient:nonNamespaced +// +genclient:noVerbs +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// TokenReview attempts to authenticate a token to a known user. 
+// Note: TokenReview requests may be cached by the webhook token authenticator +// plugin in the kube-apiserver. +type TokenReview struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec holds information about the request being evaluated + Spec TokenReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status is filled in by the server and indicates whether the request can be authenticated. + // +optional + Status TokenReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// TokenReviewSpec is a description of the token authentication request. +type TokenReviewSpec struct { + // Token is the opaque bearer token. + // +optional + Token string `json:"token,omitempty" protobuf:"bytes,1,opt,name=token"` +} + +// TokenReviewStatus is the result of the token authentication request. +type TokenReviewStatus struct { + // Authenticated indicates that the token was associated with a known user. + // +optional + Authenticated bool `json:"authenticated,omitempty" protobuf:"varint,1,opt,name=authenticated"` + // User is the UserInfo associated with the provided token. + // +optional + User UserInfo `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"` + // Error indicates that the token couldn't be checked + // +optional + Error string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` +} + +// UserInfo holds the information about the user needed to implement the +// user.Info interface. +type UserInfo struct { + // The name that uniquely identifies this user among all active users. + // +optional + Username string `json:"username,omitempty" protobuf:"bytes,1,opt,name=username"` + // A unique value that identifies this user across time. If this user is + // deleted and another user by the same name is added, they will have + // different UIDs. + // +optional + UID string `json:"uid,omitempty" protobuf:"bytes,2,opt,name=uid"` + // The names of groups this user is a part of. + // +optional + Groups []string `json:"groups,omitempty" protobuf:"bytes,3,rep,name=groups"` + // Any additional information provided by the authenticator. + // +optional + Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,4,rep,name=extra"` +} + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type ExtraValue []string + +func (t ExtraValue) String() string { + return fmt.Sprintf("%v", []string(t)) +} diff --git a/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..bb235e4eaa --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go @@ -0,0 +1,72 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_TokenReview = map[string]string{ + "": "TokenReview attempts to authenticate a token to a known user. Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver.", + "spec": "Spec holds information about the request being evaluated", + "status": "Status is filled in by the server and indicates whether the request can be authenticated.", +} + +func (TokenReview) SwaggerDoc() map[string]string { + return map_TokenReview +} + +var map_TokenReviewSpec = map[string]string{ + "": "TokenReviewSpec is a description of the token authentication request.", + "token": "Token is the opaque bearer token.", +} + +func (TokenReviewSpec) SwaggerDoc() map[string]string { + return map_TokenReviewSpec +} + +var map_TokenReviewStatus = map[string]string{ + "": "TokenReviewStatus is the result of the token authentication request.", + "authenticated": "Authenticated indicates that the token was associated with a known user.", + "user": "User is the UserInfo associated with the provided token.", + "error": "Error indicates that the token couldn't be checked", +} + +func (TokenReviewStatus) SwaggerDoc() map[string]string { + return map_TokenReviewStatus +} + +var map_UserInfo = map[string]string{ + "": "UserInfo holds the information about the user needed to implement the user.Info interface.", + "username": "The name that uniquely identifies this user among all active users.", + "uid": "A unique value that identifies this user across time. If this user is deleted and another user by the same name is added, they will have different UIDs.", + "groups": "The names of groups this user is a part of.", + "extra": "Any additional information provided by the authenticator.", +} + +func (UserInfo) SwaggerDoc() map[string]string { + return map_UserInfo +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..c1717c1cd8 --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go @@ -0,0 +1,147 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
+ +package v1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +// +// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*TokenReview).DeepCopyInto(out.(*TokenReview)) + return nil + }, InType: reflect.TypeOf(&TokenReview{})}, + conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*TokenReviewSpec).DeepCopyInto(out.(*TokenReviewSpec)) + return nil + }, InType: reflect.TypeOf(&TokenReviewSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*TokenReviewStatus).DeepCopyInto(out.(*TokenReviewStatus)) + return nil + }, InType: reflect.TypeOf(&TokenReviewStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { + in.(*UserInfo).DeepCopyInto(out.(*UserInfo)) + return nil + }, InType: reflect.TypeOf(&UserInfo{})}, + ) +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenReview) DeepCopyInto(out *TokenReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenReview. +func (in *TokenReview) DeepCopy() *TokenReview { + if in == nil { + return nil + } + out := new(TokenReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TokenReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenReviewSpec) DeepCopyInto(out *TokenReviewSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenReviewSpec. +func (in *TokenReviewSpec) DeepCopy() *TokenReviewSpec { + if in == nil { + return nil + } + out := new(TokenReviewSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenReviewStatus) DeepCopyInto(out *TokenReviewStatus) { + *out = *in + in.User.DeepCopyInto(&out.User) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenReviewStatus. +func (in *TokenReviewStatus) DeepCopy() *TokenReviewStatus { + if in == nil { + return nil + } + out := new(TokenReviewStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserInfo) DeepCopyInto(out *UserInfo) { + *out = *in + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make(map[string]ExtraValue, len(*in)) + for key, val := range *in { + (*out)[key] = make(ExtraValue, len(val)) + copy((*out)[key], val) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserInfo. +func (in *UserInfo) DeepCopy() *UserInfo { + if in == nil { + return nil + } + out := new(UserInfo) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apimachinery/LICENSE b/vendor/k8s.io/apimachinery/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/k8s.io/apimachinery/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD index 7354f5e2fa..4a96c3f948 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD @@ -93,3 +93,13 @@ filegroup( srcs = ["generated.proto"], visibility = ["//visibility:public"], ) + +go_test( + name = "go_default_xtest", + srcs = ["conversion_test.go"], + importpath = "k8s.io/apimachinery/pkg/apis/meta/v1_test", + deps = [ + "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + ], +) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go index 7049c9a33b..a96f38ee21 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go @@ -252,7 +252,6 @@ func Convert_map_to_unversioned_LabelSelector(in *map[string]string, out *LabelS if in == nil { return nil } - out = new(LabelSelector) for labelKey, labelValue := range *in { AddLabelToSelector(out, labelKey, labelValue) } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go index 435f6a8f59..0a9f2a3775 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go @@ -80,7 +80,13 @@ func (t *Time) Before(u *Time) bool { // Equal reports whether the time instant t is equal to u. func (t *Time) Equal(u *Time) bool { - return t.Time.Equal(u.Time) + if t == nil && u == nil { + return true + } + if t != nil && u != nil { + return t.Time.Equal(u.Time) + } + return false } // Unix returns the local time corresponding to the given Unix time diff --git a/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/vendor/k8s.io/apimachinery/pkg/labels/selector.go index ac123033a0..b301b42840 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/selector.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/selector.go @@ -550,7 +550,7 @@ func (p *Parser) lookahead(context ParserContext) (Token, string) { return tok, lit } -// consume returns current token and string. Increments the the position +// consume returns current token and string. 
Increments the position func (p *Parser) consume(context ParserContext) (Token, string) { p.position++ tok, lit := p.scannedItems[p.position-1].tok, p.scannedItems[p.position-1].literal diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go index 748174e191..442dde7df2 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -128,7 +128,9 @@ func (r *rudimentaryErrorBackoff) OnError(error) { r.lastErrorTimeLock.Lock() defer r.lastErrorTimeLock.Unlock() d := time.Since(r.lastErrorTime) - if d < r.minPeriod { + if d < r.minPeriod && d >= 0 { + // If the time moves backwards for any reason, do nothing + // TODO: remove check "d >= 0" after go 1.8 is no longer supported time.Sleep(r.minPeriod - d) } r.lastErrorTime = time.Now() diff --git a/vendor/vendor.json b/vendor/vendor.json index ce00f12565..fac77d1374 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -501,80 +501,80 @@ { "checksumSHA1": "7BC2/27NId9xaPDB5w3nWN2mn9A=", "path": "github.com/coreos/etcd/auth/authpb", - "revision": "0fcafcb828f82a0d7cdc2801c0d4667dd4686c06", - "revisionTime": "2017-10-27T18:46:46Z" + "revision": "2cea13ba680fdb7b7763cc191545fb12ee72c00c", + "revisionTime": "2017-10-27T19:23:22Z" }, { "checksumSHA1": "XtVAwbJWD12FGuZrIkxpe8t9TB8=", "path": "github.com/coreos/etcd/client", - "revision": "0fcafcb828f82a0d7cdc2801c0d4667dd4686c06", - "revisionTime": "2017-10-27T18:46:46Z" + "revision": "2cea13ba680fdb7b7763cc191545fb12ee72c00c", + "revisionTime": "2017-10-27T19:23:22Z" }, { "checksumSHA1": "4m3qRCZfmTyRSzd4eH8ovMb5OXY=", "path": "github.com/coreos/etcd/clientv3", - "revision": "0fcafcb828f82a0d7cdc2801c0d4667dd4686c06", - "revisionTime": "2017-10-27T18:46:46Z" + "revision": "2cea13ba680fdb7b7763cc191545fb12ee72c00c", + "revisionTime": "2017-10-27T19:23:22Z" }, { "checksumSHA1": "LpOgTec6cz2Tf3zDav7VkqMHmBM=", "path": "github.com/coreos/etcd/clientv3/concurrency", - "revision": "0fcafcb828f82a0d7cdc2801c0d4667dd4686c06", - "revisionTime": "2017-10-27T18:46:46Z" + "revision": "2cea13ba680fdb7b7763cc191545fb12ee72c00c", + "revisionTime": "2017-10-27T19:23:22Z" }, { "checksumSHA1": "VMC9J0rMVk3Fv8r8Bj7qqLlXc3E=", "path": "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes", - "revision": "0fcafcb828f82a0d7cdc2801c0d4667dd4686c06", - "revisionTime": "2017-10-27T18:46:46Z" + "revision": "2cea13ba680fdb7b7763cc191545fb12ee72c00c", + "revisionTime": "2017-10-27T19:23:22Z" }, { "checksumSHA1": "c0ltvGUOnk8qaEshFwc0PDH5nbc=", "path": "github.com/coreos/etcd/etcdserver/etcdserverpb", - "revision": "0fcafcb828f82a0d7cdc2801c0d4667dd4686c06", - "revisionTime": "2017-10-27T18:46:46Z" + "revision": "2cea13ba680fdb7b7763cc191545fb12ee72c00c", + "revisionTime": "2017-10-27T19:23:22Z" }, { "checksumSHA1": "JAkX9DfIBrSe0vUa07xl5cikxVQ=", "path": "github.com/coreos/etcd/mvcc/mvccpb", - "revision": "0fcafcb828f82a0d7cdc2801c0d4667dd4686c06", - "revisionTime": "2017-10-27T18:46:46Z" + "revision": "2cea13ba680fdb7b7763cc191545fb12ee72c00c", + "revisionTime": "2017-10-27T19:23:22Z" }, { "checksumSHA1": "mKIXx1kDwmVmdIpZ3pJtRBuUKso=", "path": "github.com/coreos/etcd/pkg/pathutil", - "revision": "0fcafcb828f82a0d7cdc2801c0d4667dd4686c06", - "revisionTime": "2017-10-27T18:46:46Z" + "revision": "2cea13ba680fdb7b7763cc191545fb12ee72c00c", + "revisionTime": "2017-10-27T19:23:22Z" }, { "checksumSHA1": "z+C4BtPa8wbOUKW5dmHyhNnTulg=", "path": "github.com/coreos/etcd/pkg/srv", - "revision": 
"0fcafcb828f82a0d7cdc2801c0d4667dd4686c06", - "revisionTime": "2017-10-27T18:46:46Z" + "revision": "2cea13ba680fdb7b7763cc191545fb12ee72c00c", + "revisionTime": "2017-10-27T19:23:22Z" }, { "checksumSHA1": "rMyIh9PsSvPs6Yd+YgKITQzQJx8=", "path": "github.com/coreos/etcd/pkg/tlsutil", - "revision": "0fcafcb828f82a0d7cdc2801c0d4667dd4686c06", - "revisionTime": "2017-10-27T18:46:46Z" + "revision": "2cea13ba680fdb7b7763cc191545fb12ee72c00c", + "revisionTime": "2017-10-27T19:23:22Z" }, { "checksumSHA1": "agofzi+YZ7VYbxCldLaHYHAtlpc=", "path": "github.com/coreos/etcd/pkg/transport", - "revision": "0fcafcb828f82a0d7cdc2801c0d4667dd4686c06", - "revisionTime": "2017-10-27T18:46:46Z" + "revision": "2cea13ba680fdb7b7763cc191545fb12ee72c00c", + "revisionTime": "2017-10-27T19:23:22Z" }, { "checksumSHA1": "gx1gJIMU6T0UNQ0bPZ/drQ8cpCI=", "path": "github.com/coreos/etcd/pkg/types", - "revision": "0fcafcb828f82a0d7cdc2801c0d4667dd4686c06", - "revisionTime": "2017-10-27T18:46:46Z" + "revision": "2cea13ba680fdb7b7763cc191545fb12ee72c00c", + "revisionTime": "2017-10-27T19:23:22Z" }, { "checksumSHA1": "sp2FkEyaIGiQFOEZCTDkBZgyHOs=", "path": "github.com/coreos/etcd/version", - "revision": "0fcafcb828f82a0d7cdc2801c0d4667dd4686c06", - "revisionTime": "2017-10-27T18:46:46Z" + "revision": "2cea13ba680fdb7b7763cc191545fb12ee72c00c", + "revisionTime": "2017-10-27T19:23:22Z" }, { "checksumSHA1": "97BsbXOiZ8+Kr+LIuZkQFtSj7H4=", @@ -1135,10 +1135,10 @@ "revisionTime": "2017-10-09T12:38:52Z" }, { - "checksumSHA1": "dy0XIP1KUS6QA0HEL8nN+9YIghU=", + "checksumSHA1": "gErokKgkKMOOuEhnIroZKtiMojY=", "path": "github.com/hashicorp/vault-plugin-auth-kubernetes", - "revision": "bf36d311cbb89cfb09ffe1cfdbe70c10eb861cdc", - "revisionTime": "2017-10-05T23:21:19Z" + "revision": "d5afd9bb38f35be7db3623de23b3ea130ab4c836", + "revisionTime": "2017-10-27T20:03:03Z" }, { "checksumSHA1": "ZhK6IO2XN81Y+3RAjTcVm1Ic7oU=", @@ -2034,135 +2034,137 @@ "revision": "eb3733d160e74a9c7e442f435eb3bea458e1d19f", "revisionTime": "2017-08-12T16:00:11Z" }, + { + "checksumSHA1": "93jvVkbZbmnfpghchCh04SG7wfQ=", + "path": "k8s.io/api/authentication/v1", + "revision": "218912509d74a117d05a718bb926d0948e531c20", + "revisionTime": "2017-10-26T20:24:34Z" + }, { "checksumSHA1": "iGhGLuzo/RvZapLDylR4A3F2zPg=", "path": "k8s.io/apimachinery/pkg/api/errors", - "revision": "d9697193dfed7468f65da0d9d231c20b634425df", - "revisionTime": "2017-10-25T19:24:24Z" + "revision": "18a564baac720819100827c16fdebcadb05b2d0d", + "revisionTime": "2017-10-26T18:46:55Z" }, { "checksumSHA1": "WT/IW4NJz8vg/TYQDOCysJHFnS8=", "path": "k8s.io/apimachinery/pkg/api/resource", - "revision": "d9697193dfed7468f65da0d9d231c20b634425df", - "revisionTime": "2017-10-25T19:24:24Z" + "revision": "18a564baac720819100827c16fdebcadb05b2d0d", + "revisionTime": "2017-10-26T18:46:55Z" }, { - "checksumSHA1": "OfPWJLjBJe0IHDYH0j7Ug82/g1M=", + "checksumSHA1": "pTFrxx4tfacwHuO6l8F7qoY0CUc=", "path": "k8s.io/apimachinery/pkg/apis/meta/v1", - "revision": "d9697193dfed7468f65da0d9d231c20b634425df", - "revisionTime": "2017-10-25T19:24:24Z" + "revision": "18a564baac720819100827c16fdebcadb05b2d0d", + "revisionTime": "2017-10-26T18:46:55Z" }, { "checksumSHA1": "Zv+sRDZkzH9GJBmcagRJhEOBndw=", "path": "k8s.io/apimachinery/pkg/conversion", - "revision": "d9697193dfed7468f65da0d9d231c20b634425df", - "revisionTime": "2017-10-25T19:24:24Z" + "revision": "18a564baac720819100827c16fdebcadb05b2d0d", + "revisionTime": "2017-10-26T18:46:55Z" }, { "checksumSHA1": "GA82FBmiCzVIXGUnTZtz5z5Er0s=", "path": 
"k8s.io/apimachinery/pkg/conversion/queryparams", - "revision": "d9697193dfed7468f65da0d9d231c20b634425df", - "revisionTime": "2017-10-25T19:24:24Z" + "revision": "18a564baac720819100827c16fdebcadb05b2d0d", + "revisionTime": "2017-10-26T18:46:55Z" }, { "checksumSHA1": "JKIs3haOjRLmpL0UDXfaz7QX7Sk=", "path": "k8s.io/apimachinery/pkg/fields", - "revision": "d9697193dfed7468f65da0d9d231c20b634425df", - "revisionTime": "2017-10-25T19:24:24Z" + "revision": "18a564baac720819100827c16fdebcadb05b2d0d", + "revisionTime": "2017-10-26T18:46:55Z" }, { - "checksumSHA1": "KponW1gm4wYZFKwwlXHYEIn+zhI=", + "checksumSHA1": "sVf1LHZw/B8ogK1ObbPhudKgJfs=", "path": "k8s.io/apimachinery/pkg/labels", - "revision": "d9697193dfed7468f65da0d9d231c20b634425df", - "revisionTime": "2017-10-25T19:24:24Z" + "revision": "18a564baac720819100827c16fdebcadb05b2d0d", + "revisionTime": "2017-10-26T18:46:55Z" }, { "checksumSHA1": "CBKogLKthxnboTxi9tY3qJZ8vCw=", "path": "k8s.io/apimachinery/pkg/runtime", - "revision": "d9697193dfed7468f65da0d9d231c20b634425df", - "revisionTime": "2017-10-25T19:24:24Z" + "revision": "18a564baac720819100827c16fdebcadb05b2d0d", + "revisionTime": "2017-10-26T18:46:55Z" }, { "checksumSHA1": "sAumyh8dKLMY2RAIXEDUuv6E8OM=", "path": "k8s.io/apimachinery/pkg/runtime/schema", - "revision": "d9697193dfed7468f65da0d9d231c20b634425df", - "revisionTime": "2017-10-25T19:24:24Z" + "revision": "18a564baac720819100827c16fdebcadb05b2d0d", + "revisionTime": "2017-10-26T18:46:55Z" }, { "checksumSHA1": "jjMq79D66L64Ku50azu5VuL6J+I=", "path": "k8s.io/apimachinery/pkg/selection", - "revision": "d9697193dfed7468f65da0d9d231c20b634425df", - "revisionTime": "2017-10-25T19:24:24Z" + "revision": "18a564baac720819100827c16fdebcadb05b2d0d", + "revisionTime": "2017-10-26T18:46:55Z" }, { "checksumSHA1": "bw1ACevKtvhp8qmK/V3A7+JkpAM=", "path": "k8s.io/apimachinery/pkg/types", - "revision": "d9697193dfed7468f65da0d9d231c20b634425df", - "revisionTime": "2017-10-25T19:24:24Z" + "revision": "18a564baac720819100827c16fdebcadb05b2d0d", + "revisionTime": "2017-10-26T18:46:55Z" }, { "checksumSHA1": "Jd2ZDrgkxdr7R9XohVdZmOWihbw=", "path": "k8s.io/apimachinery/pkg/util/errors", - "revision": "d9697193dfed7468f65da0d9d231c20b634425df", - "revisionTime": "2017-10-25T19:24:24Z" + "revision": "18a564baac720819100827c16fdebcadb05b2d0d", + "revisionTime": "2017-10-26T18:46:55Z" }, { "checksumSHA1": "RRDMwpjGIkOTLMyubRLJmapE+Ek=", "path": "k8s.io/apimachinery/pkg/util/intstr", - "revision": "d9697193dfed7468f65da0d9d231c20b634425df", - "revisionTime": "2017-10-25T19:24:24Z" + "revision": "18a564baac720819100827c16fdebcadb05b2d0d", + "revisionTime": "2017-10-26T18:46:55Z" }, { "checksumSHA1": "zz3cUfvgreG2N5eIv1w7VPSHwH0=", "path": "k8s.io/apimachinery/pkg/util/net", - "revision": "d9697193dfed7468f65da0d9d231c20b634425df", - "revisionTime": "2017-10-25T19:24:24Z" + "revision": "18a564baac720819100827c16fdebcadb05b2d0d", + "revisionTime": "2017-10-26T18:46:55Z" }, { - "checksumSHA1": "zbbftg6fKoFsl9reUMMLrVR76KI=", + "checksumSHA1": "geDTFhVkWXa5MnhuJ/AAYLGgq3I=", "path": "k8s.io/apimachinery/pkg/util/runtime", - "revision": "d9697193dfed7468f65da0d9d231c20b634425df", - "revisionTime": "2017-10-25T19:24:24Z" + "revision": "18a564baac720819100827c16fdebcadb05b2d0d", + "revisionTime": "2017-10-26T18:46:55Z" }, { "checksumSHA1": "2p066REe+o1932lveLsySRedt9E=", "path": "k8s.io/apimachinery/pkg/util/sets", - "revision": "d9697193dfed7468f65da0d9d231c20b634425df", - "revisionTime": "2017-10-25T19:24:24Z" + "revision": 
"18a564baac720819100827c16fdebcadb05b2d0d", + "revisionTime": "2017-10-26T18:46:55Z" }, { "checksumSHA1": "k60MqyoSBev3Ybs/G297aRv8uA0=", "path": "k8s.io/apimachinery/pkg/util/validation", - "revision": "d9697193dfed7468f65da0d9d231c20b634425df", - "revisionTime": "2017-10-25T19:24:24Z" + "revision": "18a564baac720819100827c16fdebcadb05b2d0d", + "revisionTime": "2017-10-26T18:46:55Z" }, { "checksumSHA1": "QmUaaalnjN+2cYMB5Ycf4ia93C0=", "path": "k8s.io/apimachinery/pkg/util/validation/field", - "revision": "d9697193dfed7468f65da0d9d231c20b634425df", - "revisionTime": "2017-10-25T19:24:24Z" + "revision": "18a564baac720819100827c16fdebcadb05b2d0d", + "revisionTime": "2017-10-26T18:46:55Z" }, { "checksumSHA1": "YudUrts7DUXaHq+Y1euWfIuOwBg=", "path": "k8s.io/apimachinery/pkg/util/wait", - "revision": "d9697193dfed7468f65da0d9d231c20b634425df", - "revisionTime": "2017-10-25T19:24:24Z" + "revision": "18a564baac720819100827c16fdebcadb05b2d0d", + "revisionTime": "2017-10-26T18:46:55Z" }, { "checksumSHA1": "6h4r1R/JbcTl0BI55jV7tJFe0dQ=", "path": "k8s.io/apimachinery/pkg/watch", - "revision": "d9697193dfed7468f65da0d9d231c20b634425df", - "revisionTime": "2017-10-25T19:24:24Z" + "revision": "18a564baac720819100827c16fdebcadb05b2d0d", + "revisionTime": "2017-10-26T18:46:55Z" }, { "checksumSHA1": "s35dSpFQP7xMIF/+fEFgJSD0h4o=", "path": "k8s.io/apimachinery/third_party/forked/golang/reflect", - "revision": "d9697193dfed7468f65da0d9d231c20b634425df", - "revisionTime": "2017-10-25T19:24:24Z" - }, - { - "path": "k8s.io/client-go/pkg/apis/authentication/v1", - "revision": "" + "revision": "18a564baac720819100827c16fdebcadb05b2d0d", + "revisionTime": "2017-10-26T18:46:55Z" }, { "checksumSHA1": "/zjulDhlMogVSOhPGM9UlDWyFuo=", From bba371c7ded3152d95a918332c627c5115982aa0 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Fri, 27 Oct 2017 16:43:26 -0400 Subject: [PATCH 041/329] Fix C&P in docs. Fixes #3454 --- website/source/api/secret/transit/index.html.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/api/secret/transit/index.html.md b/website/source/api/secret/transit/index.html.md index 5520b2493f..fa4de364a1 100644 --- a/website/source/api/secret/transit/index.html.md +++ b/website/source/api/secret/transit/index.html.md @@ -556,7 +556,7 @@ then made available to trusted users. part of the URL. - `name` `(string: )` – Specifies the name of the encryption key to - re-encrypt against. This is specified as part of the URL. + use to encrypt the datakey. This is specified as part of the URL. - `context` `(string: "")` – Specifies the key derivation context, provided as a base64-encoded string. This must be provided if derivation is enabled. From d538dc13ba4aebbd6dfcb470a4f55754fce64e51 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Fri, 27 Oct 2017 17:28:50 -0400 Subject: [PATCH 042/329] Update seal type names --- vault/seal.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vault/seal.go b/vault/seal.go index 92d727e4a8..189482c10e 100644 --- a/vault/seal.go +++ b/vault/seal.go @@ -42,8 +42,8 @@ const ( const ( SealTypeShamir = "shamir" - SealTypePKCS11 = "hsm-pkcs11-auto" - SealTypeAWSKMS = "awskms-auto" + SealTypePKCS11 = "pkcs11" + SealTypeAWSKMS = "awskms" SealTypeTest = "test-auto" RecoveryTypeUnsupported = "unsupported" From ad6b4df9a8980c8750e8f2a5e4c27936584cda39 Mon Sep 17 00:00:00 2001 From: Nathan Valentine Date: Mon, 30 Oct 2017 09:04:38 -0700 Subject: [PATCH 043/329] Should these names not reference Vault? 
(#3506) Since we are in the Vault docs, should these names not reference Vault instead of Nomad? --- website/source/docs/configuration/listener/tcp.html.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/source/docs/configuration/listener/tcp.html.md b/website/source/docs/configuration/listener/tcp.html.md index 2605b46142..cf677145ac 100644 --- a/website/source/docs/configuration/listener/tcp.html.md +++ b/website/source/docs/configuration/listener/tcp.html.md @@ -86,8 +86,8 @@ This example shows enabling a TLS listener. ```hcl listener "tcp" { - tls_cert_file = "/etc/certs/nomad.crt" - tls_key_file = "/etc/certs/nomad.key" + tls_cert_file = "/etc/certs/vault.crt" + tls_key_file = "/etc/certs/vault.key" } ``` From 3e81fe4c623138314da5b5080eaea5424e5b4bae Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Mon, 30 Oct 2017 15:05:47 -0500 Subject: [PATCH 044/329] Simplify TTL/MaxTTL logic in SSH CA paths and sane with the rest of how (#3507) Vault parses/returns TTLs. --- builtin/logical/ssh/path_roles.go | 67 ++++++++++--------------------- builtin/logical/ssh/path_sign.go | 42 +++++++++---------- 2 files changed, 40 insertions(+), 69 deletions(-) diff --git a/builtin/logical/ssh/path_roles.go b/builtin/logical/ssh/path_roles.go index 6be96b6ac6..de2f1d5ffd 100644 --- a/builtin/logical/ssh/path_roles.go +++ b/builtin/logical/ssh/path_roles.go @@ -175,7 +175,7 @@ func pathRoles(b *backend) *framework.Path { `, }, "ttl": &framework.FieldSchema{ - Type: framework.TypeString, + Type: framework.TypeDurationSecond, Description: ` [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] The lease duration if no specific lease duration is @@ -184,7 +184,7 @@ func pathRoles(b *backend) *framework.Path { the value of max_ttl.`, }, "max_ttl": &framework.FieldSchema{ - Type: framework.TypeString, + Type: framework.TypeDurationSecond, Description: ` [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] The maximum allowed lease duration @@ -433,9 +433,9 @@ func (b *backend) pathRoleWrite(req *logical.Request, d *framework.FieldData) (* } func (b *backend) createCARole(allowedUsers, defaultUser string, data *framework.FieldData) (*sshRole, *logical.Response) { + ttl := time.Duration(data.Get("ttl").(int)) * time.Second + maxTTL := time.Duration(data.Get("max_ttl").(int)) * time.Second role := &sshRole{ - MaxTTL: data.Get("max_ttl").(string), - TTL: data.Get("ttl").(string), AllowedCriticalOptions: data.Get("allowed_critical_options").(string), AllowedExtensions: data.Get("allowed_extensions").(string), AllowUserCertificates: data.Get("allow_user_certificates").(bool), @@ -457,44 +457,12 @@ func (b *backend) createCARole(allowedUsers, defaultUser string, data *framework defaultCriticalOptions := convertMapToStringValue(data.Get("default_critical_options").(map[string]interface{})) defaultExtensions := convertMapToStringValue(data.Get("default_extensions").(map[string]interface{})) - var maxTTL time.Duration - maxSystemTTL := b.System().MaxLeaseTTL() - if len(role.MaxTTL) == 0 { - maxTTL = maxSystemTTL - } else { - var err error - maxTTL, err = parseutil.ParseDurationSecond(role.MaxTTL) - if err != nil { - return nil, logical.ErrorResponse(fmt.Sprintf( - "Invalid max ttl: %s", err)) - } - } - if maxTTL > maxSystemTTL { - return nil, logical.ErrorResponse("Requested max TTL is higher than backend maximum") + if ttl != 0 && maxTTL != 0 && ttl > maxTTL { + return nil, logical.ErrorResponse( + `"ttl" value must be less 
than "max_ttl" when both are specified`) } - ttl := b.System().DefaultLeaseTTL() - if len(role.TTL) != 0 { - var err error - ttl, err = parseutil.ParseDurationSecond(role.TTL) - if err != nil { - return nil, logical.ErrorResponse(fmt.Sprintf( - "Invalid ttl: %s", err)) - } - } - if ttl > maxTTL { - // If they are using the system default, cap it to the role max; - // if it was specified on the command line, make it an error - if len(role.TTL) == 0 { - ttl = maxTTL - } else { - return nil, logical.ErrorResponse( - `"ttl" value must be less than "max_ttl" and/or backend default max lease TTL value`, - ) - } - } - - // Persist clamped TTLs + // Persist TTLs role.TTL = ttl.String() role.MaxTTL = maxTTL.String() role.DefaultCriticalOptions = defaultCriticalOptions @@ -551,13 +519,22 @@ func (b *backend) pathRoleRead(req *logical.Request, d *framework.FieldData) (*l }, }, nil } else if role.KeyType == KeyTypeCA { + ttl, err := parseutil.ParseDurationSecond(role.TTL) + if err != nil { + return nil, err + } + maxTTL, err := parseutil.ParseDurationSecond(role.MaxTTL) + if err != nil { + return nil, err + } + return &logical.Response{ Data: map[string]interface{}{ - "allowed_users": role.AllowedUsers, - "allowed_domains": role.AllowedDomains, - "default_user": role.DefaultUser, - "max_ttl": role.MaxTTL, - "ttl": role.TTL, + "allowed_users": role.AllowedUsers, + "allowed_domains": role.AllowedDomains, + "default_user": role.DefaultUser, + "ttl": int64(ttl.Seconds()), + "max_ttl": int64(maxTTL.Seconds()), "allowed_critical_options": role.AllowedCriticalOptions, "allowed_extensions": role.AllowedExtensions, "allow_user_certificates": role.AllowUserCertificates, diff --git a/builtin/logical/ssh/path_sign.go b/builtin/logical/ssh/path_sign.go index 4d62f4a375..b53a0ad0a1 100644 --- a/builtin/logical/ssh/path_sign.go +++ b/builtin/logical/ssh/path_sign.go @@ -43,7 +43,7 @@ func pathSign(b *backend) *framework.Path { Description: `The desired role with configuration for this request.`, }, "ttl": &framework.FieldSchema{ - Type: framework.TypeString, + Type: framework.TypeDurationSecond, Description: `The requested Time To Live for the SSH certificate; sets the expiration date. 
If not specified the role default, backend default, or system @@ -345,40 +345,34 @@ func (b *backend) calculateExtensions(data *framework.FieldData, role *sshRole) } func (b *backend) calculateTTL(data *framework.FieldData, role *sshRole) (time.Duration, error) { - var ttl, maxTTL time.Duration - var ttlField string - ttlFieldInt, ok := data.GetOk("ttl") - if !ok { - ttlField = role.TTL - } else { - ttlField = ttlFieldInt.(string) - } + var err error - if len(ttlField) == 0 { + ttlRaw, specifiedTTL := data.GetOk("ttl") + if specifiedTTL { + ttl = time.Duration(ttlRaw.(int)) * time.Second + } else { + ttl, err = parseutil.ParseDurationSecond(role.TTL) + if err != nil { + return 0, err + } + } + if ttl == 0 { ttl = b.System().DefaultLeaseTTL() - } else { - var err error - ttl, err = parseutil.ParseDurationSecond(ttlField) - if err != nil { - return 0, fmt.Errorf("invalid requested ttl: %s", err) - } } - if len(role.MaxTTL) == 0 { + maxTTL, err = parseutil.ParseDurationSecond(role.MaxTTL) + if err != nil { + return 0, err + } + if maxTTL == 0 { maxTTL = b.System().MaxLeaseTTL() - } else { - var err error - maxTTL, err = parseutil.ParseDurationSecond(role.MaxTTL) - if err != nil { - return 0, fmt.Errorf("invalid requested max ttl: %s", err) - } } if ttl > maxTTL { // Don't error if they were using system defaults, only error if // they specifically chose a bad TTL - if len(ttlField) == 0 { + if !specifiedTTL { ttl = maxTTL } else { return 0, fmt.Errorf("ttl is larger than maximum allowed (%d)", maxTTL/time.Second) From 3e831ecf3f41aefa311ee0be34cd636c491e7bad Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Mon, 30 Oct 2017 16:08:18 -0400 Subject: [PATCH 045/329] changelog++ --- CHANGELOG.md | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index efd986703f..9c40e0e122 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,25 +1,32 @@ ## 0.8.4 (Unreleased) DEPRECATIONS/CHANGES: - * aws-ec2: The client nonce generated by the backend that gets returned along - with the authentication response will be audited in plaintext. If this is - undesired, the clients can choose to supply a custom nonce to the login - endpoint. The custom nonce set by the client will from now on, not be - returned back with the authentication response, and hence not audit logged. + + * AWS EC2 client nonce behavior: The client nonce generated by the backend + that gets returned along with the authentication response will be audited in + plaintext. If this is undesired, the clients can choose to supply a custom + nonce to the login endpoint. The custom nonce set by the client will from + now on, not be returned back with the authentication response, and hence not + audit logged. + * SSH CA role read changes: When reading back a role from the `ssh` backend, + the TTL/max TTL values will now be an integer number of seconds rather than + a string. This better matches the API elsewhere in Vault. IMPROVEMENTS: * api: Add ability to set custom headers on each call [GH-3394] * command/server: Add config option to disable requesting client certificates [GH-3373] + * physical/file: Use `700` as permissions when creating directories. The files + themselves were `600` and are all encrypted, but this doesn't hurt. 
* secret/cassandra: Work around Cassandra ignoring consistency levels for a user listing query [GH-3469] * secret/pki: Allow entering URLs for `pki` as both comma-separated strings and JSON arrays [GH-3409] + * secret/ssh: Role TTL/max TTL can now be specified as either a string or an + integer [GH-3507] * secret/transit: Sign and verify operations now support a `none` hash algorithm to allow signing/verifying pre-hashed data [GH-3448] - * physical/file: Use `700` as permissions when creating directories. The files - themselves were `600` and are all encrypted, but this doesn't hurt. BUG FIXES: From 4121791cb95e1a02f4547e99512d5017914500e3 Mon Sep 17 00:00:00 2001 From: Brian Kassouf Date: Mon, 30 Oct 2017 13:24:25 -0700 Subject: [PATCH 046/329] Add the ability to glob allowed roles in the Database Backend (#3387) * Add the ability to glob allowed roles in the Database Backend * Make the error messages better * Switch to the go-glob repo --- builtin/logical/database/backend_test.go | 34 +++++++++++++++++++ builtin/logical/database/path_creds_create.go | 2 +- helper/strutil/strutil.go | 13 +++++++ helper/strutil/strutil_test.go | 32 +++++++++++++++++ 4 files changed, 80 insertions(+), 1 deletion(-) diff --git a/builtin/logical/database/backend_test.go b/builtin/logical/database/backend_test.go index d5461e2d63..f4dfd3b486 100644 --- a/builtin/logical/database/backend_test.go +++ b/builtin/logical/database/backend_test.go @@ -609,6 +609,40 @@ func TestBackend_allowedRoles(t *testing.T) { t.Fatalf("expected error to be:%s got:%#v\n", logical.ErrPermissionDenied, err) } + // update connection with glob allowed roles connection + data = map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + "allowed_roles": "allow*", + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Get creds, should work. + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "creds/allowed", + Storage: config.StorageView, + Data: data, + } + credsResp, err = b.HandleRequest(req) + if err != nil || (credsResp != nil && credsResp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, credsResp) + } + + if !testCredsExist(t, credsResp, connURL) { + t.Fatalf("Creds should exist") + } + // update connection with * allowed roles connection data = map[string]interface{}{ "connection_url": connURL, diff --git a/builtin/logical/database/path_creds_create.go b/builtin/logical/database/path_creds_create.go index 6fb61a3e52..610da726c6 100644 --- a/builtin/logical/database/path_creds_create.go +++ b/builtin/logical/database/path_creds_create.go @@ -49,7 +49,7 @@ func (b *databaseBackend) pathCredsCreateRead() framework.OperationFunc { // If role name isn't in the database's allowed roles, send back a // permission denied. 
- if !strutil.StrListContains(dbConfig.AllowedRoles, "*") && !strutil.StrListContains(dbConfig.AllowedRoles, name) { + if !strutil.StrListContains(dbConfig.AllowedRoles, "*") && !strutil.StrListContainsGlob(dbConfig.AllowedRoles, name) { return nil, logical.ErrPermissionDenied } diff --git a/helper/strutil/strutil.go b/helper/strutil/strutil.go index b5e69c4f25..eba4164d6e 100644 --- a/helper/strutil/strutil.go +++ b/helper/strutil/strutil.go @@ -6,8 +6,21 @@ import ( "fmt" "sort" "strings" + + glob "github.com/ryanuber/go-glob" ) +// StrListContainsGlob looks for a string in a list of strings and allows +// globs. +func StrListContainsGlob(haystack []string, needle string) bool { + for _, item := range haystack { + if glob.Glob(item, needle) { + return true + } + } + return false +} + // StrListContains looks for a string in a list of strings. func StrListContains(haystack []string, needle string) bool { for _, item := range haystack { diff --git a/helper/strutil/strutil_test.go b/helper/strutil/strutil_test.go index ce02719d1f..2939265006 100644 --- a/helper/strutil/strutil_test.go +++ b/helper/strutil/strutil_test.go @@ -57,6 +57,38 @@ func TestStrutil_EquivalentSlices(t *testing.T) { } } +func TestStrutil_ListContainsGlob(t *testing.T) { + haystack := []string{ + "dev", + "ops*", + "root/*", + "*-dev", + "_*_", + } + if StrListContainsGlob(haystack, "tubez") { + t.Fatalf("Value shouldn't exist") + } + if !StrListContainsGlob(haystack, "root/test") { + t.Fatalf("Value should exist") + } + if !StrListContainsGlob(haystack, "ops_test") { + t.Fatalf("Value should exist") + } + if !StrListContainsGlob(haystack, "ops") { + t.Fatalf("Value should exist") + } + if !StrListContainsGlob(haystack, "dev") { + t.Fatalf("Value should exist") + } + if !StrListContainsGlob(haystack, "test-dev") { + t.Fatalf("Value should exist") + } + if !StrListContainsGlob(haystack, "_test_") { + t.Fatalf("Value should exist") + } + +} + func TestStrutil_ListContains(t *testing.T) { haystack := []string{ "dev", From 0caf6e986c7b0ed98eb074df0bbb7000b8f00683 Mon Sep 17 00:00:00 2001 From: Brian Kassouf Date: Mon, 30 Oct 2017 13:26:15 -0700 Subject: [PATCH 047/329] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9c40e0e122..b526869986 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,7 @@ IMPROVEMENTS: integer [GH-3507] * secret/transit: Sign and verify operations now support a `none` hash algorithm to allow signing/verifying pre-hashed data [GH-3448] + * secret/database: Add the ability to glob allowed roles in the Database Backend [GH-3387] BUG FIXES: From 482d73aebe9e64cb0af479d5e2915714050bc9a5 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Tue, 31 Oct 2017 19:11:24 +0000 Subject: [PATCH 048/329] Minor/Cosmetic fixes --- builtin/logical/nomad/backend_test.go | 11 +++++------ builtin/logical/nomad/path_config.go | 5 +---- website/source/docs/secrets/nomad/index.html.md | 5 ++--- website/source/layouts/api.erb | 6 +++--- website/source/layouts/docs.erb | 8 ++++---- 5 files changed, 15 insertions(+), 20 deletions(-) diff --git a/builtin/logical/nomad/backend_test.go b/builtin/logical/nomad/backend_test.go index b64dc1bea1..8f47585807 100644 --- a/builtin/logical/nomad/backend_test.go +++ b/builtin/logical/nomad/backend_test.go @@ -2,7 +2,6 @@ package nomad import ( "fmt" - "log" "os" "reflect" "testing" @@ -64,7 +63,7 @@ func prepareTestContainer(t *testing.T) (cleanup func(), retAddress string, noma t.Fatalf("err: %v", err) } 
nomadToken = aclbootstrap.SecretID - log.Printf("[WARN] Generated Master token: %s", nomadToken) + t.Log("[WARN] Generated Master token: %s", nomadToken) policy := &nomadapi.ACLPolicy{ Name: "test", Description: "test", @@ -143,7 +142,7 @@ func TestBackend_config_access(t *testing.T) { expected := map[string]interface{}{ "address": connData["address"].(string), - "scheme": "http", + "scheme": "https", } if !reflect.DeepEqual(expected, resp.Data) { t.Fatalf("bad: expected:%#v\nactual:%#v\n", expected, resp.Data) @@ -213,7 +212,7 @@ func TestBackend_renew_revoke(t *testing.T) { if err := mapstructure.Decode(resp.Data, &d); err != nil { t.Fatal(err) } - log.Printf("[WARN] Generated token: %s with accesor %s", d.Token, d.Accessor) + t.Log("[WARN] Generated token: %s with accesor %s", d.Token, d.Accessor) // Build a client and verify that the credentials work nomadapiConfig := nomadapi.DefaultConfig() @@ -224,7 +223,7 @@ func TestBackend_renew_revoke(t *testing.T) { t.Fatal(err) } - log.Printf("[WARN] Verifying that the generated token works...") + t.Log("[WARN] Verifying that the generated token works...") _, err = client.Agent().Members, nil if err != nil { t.Fatal(err) @@ -256,7 +255,7 @@ func TestBackend_renew_revoke(t *testing.T) { Namespace: "default", } - log.Printf("[WARN] Verifying that the generated token does not exist...") + t.Log("[WARN] Verifying that the generated token does not exist...") _, _, err = mgmtclient.ACLTokens().Info(d.Accessor, q) if err == nil { t.Fatal("err: expected error") diff --git a/builtin/logical/nomad/path_config.go b/builtin/logical/nomad/path_config.go index d9e6dc1280..bcd3e15470 100644 --- a/builtin/logical/nomad/path_config.go +++ b/builtin/logical/nomad/path_config.go @@ -20,10 +20,7 @@ func pathConfigAccess() *framework.Path { Type: framework.TypeString, Description: "URI scheme for the Nomad address", - // https would be a better default but Consul on its own - // defaults to HTTP access, and when HTTPS is enabled it - // disables HTTP, so there isn't really any harm done here. - Default: "http", + Default: "https", }, "token": &framework.FieldSchema{ diff --git a/website/source/docs/secrets/nomad/index.html.md b/website/source/docs/secrets/nomad/index.html.md index 3f11148ca5..4201f98c66 100644 --- a/website/source/docs/secrets/nomad/index.html.md +++ b/website/source/docs/secrets/nomad/index.html.md @@ -17,8 +17,7 @@ API tokens dynamically based on pre-existing Nomad ACL policies. This page will show a quick start for this backend. For detailed documentation on every path, use `vault path-help` after mounting the backend. -~> **Version information** ACLs are only available on Nomad 0.7.0 and above, -which is currently in beta. +~> **Version information** ACLs are only available on Nomad 0.7.0 and above. ## Quick Start @@ -46,7 +45,7 @@ Create Index = 7 Modify Index = 7 ``` The suggested pattern is to generate a token specifically for Vault, following the -[Nomad ACL guide](https://www.consul.io/docs/agent/http/acl.html) +[Nomad ACL guide](https://www.nomadproject.io/guides/acl.html) Next, we must configure Vault to know how to contact Nomad. 
This is done by writing the access information: diff --git a/website/source/layouts/api.erb b/website/source/layouts/api.erb index 26c394e960..2150cd1ef3 100644 --- a/website/source/layouts/api.erb +++ b/website/source/layouts/api.erb @@ -19,9 +19,6 @@ > Consul - > - Nomad - > Cubbyhole @@ -59,6 +56,9 @@ > Identity + > + Nomad + > PKI diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb index 2707dd8717..03e9eb3bd9 100644 --- a/website/source/layouts/docs.erb +++ b/website/source/layouts/docs.erb @@ -179,10 +179,6 @@ Consul - > - Nomad - - > Cubbyhole @@ -225,6 +221,10 @@ Identity + > + Nomad + + > PKI (Certificates) From 3a0d7ac9a6ee27f6e6d503f91972616fcef30a73 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Tue, 31 Oct 2017 20:56:56 +0000 Subject: [PATCH 049/329] Unifying Storage and API path in role --- builtin/logical/nomad/backend_test.go | 2 +- builtin/logical/nomad/path_roles.go | 12 ++++++------ builtin/logical/nomad/path_token.go | 2 +- website/source/api/secret/nomad/index.html.md | 18 +++++++++--------- .../source/docs/secrets/nomad/index.html.md | 4 ++-- 5 files changed, 19 insertions(+), 19 deletions(-) diff --git a/builtin/logical/nomad/backend_test.go b/builtin/logical/nomad/backend_test.go index 8f47585807..d88b5cfca1 100644 --- a/builtin/logical/nomad/backend_test.go +++ b/builtin/logical/nomad/backend_test.go @@ -178,7 +178,7 @@ func TestBackend_renew_revoke(t *testing.T) { t.Fatal(err) } - req.Path = "roles/test" + req.Path = "role/test" req.Data = map[string]interface{}{ "policy": []string{"policy"}, "lease": "6h", diff --git a/builtin/logical/nomad/path_roles.go b/builtin/logical/nomad/path_roles.go index 53fb7119d2..3e1e6841e1 100644 --- a/builtin/logical/nomad/path_roles.go +++ b/builtin/logical/nomad/path_roles.go @@ -10,7 +10,7 @@ import ( func pathListRoles(b *backend) *framework.Path { return &framework.Path{ - Pattern: "roles/?$", + Pattern: "role/?$", Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathRoleList, @@ -20,7 +20,7 @@ func pathListRoles(b *backend) *framework.Path { func pathRoles() *framework.Path { return &framework.Path{ - Pattern: "roles/" + framework.GenericNameRegex("name"), + Pattern: "role/" + framework.GenericNameRegex("name"), Fields: map[string]*framework.FieldSchema{ "name": &framework.FieldSchema{ Type: framework.TypeString, @@ -62,7 +62,7 @@ Defaults to 'client'.`, func (b *backend) pathRoleList( req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - entries, err := req.Storage.List("policy/") + entries, err := req.Storage.List("role/") if err != nil { return nil, err } @@ -74,7 +74,7 @@ func pathRolesRead( req *logical.Request, d *framework.FieldData) (*logical.Response, error) { name := d.Get("name").(string) - entry, err := req.Storage.Get("policy/" + name) + entry, err := req.Storage.Get("role/" + name) if err != nil { return nil, err } @@ -138,7 +138,7 @@ func pathRolesWrite( } } - entry, err := logical.StorageEntryJSON("policy/"+name, roleConfig{ + entry, err := logical.StorageEntryJSON("role/"+name, roleConfig{ Policy: policy, Lease: lease, TokenType: tokenType, @@ -158,7 +158,7 @@ func pathRolesWrite( func pathRolesDelete( req *logical.Request, d *framework.FieldData) (*logical.Response, error) { name := d.Get("name").(string) - if err := req.Storage.Delete("policy/" + name); err != nil { + if err := req.Storage.Delete("role/" + name); err != nil { return nil, err } return nil, nil diff --git a/builtin/logical/nomad/path_token.go 
b/builtin/logical/nomad/path_token.go index 1a1c66fdee..b8f5efa350 100644 --- a/builtin/logical/nomad/path_token.go +++ b/builtin/logical/nomad/path_token.go @@ -29,7 +29,7 @@ func (b *backend) pathTokenRead( req *logical.Request, d *framework.FieldData) (*logical.Response, error) { name := d.Get("name").(string) - entry, err := req.Storage.Get("policy/" + name) + entry, err := req.Storage.Get("role/" + name) if err != nil { return nil, fmt.Errorf("error retrieving role: %s", err) } diff --git a/website/source/api/secret/nomad/index.html.md b/website/source/api/secret/nomad/index.html.md index 9cdf59b976..d3379b2852 100644 --- a/website/source/api/secret/nomad/index.html.md +++ b/website/source/api/secret/nomad/index.html.md @@ -59,7 +59,7 @@ updated attributes. | Method | Path | Produces | | :------- | :--------------------------- | :--------------------- | -| `POST` | `/nomad/roles/:name` | `204 (empty body)` | +| `POST` | `/nomad/role/:name` | `204 (empty body)` | ### Parameters @@ -95,7 +95,7 @@ $ curl \ --request POST \ --header "X-Vault-Token: ..." \ --data @payload.json \ - https://vault.rocks/v1/nomad/roles/monitoring + https://vault.rocks/v1/nomad/role/monitoring ``` ## Read Role @@ -105,7 +105,7 @@ If no role exists with that name, a 404 is returned. | Method | Path | Produces | | :------- | :--------------------------- | :--------------------- | -| `GET` | `/nomad/roles/:name` | `200 application/json` | +| `GET` | `/nomad/role/:name` | `200 application/json` | ### Parameters @@ -117,7 +117,7 @@ If no role exists with that name, a 404 is returned. ``` $ curl \ --header "X-Vault-Token: ..." \ - https://vault.rocks/v1/nomad/roles/monitoring + https://vault.rocks/v1/nomad/role/monitoring ``` ### Sample Response @@ -147,8 +147,8 @@ This endpoint lists all existing roles in the backend. | Method | Path | Produces | | :------- | :--------------------------- | :--------------------- | -| `LIST` | `/nomad/roles` | `200 application/json` | -| `GET` | `/nomad/roles?list=true` | `200 application/json` | +| `LIST` | `/nomad/role` | `200 application/json` | +| `GET` | `/nomad/role?list=true` | `200 application/json` | ### Sample Request @@ -156,7 +156,7 @@ This endpoint lists all existing roles in the backend. $ curl \ --header "X-Vault-Token: ..." \ --request LIST \ - https://vault.rocks/v1/nomad/roles + https://vault.rocks/v1/nomad/role ``` ### Sample Response @@ -185,7 +185,7 @@ not exist, this endpoint will still return a successful response. | Method | Path | Produces | | :------- | :--------------------------- | :--------------------- | -| `DELETE` | `/nomad/roles/:name` | `204 (empty body)` | +| `DELETE` | `/nomad/role/:name` | `204 (empty body)` | ### Parameters @@ -198,7 +198,7 @@ not exist, this endpoint will still return a successful response. $ curl \ --request DELETE \ --header "X-Vault-Token: ..." \ - https://vault.rocks/v1/nomad/roles/example-role + https://vault.rocks/v1/nomad/role/example-role ``` ## Generate Credential diff --git a/website/source/docs/secrets/nomad/index.html.md b/website/source/docs/secrets/nomad/index.html.md index 4201f98c66..754426762a 100644 --- a/website/source/docs/secrets/nomad/index.html.md +++ b/website/source/docs/secrets/nomad/index.html.md @@ -67,8 +67,8 @@ to a set of policy names used to generate those credentials. For example, lets c an "monitoring" role that maps to a "readonly" policy: ``` -$ vault write nomad/roles/monitoring policy=readonly -Success! 
Data written to: nomad/roles/monitoring +$ vault write nomad/role/monitoring policy=readonly +Success! Data written to: nomad/role/monitoring ``` The backend expects either a single or a comma separated list of policy names. From ffb9343f5f638e1f32f6168b34729a7f137e7349 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Tue, 31 Oct 2017 21:12:14 +0000 Subject: [PATCH 050/329] Should return an error if trying create a management token with policies attached --- builtin/logical/nomad/path_roles.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/builtin/logical/nomad/path_roles.go b/builtin/logical/nomad/path_roles.go index 3e1e6841e1..e4e7456006 100644 --- a/builtin/logical/nomad/path_roles.go +++ b/builtin/logical/nomad/path_roles.go @@ -126,6 +126,11 @@ func pathRolesWrite( return logical.ErrorResponse( "policy cannot be empty when not using management tokens"), nil } + } else { + if len(policy) != 0 { + return logical.ErrorResponse( + "policy should be empty when using management tokens"), nil + } } var lease time.Duration From 83f77d5a5f198dd97217b46e13082f893727e64e Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Tue, 31 Oct 2017 20:58:45 -0500 Subject: [PATCH 051/329] Fix memory leak when a connection would hit the cluster port and go away (#3513) --- vault/request_forwarding.go | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/vault/request_forwarding.go b/vault/request_forwarding.go index 8635281d0f..13dbcdeb69 100644 --- a/vault/request_forwarding.go +++ b/vault/request_forwarding.go @@ -122,11 +122,10 @@ func (c *Core) startForwarding() error { // Accept the connection conn, err := tlsLn.Accept() - if conn != nil { - // Always defer although it may be closed ahead of time - defer conn.Close() - } - if err != nil { + if err != nil || conn == nil { + if conn != nil { + conn.Close() + } continue } @@ -138,9 +137,7 @@ func (c *Core) startForwarding() error { if c.logger.IsDebug() { c.logger.Debug("core: error handshaking cluster connection", "error", err) } - if conn != nil { - conn.Close() - } + conn.Close() continue } @@ -153,10 +150,14 @@ func (c *Core) startForwarding() error { c.logger.Trace("core: got request forwarding connection") c.clusterParamsLock.RLock() - go fws.ServeConn(conn, &http2.ServeConnOpts{ - Handler: c.rpcServer, - }) + rpcServer := c.rpcServer c.clusterParamsLock.RUnlock() + go func() { + defer conn.Close() + fws.ServeConn(conn, &http2.ServeConnOpts{ + Handler: rpcServer, + }) + }() default: c.logger.Debug("core: unknown negotiated protocol on cluster port") From c9f01963c369d5748603baec08bfafdc99814b08 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Tue, 31 Oct 2017 21:59:33 -0400 Subject: [PATCH 052/329] changelog++ --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b526869986..d6501097b8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -34,6 +34,8 @@ BUG FIXES: * api: Fix panic when setting a custom HTTP client but with a nil transport [GH-3437] * auth/radius: Fix logging in in some situations [GH-3461] + * core: Fix memleak when a connection would connect to the cluster port and + then go away [GH-3513] * physical/etcd3: Fix some listing issues due to how etcd3 does prefix matching [GH-3406] * physical/file: Fix listing when underscores are the first component of a From 5d3513b56893716ddd1d6763f8077a012c0bcd3c Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Wed, 1 Nov 2017 07:36:14 +0000 Subject: [PATCH 053/329] tokenType can never be 
nil/empty string as there are default values --- builtin/logical/nomad/path_roles.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/builtin/logical/nomad/path_roles.go b/builtin/logical/nomad/path_roles.go index e4e7456006..3beec71754 100644 --- a/builtin/logical/nomad/path_roles.go +++ b/builtin/logical/nomad/path_roles.go @@ -87,10 +87,6 @@ func pathRolesRead( return nil, err } - if result.TokenType == "" { - result.TokenType = "client" - } - // Generate the response resp := &logical.Response{ Data: map[string]interface{}{ From c4bf80c84f619df399ab08976d9ad38914c78439 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Wed, 1 Nov 2017 07:41:58 +0000 Subject: [PATCH 054/329] Ignoring userErr as it will be nil anyway --- builtin/logical/nomad/path_config.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/builtin/logical/nomad/path_config.go b/builtin/logical/nomad/path_config.go index bcd3e15470..453fbbe630 100644 --- a/builtin/logical/nomad/path_config.go +++ b/builtin/logical/nomad/path_config.go @@ -57,13 +57,10 @@ func readConfigAccess(storage logical.Storage) (*accessConfig, error, error) { func pathConfigAccessRead( req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - conf, userErr, intErr := readConfigAccess(req.Storage) + conf, _, intErr := readConfigAccess(req.Storage) if intErr != nil { return nil, intErr } - if userErr != nil { - return logical.ErrorResponse(userErr.Error()), nil - } if conf == nil { return nil, fmt.Errorf("no user error reported but nomad access configuration not found") } From dcaec0a880682c04159029fc78296064c7a9db6d Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Wed, 1 Nov 2017 07:50:17 +0000 Subject: [PATCH 055/329] Refactored config error to just have a single error exit path --- builtin/logical/nomad/path_config.go | 7 +++---- builtin/logical/nomad/secret_token.go | 9 +-------- 2 files changed, 4 insertions(+), 12 deletions(-) diff --git a/builtin/logical/nomad/path_config.go b/builtin/logical/nomad/path_config.go index 453fbbe630..4b23f48191 100644 --- a/builtin/logical/nomad/path_config.go +++ b/builtin/logical/nomad/path_config.go @@ -42,9 +42,8 @@ func readConfigAccess(storage logical.Storage) (*accessConfig, error, error) { return nil, nil, err } if entry == nil { - return nil, fmt.Errorf( - "Access credentials for the backend itself haven't been configured. Please configure them at the '/config/access' endpoint"), - nil + return nil, nil, fmt.Errorf( + "Access credentials for the backend itself haven't been configured. Please configure them at the '/config/access' endpoint") } conf := &accessConfig{} @@ -62,7 +61,7 @@ func pathConfigAccessRead( return nil, intErr } if conf == nil { - return nil, fmt.Errorf("no user error reported but nomad access configuration not found") + return nil, fmt.Errorf("no user or internal error reported but nomad access configuration not found") } return &logical.Response{ diff --git a/builtin/logical/nomad/secret_token.go b/builtin/logical/nomad/secret_token.go index ad02b16fee..00df0fef7b 100644 --- a/builtin/logical/nomad/secret_token.go +++ b/builtin/logical/nomad/secret_token.go @@ -41,14 +41,7 @@ func secretTokenRevoke( return nil, userErr } - tokenRaw, ok := req.Secret.InternalData["accessor_id"] - if !ok { - // We return nil here because this is a pre-0.5.3 problem and there is - // nothing we can do about it. 
We already can't revoke the lease - // properly if it has been renewed and this is documented pre-0.5.3 - // behavior with a security bulletin about it. - return nil, nil - } + tokenRaw, _ := req.Secret.InternalData["accessor_id"] _, err := c.ACLTokens().Delete(tokenRaw.(string), nil) if err != nil { From ca92922a917d88d88499248a360c257606166984 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Wed, 1 Nov 2017 08:49:31 +0000 Subject: [PATCH 056/329] Refactoring readAcessConfig to return a single type of error instead of two --- builtin/logical/nomad/client.go | 5 +---- builtin/logical/nomad/path_config.go | 14 +++++++------- 2 files changed, 8 insertions(+), 11 deletions(-) diff --git a/builtin/logical/nomad/client.go b/builtin/logical/nomad/client.go index 2101d31dc8..80879d76d8 100644 --- a/builtin/logical/nomad/client.go +++ b/builtin/logical/nomad/client.go @@ -8,13 +8,10 @@ import ( ) func client(s logical.Storage) (*api.Client, error, error) { - conf, userErr, intErr := readConfigAccess(s) + conf, intErr := readConfigAccess(s) if intErr != nil { return nil, nil, intErr } - if userErr != nil { - return nil, userErr, nil - } if conf == nil { return nil, nil, fmt.Errorf("no error received but no configuration found") } diff --git a/builtin/logical/nomad/path_config.go b/builtin/logical/nomad/path_config.go index 4b23f48191..4e9eb151f9 100644 --- a/builtin/logical/nomad/path_config.go +++ b/builtin/logical/nomad/path_config.go @@ -36,27 +36,27 @@ func pathConfigAccess() *framework.Path { } } -func readConfigAccess(storage logical.Storage) (*accessConfig, error, error) { +func readConfigAccess(storage logical.Storage) (*accessConfig, error) { entry, err := storage.Get("config/access") if err != nil { - return nil, nil, err + return nil, err } if entry == nil { - return nil, nil, fmt.Errorf( - "Access credentials for the backend itself haven't been configured. Please configure them at the '/config/access' endpoint") + return nil, fmt.Errorf( + "Access credentials for the backend itself haven't been configured. 
Please configure them at the '/config/access' endpoint") } conf := &accessConfig{} if err := entry.DecodeJSON(conf); err != nil { - return nil, nil, fmt.Errorf("error reading nomad access configuration: %s", err) + return nil, fmt.Errorf("error reading nomad access configuration: %s", err) } - return conf, nil, nil + return conf, nil } func pathConfigAccessRead( req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - conf, _, intErr := readConfigAccess(req.Storage) + conf, intErr := readConfigAccess(req.Storage) if intErr != nil { return nil, intErr } From 6d3eb3f814886aa26a5c6e50aa1016babbcc5fc2 Mon Sep 17 00:00:00 2001 From: Vishal Nayak Date: Wed, 1 Nov 2017 14:14:21 -0400 Subject: [PATCH 057/329] fix deadlock while loading groups (#3515) --- vault/identity_store_util.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/vault/identity_store_util.go b/vault/identity_store_util.go index c9ea1eb06e..0444bf6f8a 100644 --- a/vault/identity_store_util.go +++ b/vault/identity_store_util.go @@ -60,6 +60,9 @@ func (i *IdentityStore) loadGroups() error { } i.logger.Debug("identity: groups collected", "num_existing", len(existing)) + i.groupLock.Lock() + defer i.groupLock.Unlock() + for _, key := range existing { bucket, err := i.groupPacker.GetBucket(i.groupPacker.BucketPath(key)) if err != nil { @@ -83,14 +86,11 @@ func (i *IdentityStore) loadGroups() error { i.logger.Trace("loading group", "name", group.Name, "id", group.ID) } - i.groupLock.Lock() - defer i.groupLock.Unlock() - txn := i.db.Txn(true) - defer txn.Abort() err = i.upsertGroupInTxn(txn, group, false) if err != nil { + txn.Abort() return fmt.Errorf("failed to update group in memdb: %v", err) } From 972834a61088c2363c1369dfaf5d34778aff3a88 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Wed, 1 Nov 2017 15:52:59 -0400 Subject: [PATCH 058/329] Use an atomic store in expiration loading test to fix race detector --- vault/token_store_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/vault/token_store_test.go b/vault/token_store_test.go index efdefbbad9..1dd1e916c4 100644 --- a/vault/token_store_test.go +++ b/vault/token_store_test.go @@ -8,6 +8,7 @@ import ( "sort" "strings" "sync" + "sync/atomic" "testing" "time" @@ -567,7 +568,7 @@ func TestTokenStore_CreateLookup_ExpirationInRestoreMode(t *testing.T) { // Reset expiration manager to restore mode ts.expiration.restoreModeLock.Lock() - ts.expiration.restoreMode = 1 + atomic.StoreInt32(&ts.expiration.restoreMode, 1) ts.expiration.restoreLocks = locksutil.CreateLocks() ts.expiration.restoreModeLock.Unlock() From 962ef74cb26bc5128801279b80181243bb3d1ca0 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Wed, 1 Nov 2017 21:00:41 -0500 Subject: [PATCH 059/329] Add seal type to seal-status output. 
(#3516) --- api/sys_seal.go | 1 + command/status.go | 4 +++- http/sys_seal.go | 2 ++ 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/api/sys_seal.go b/api/sys_seal.go index 97a49aeb44..72fe06e8ed 100644 --- a/api/sys_seal.go +++ b/api/sys_seal.go @@ -49,6 +49,7 @@ func sealStatusRequest(c *Sys, r *Request) (*SealStatusResponse, error) { } type SealStatusResponse struct { + Type string `json:"type"` Sealed bool `json:"sealed"` T int `json:"t"` N int `json:"n"` diff --git a/command/status.go b/command/status.go index 7b6cce348f..6b2f3a0a76 100644 --- a/command/status.go +++ b/command/status.go @@ -36,12 +36,14 @@ func (c *StatusCommand) Run(args []string) int { } outStr := fmt.Sprintf( - "Sealed: %v\n"+ + "Type: %s\n"+ + "Sealed: %v\n"+ "Key Shares: %d\n"+ "Key Threshold: %d\n"+ "Unseal Progress: %d\n"+ "Unseal Nonce: %v\n"+ "Version: %s", + sealStatus.Type, sealStatus.Sealed, sealStatus.N, sealStatus.T, diff --git a/http/sys_seal.go b/http/sys_seal.go index ef2430495a..4805e36de5 100644 --- a/http/sys_seal.go +++ b/http/sys_seal.go @@ -190,6 +190,7 @@ func handleSysSealStatusRaw(core *vault.Core, w http.ResponseWriter, r *http.Req progress, nonce := core.SecretProgress() respondOk(w, &SealStatusResponse{ + Type: sealConfig.Type, Sealed: sealed, T: sealConfig.SecretThreshold, N: sealConfig.SecretShares, @@ -202,6 +203,7 @@ func handleSysSealStatusRaw(core *vault.Core, w http.ResponseWriter, r *http.Req } type SealStatusResponse struct { + Type string `json:"type"` Sealed bool `json:"sealed"` T int `json:"t"` N int `json:"n"` From ed8cf070c9dc692455231a35e7ce3b549e9c96b1 Mon Sep 17 00:00:00 2001 From: Chris Hoffman Date: Thu, 2 Nov 2017 07:18:49 -0400 Subject: [PATCH 060/329] Add ability to require parameters in ACLs (#3510) --- vault/acl.go | 18 ++++++++++++ vault/acl_test.go | 29 ++++++++++++++----- vault/policy.go | 15 +++++++--- vault/policy_test.go | 18 ++++++++++++ website/source/docs/concepts/policies.html.md | 19 +++++++++--- 5 files changed, 83 insertions(+), 16 deletions(-) diff --git a/vault/acl.go b/vault/acl.go index 4a6b3bb613..f19bc29892 100644 --- a/vault/acl.go +++ b/vault/acl.go @@ -169,6 +169,18 @@ func NewACL(policies []*Policy) (*ACL, error) { } } + if len(pc.Permissions.RequiredParameters) > 0 { + if len(existingPerms.RequiredParameters) == 0 { + existingPerms.RequiredParameters = pc.Permissions.RequiredParameters + } else { + for _, v := range pc.Permissions.RequiredParameters { + if !strutil.StrListContains(existingPerms.RequiredParameters, v) { + existingPerms.RequiredParameters = append(existingPerms.RequiredParameters, v) + } + } + } + } + INSERT: tree.Insert(pc.Prefix, existingPerms) } @@ -322,6 +334,12 @@ CHECK: // Only check parameter permissions for operations that can modify // parameters. 
if op == logical.UpdateOperation || op == logical.CreateOperation { + for _, parameter := range permissions.RequiredParameters { + if _, ok := req.Data[strings.ToLower(parameter)]; !ok { + return + } + } + // If there are no data fields, allow if len(req.Data) == 0 { ret.Allowed = true diff --git a/vault/acl_test.go b/vault/acl_test.go index c753bb0ed1..09dfbcd6b8 100644 --- a/vault/acl_test.go +++ b/vault/acl_test.go @@ -232,6 +232,7 @@ func TestACL_PolicyMerge(t *testing.T) { maxWrappingTTL *time.Duration allowed map[string][]interface{} denied map[string][]interface{} + required []string } createDuration := func(seconds int) *time.Duration { @@ -240,14 +241,14 @@ func TestACL_PolicyMerge(t *testing.T) { } tcases := []tcase{ - {"foo/bar", nil, nil, nil, map[string][]interface{}{"zip": []interface{}{}, "baz": []interface{}{}}}, - {"hello/universe", createDuration(50), createDuration(200), map[string][]interface{}{"foo": []interface{}{}, "bar": []interface{}{}}, nil}, - {"allow/all", nil, nil, map[string][]interface{}{"*": []interface{}{}, "test": []interface{}{}, "test1": []interface{}{"foo"}}, nil}, - {"allow/all1", nil, nil, map[string][]interface{}{"*": []interface{}{}, "test": []interface{}{}, "test1": []interface{}{"foo"}}, nil}, - {"deny/all", nil, nil, nil, map[string][]interface{}{"*": []interface{}{}, "test": []interface{}{}}}, - {"deny/all1", nil, nil, nil, map[string][]interface{}{"*": []interface{}{}, "test": []interface{}{}}}, - {"value/merge", nil, nil, map[string][]interface{}{"test": []interface{}{3, 4, 1, 2}}, map[string][]interface{}{"test": []interface{}{3, 4, 1, 2}}}, - {"value/empty", nil, nil, map[string][]interface{}{"empty": []interface{}{}}, map[string][]interface{}{"empty": []interface{}{}}}, + {"foo/bar", nil, nil, nil, map[string][]interface{}{"zip": []interface{}{}, "baz": []interface{}{}}, []string{"baz"}}, + {"hello/universe", createDuration(50), createDuration(200), map[string][]interface{}{"foo": []interface{}{}, "bar": []interface{}{}}, nil, []string{"foo", "bar"}}, + {"allow/all", nil, nil, map[string][]interface{}{"*": []interface{}{}, "test": []interface{}{}, "test1": []interface{}{"foo"}}, nil, nil}, + {"allow/all1", nil, nil, map[string][]interface{}{"*": []interface{}{}, "test": []interface{}{}, "test1": []interface{}{"foo"}}, nil, nil}, + {"deny/all", nil, nil, nil, map[string][]interface{}{"*": []interface{}{}, "test": []interface{}{}}, nil}, + {"deny/all1", nil, nil, nil, map[string][]interface{}{"*": []interface{}{}, "test": []interface{}{}}, nil}, + {"value/merge", nil, nil, map[string][]interface{}{"test": []interface{}{3, 4, 1, 2}}, map[string][]interface{}{"test": []interface{}{3, 4, 1, 2}}, nil}, + {"value/empty", nil, nil, map[string][]interface{}{"empty": []interface{}{}}, map[string][]interface{}{"empty": []interface{}{}}, nil}, } for _, tc := range tcases { @@ -263,6 +264,9 @@ func TestACL_PolicyMerge(t *testing.T) { if !reflect.DeepEqual(tc.denied, p.DeniedParameters) { t.Fatalf("Denied paramaters did not match, Expected: %#v, Got: %#v", tc.denied, p.DeniedParameters) } + if !reflect.DeepEqual(tc.required, p.RequiredParameters) { + t.Fatalf("Required paramaters did not match, Expected: %#v, Got: %#v", tc.required, p.RequiredParameters) + } if tc.minWrappingTTL != nil && *tc.minWrappingTTL != p.MinWrappingTTL { t.Fatalf("Min wrapping TTL did not match, Expected: %#v, Got: %#v", tc.minWrappingTTL, p.MinWrappingTTL) } @@ -319,6 +323,8 @@ func TestACL_AllowOperation(t *testing.T) { {"fruit/apple", nil, []string{"one"}, false}, 
{"cold/weather", nil, []string{"four"}, true}, {"var/aws", nil, []string{"cold", "warm", "kitty"}, false}, + {"var/req", nil, []string{"cold", "warm", "kitty"}, false}, + {"var/req", nil, []string{"cold", "warm", "kitty", "foo"}, true}, } for _, tc := range tcases { @@ -510,6 +516,7 @@ path "foo/bar" { denied_parameters = { "baz" = [] } + required_parameters = ["baz"] } path "foo/bar" { policy = "write" @@ -522,6 +529,7 @@ path "hello/universe" { allowed_parameters = { "foo" = [] } + required_parameters = ["foo"] max_wrapping_ttl = 300 min_wrapping_ttl = 100 } @@ -530,6 +538,7 @@ path "hello/universe" { allowed_parameters = { "bar" = [] } + required_parameters = ["bar"] max_wrapping_ttl = 200 min_wrapping_ttl = 50 } @@ -705,6 +714,10 @@ path "var/aws" { "kitty" = [] } } +path "var/req" { + policy = "write" + required_parameters = ["foo"] +} ` //allow operation testing diff --git a/vault/policy.go b/vault/policy.go index 642e6e56ec..74d759dc91 100644 --- a/vault/policy.go +++ b/vault/policy.go @@ -96,10 +96,11 @@ type PathRules struct { // These keys are used at the top level to make the HCL nicer; we store in // the ACLPermissions object though - MinWrappingTTLHCL interface{} `hcl:"min_wrapping_ttl"` - MaxWrappingTTLHCL interface{} `hcl:"max_wrapping_ttl"` - AllowedParametersHCL map[string][]interface{} `hcl:"allowed_parameters"` - DeniedParametersHCL map[string][]interface{} `hcl:"denied_parameters"` + MinWrappingTTLHCL interface{} `hcl:"min_wrapping_ttl"` + MaxWrappingTTLHCL interface{} `hcl:"max_wrapping_ttl"` + AllowedParametersHCL map[string][]interface{} `hcl:"allowed_parameters"` + DeniedParametersHCL map[string][]interface{} `hcl:"denied_parameters"` + RequiredParametersHCL []string `hcl:"required_parameters"` } type ACLPermissions struct { @@ -108,6 +109,7 @@ type ACLPermissions struct { MaxWrappingTTL time.Duration AllowedParameters map[string][]interface{} DeniedParameters map[string][]interface{} + RequiredParameters []string } func (p *ACLPermissions) Clone() (*ACLPermissions, error) { @@ -115,6 +117,7 @@ func (p *ACLPermissions) Clone() (*ACLPermissions, error) { CapabilitiesBitmap: p.CapabilitiesBitmap, MinWrappingTTL: p.MinWrappingTTL, MaxWrappingTTL: p.MaxWrappingTTL, + RequiredParameters: p.RequiredParameters[:], } switch { @@ -198,6 +201,7 @@ func parsePaths(result *Policy, list *ast.ObjectList) error { "capabilities", "allowed_parameters", "denied_parameters", + "required_parameters", "min_wrapping_ttl", "max_wrapping_ttl", } @@ -290,6 +294,9 @@ func parsePaths(result *Policy, list *ast.ObjectList) error { pc.Permissions.MaxWrappingTTL < pc.Permissions.MinWrappingTTL { return errors.New("max_wrapping_ttl cannot be less than min_wrapping_ttl") } + if len(pc.RequiredParametersHCL) > 0 { + pc.Permissions.RequiredParameters = pc.RequiredParametersHCL[:] + } PathFinished: paths = append(paths, &pc) diff --git a/vault/policy_test.go b/vault/policy_test.go index 96786cc26d..ce516486a4 100644 --- a/vault/policy_test.go +++ b/vault/policy_test.go @@ -85,6 +85,10 @@ path "test/types" { "bool" = [false] } } +path "test/req" { + capabilities = ["create", "sudo"] + required_parameters = ["foo"] +} `) func TestPolicy_Parse(t *testing.T) { @@ -225,6 +229,20 @@ func TestPolicy_Parse(t *testing.T) { }, Glob: false, }, + &PathRules{ + Prefix: "test/req", + Policy: "", + Capabilities: []string{ + "create", + "sudo", + }, + RequiredParametersHCL: []string{"foo"}, + Permissions: &ACLPermissions{ + CapabilitiesBitmap: (CreateCapabilityInt | SudoCapabilityInt), + RequiredParameters: 
[]string{"foo"}, + }, + Glob: false, + }, } if !reflect.DeepEqual(p.Paths, expect) { t.Errorf("expected \n\n%#v\n\n to be \n\n%#v\n\n", p.Paths, expect) diff --git a/website/source/docs/concepts/policies.html.md b/website/source/docs/concepts/policies.html.md index c868b7261a..45128b6310 100644 --- a/website/source/docs/concepts/policies.html.md +++ b/website/source/docs/concepts/policies.html.md @@ -113,9 +113,9 @@ path "secret/super-secret" { capabilities = ["deny"] } -# Policies can also specify allowed and disallowed parameters. Here the key -# "secret/restricted" can only contain "foo" (any value) and "bar" (one of "zip" -# or "zap"). +# Policies can also specify allowed, disallowed, and required parameters. Here +# the key "secret/restricted" can only contain "foo" (any value) and "bar" (one +# of "zip" or "zap"). path "secret/restricted" { capabilities = ["create"] allowed_parameters = { @@ -217,13 +217,24 @@ In addition to the standard set of capabilities, Vault offers finer-grained control over permissions at a given path. The capabilities associated with a path take precedence over permissions on parameters. -### Allowed and Denied Parameters +### Parameter Constraints In Vault, data is represented as `key=value` pairs. Vault policies can optionally further restrict paths based on the keys and data at those keys when evaluating the permissions for a path. The optional finer-grained control options are: + * `required_parameters` - A list of parameters that must be specified. + + ```ruby + # This requires the user to create "secret/foo" with a parameter named + # "bar" and "baz". + path "secret/foo" { + capabilities = ["create"] + required_parameters = ["bar", "baz"] + } + ``` + * `allowed_parameters` - Whitelists a list of keys and values that are permitted on the given path. From 3e7a3acb2204bca3aa2acbd69c947b37031b2aa2 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Thu, 2 Nov 2017 07:31:50 -0500 Subject: [PATCH 061/329] Change some instances of adding headers to setting headers, since really (#3501) we want to replace anything that might be there (e.g. for request forwarding and content-type). 
Hopefully fixes #3485 --- http/handler.go | 8 +++----- http/sys_health.go | 4 ++-- vault/request_forwarding.go | 4 +--- 3 files changed, 6 insertions(+), 10 deletions(-) diff --git a/http/handler.go b/http/handler.go index 4561e15aa3..4b5b90f71a 100644 --- a/http/handler.go +++ b/http/handler.go @@ -209,9 +209,7 @@ func handleRequestForwarding(core *vault.Core, handler http.Handler) http.Handle if header != nil { for k, v := range header { - for _, j := range v { - w.Header().Add(k, j) - } + w.Header()[k] = v } } @@ -332,7 +330,7 @@ func requestWrapInfo(r *http.Request, req *logical.Request) (*logical.Request, e func respondError(w http.ResponseWriter, status int, err error) { logical.AdjustErrorStatusCode(&status, err) - w.Header().Add("Content-Type", "application/json") + w.Header().Set("Content-Type", "application/json") w.WriteHeader(status) resp := &ErrorResponse{Errors: make([]string, 0, 1)} @@ -355,7 +353,7 @@ func respondErrorCommon(w http.ResponseWriter, req *logical.Request, resp *logic } func respondOk(w http.ResponseWriter, body interface{}) { - w.Header().Add("Content-Type", "application/json") + w.Header().Set("Content-Type", "application/json") if body == nil { w.WriteHeader(http.StatusNoContent) diff --git a/http/sys_health.go b/http/sys_health.go index 40797be129..453a1c5abc 100644 --- a/http/sys_health.go +++ b/http/sys_health.go @@ -49,7 +49,7 @@ func handleSysHealthGet(core *vault.Core, w http.ResponseWriter, r *http.Request return } - w.Header().Add("Content-Type", "application/json") + w.Header().Set("Content-Type", "application/json") w.WriteHeader(code) // Generate the response @@ -64,7 +64,7 @@ func handleSysHealthHead(core *vault.Core, w http.ResponseWriter, r *http.Reques } if body != nil { - w.Header().Add("Content-Type", "application/json") + w.Header().Set("Content-Type", "application/json") } w.WriteHeader(code) } diff --git a/vault/request_forwarding.go b/vault/request_forwarding.go index 13dbcdeb69..e1b95e3a22 100644 --- a/vault/request_forwarding.go +++ b/vault/request_forwarding.go @@ -300,9 +300,7 @@ func (c *Core) ForwardRequest(req *http.Request) (int, http.Header, []byte, erro if resp.HeaderEntries != nil { header = make(http.Header) for k, v := range resp.HeaderEntries { - for _, j := range v.Values { - header.Add(k, j) - } + header[k] = v.Values } } From 66b2d26bf72b06f49df9d1fa785d2a003e62ad49 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Thu, 2 Nov 2017 08:47:02 -0500 Subject: [PATCH 062/329] Ensure revocation happens before seal/step-down since token store isn't (#3500) available after when using single-use tokens. 
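To illustrate the ordering problem, here is a simplified sketch using hypothetical types (not the actual Vault internals): a deferred revocation only runs after the surrounding function returns, by which point sealing has already torn down the token store, so the revoke call fails. Revoking inline, before sealing, avoids that.

```go
package main

import "fmt"

type tokenStore struct{}

func (s *tokenStore) Revoke(id string) error {
	if s == nil {
		return fmt.Errorf("token store is gone: cannot revoke %q", id)
	}
	fmt.Println("revoked", id)
	return nil
}

type core struct{ tokenStore *tokenStore }

// seal stands in for sealing the core, which drops the token store.
func (c *core) seal() { c.tokenStore = nil }

func (c *core) sealBroken(tokenID string) {
	// Old pattern: revocation deferred, so it runs only after seal().
	defer func() {
		if err := c.tokenStore.Revoke(tokenID); err != nil {
			fmt.Println("error:", err)
		}
	}()
	c.seal()
}

func (c *core) sealFixed(tokenID string) {
	// New pattern: revoke immediately, while the token store still exists.
	if err := c.tokenStore.Revoke(tokenID); err != nil {
		fmt.Println("error:", err)
	}
	c.seal()
}

func main() {
	(&core{&tokenStore{}}).sealBroken("single-use-token") // error: the store is already gone
	(&core{&tokenStore{}}).sealFixed("single-use-token")  // revoked single-use-token
}
```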
Fixes #3497 --- vault/core.go | 40 ++++++++++++++++++++-------------------- vault/core_test.go | 35 +++++++++++++++++++++++++++++++++++ 2 files changed, 55 insertions(+), 20 deletions(-) diff --git a/vault/core.go b/vault/core.go index 78b2526578..0b731cd7d4 100644 --- a/vault/core.go +++ b/vault/core.go @@ -1231,16 +1231,6 @@ func (c *Core) sealInitCommon(req *logical.Request) (retErr error) { c.stateLock.RUnlock() return retErr } - if te.NumUses == -1 { - // Token needs to be revoked - defer func(id string) { - err = c.tokenStore.Revoke(id) - if err != nil { - c.logger.Error("core: token needed revocation after seal but failed to revoke", "error", err) - retErr = multierror.Append(retErr, ErrInternalError) - } - }(te.ID) - } } // Verify that this operation is allowed @@ -1258,6 +1248,16 @@ func (c *Core) sealInitCommon(req *logical.Request) (retErr error) { return retErr } + if te != nil && te.NumUses == -1 { + // Token needs to be revoked. We do this immediately here because + // we won't have a token store after sealing. + err = c.tokenStore.Revoke(te.ID) + if err != nil { + c.logger.Error("core: token needed revocation before seal but failed to revoke", "error", err) + retErr = multierror.Append(retErr, ErrInternalError) + } + } + // Tell any requests that know about this to stop if c.requestContextCancelFunc != nil { c.requestContextCancelFunc() @@ -1334,16 +1334,6 @@ func (c *Core) StepDown(req *logical.Request) (retErr error) { retErr = multierror.Append(retErr, logical.ErrPermissionDenied) return retErr } - if te.NumUses == -1 { - // Token needs to be revoked - defer func(id string) { - err = c.tokenStore.Revoke(id) - if err != nil { - c.logger.Error("core: token needed revocation after step-down but failed to revoke", "error", err) - retErr = multierror.Append(retErr, ErrInternalError) - } - }(te.ID) - } } // Verify that this operation is allowed @@ -1359,6 +1349,16 @@ func (c *Core) StepDown(req *logical.Request) (retErr error) { return retErr } + if te != nil && te.NumUses == -1 { + // Token needs to be revoked. We do this immediately here because + // we won't have a token store after sealing. 
+ err = c.tokenStore.Revoke(te.ID) + if err != nil { + c.logger.Error("core: token needed revocation before step-down but failed to revoke", "error", err) + retErr = multierror.Append(retErr, ErrInternalError) + } + } + select { case c.manualStepDownCh <- struct{}{}: default: diff --git a/vault/core_test.go b/vault/core_test.go index ab3dc74640..68303b2f0f 100644 --- a/vault/core_test.go +++ b/vault/core_test.go @@ -272,6 +272,41 @@ func TestCore_Seal_BadToken(t *testing.T) { } } +// GH-3497 +func TestCore_Seal_SingleUse(t *testing.T) { + c, keys, _ := TestCoreUnsealed(t) + c.tokenStore.create(&TokenEntry{ + ID: "foo", + NumUses: 1, + Policies: []string{"root"}, + }) + if err := c.Seal("foo"); err != nil { + t.Fatalf("err: %v", err) + } + if sealed, err := c.Sealed(); err != nil || !sealed { + t.Fatalf("err: %v, sealed: %t", err, sealed) + } + for i, key := range keys { + unseal, err := TestCoreUnseal(c, key) + if err != nil { + t.Fatalf("err: %v", err) + } + if i+1 == len(keys) && !unseal { + t.Fatalf("err: should be unsealed") + } + } + if err := c.Seal("foo"); err == nil { + t.Fatal("expected error from revoked token") + } + te, err := c.tokenStore.Lookup("foo") + if err != nil { + t.Fatal(err) + } + if te != nil { + t.Fatalf("expected nil token entry, got %#v", *te) + } +} + // Ensure we get a LeaseID func TestCore_HandleRequest_Lease(t *testing.T) { c, _, root := TestCoreUnsealed(t) From 41568317e02262985bdf0a84e09cc5bf3fe19c40 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Thu, 2 Nov 2017 09:30:04 -0500 Subject: [PATCH 063/329] Redo API locking (#3508) * Redo the API client quite a bit to make the behavior of NewClient more predictable and add locking to make it safer to use with Clone() and if multiple goroutines for some reason decide to change things. Along the way I discovered that currently, the x/net/http2 package is broke with the built-in h2 support in released Go. For those using DefaultConfig (the vast majority of cases) this will be a non-event. Others can manually call http2.ConfigureTransport as needed. We should keep an eye on commits on that repo and consider more updates before release. Alternately we could go back revisions but miss out on bug fixes; my theory is that this is not a purposeful break and I'll be following up on this in the Go issue tracker. In a few tests that don't use NewTestCluster, either for legacy or other reasons, ensure that http2.ConfigureTransport is called. 
* Use tls config cloning * Don't http2.ConfigureServer anymore as current Go seems to work properly without requiring the http2 package * Address feedback --- api/api_test.go | 5 -- api/client.go | 187 +++++++++++++++++++++++++++------------- command/server.go | 19 ++-- http/forwarding_test.go | 7 +- http/testing.go | 5 -- vault/testing.go | 8 +- 6 files changed, 141 insertions(+), 90 deletions(-) diff --git a/api/api_test.go b/api/api_test.go index d9059eab15..b2b851df6e 100644 --- a/api/api_test.go +++ b/api/api_test.go @@ -5,8 +5,6 @@ import ( "net" "net/http" "testing" - - "golang.org/x/net/http2" ) // testHTTPServer creates a test HTTP server that handles requests until @@ -19,9 +17,6 @@ func testHTTPServer( } server := &http.Server{Handler: handler} - if err := http2.ConfigureServer(server, nil); err != nil { - t.Fatal(err) - } go server.Serve(ln) config := DefaultConfig() diff --git a/api/client.go b/api/client.go index d801c43975..ca34f51f35 100644 --- a/api/client.go +++ b/api/client.go @@ -13,12 +13,11 @@ import ( "sync" "time" - "golang.org/x/net/http2" - "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/go-rootcerts" "github.com/hashicorp/vault/helper/parseutil" "github.com/sethgrid/pester" + "golang.org/x/net/http2" ) const EnvVaultAddress = "VAULT_ADDR" @@ -43,18 +42,21 @@ type WrappingLookupFunc func(operation, path string) string // Config is used to configure the creation of the client. type Config struct { + modifyLock sync.RWMutex + // Address is the address of the Vault server. This should be a complete // URL such as "http://vault.example.com". If you need a custom SSL // cert or want to enable insecure mode, you need to specify a custom // HttpClient. Address string - // HttpClient is the HTTP client to use, which will currently always have the - // same values as http.DefaultClient. This is used to control redirect behavior. + // HttpClient is the HTTP client to use. Vault sets sane defaults for the + // http.Client and its associated http.Transport created in DefaultConfig. + // If you must modify Vault's defaults, it is suggested that you start with + // that client and modify as needed rather than start with an empty client + // (or http.DefaultClient). HttpClient *http.Client - redirectSetup sync.Once - // MaxRetries controls the maximum number of times to retry when a 5xx error // occurs. Set to 0 or less to disable retrying. Defaults to 0. MaxRetries int @@ -93,26 +95,49 @@ type TLSConfig struct { // // The default Address is https://127.0.0.1:8200, but this can be overridden by // setting the `VAULT_ADDR` environment variable. +// +// If an error is encountered, this will return nil. func DefaultConfig() *Config { config := &Config{ Address: "https://127.0.0.1:8200", HttpClient: cleanhttp.DefaultClient(), } config.HttpClient.Timeout = time.Second * 60 + transport := config.HttpClient.Transport.(*http.Transport) transport.TLSHandshakeTimeout = 10 * time.Second transport.TLSClientConfig = &tls.Config{ MinVersion: tls.VersionTLS12, } + if err := http2.ConfigureTransport(transport); err != nil { + return nil + } + + if err := config.ReadEnvironment(); err != nil { + return nil + } if v := os.Getenv(EnvVaultAddress); v != "" { config.Address = v } + // Ensure redirects are not automatically followed + // Note that this is sane for the API client as it has its own + // redirect handling logic (and thus also for command/meta), + // but in e.g. 
http_test actual redirect handling is necessary + config.HttpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error { + // Returning this value causes the Go net library to not close the + // response body and to nil out the error. Otherwise pester tries + // three times on every redirect because it sees an error from this + // function (to prevent redirects) passing through to it. + return http.ErrUseLastResponse + } + return config } -// ConfigureTLS takes a set of TLS configurations and applies those to the the HTTP client. +// ConfigureTLS takes a set of TLS configurations and applies those to the the +// HTTP client. func (c *Config) ConfigureTLS(t *TLSConfig) error { if c.HttpClient == nil { c.HttpClient = DefaultConfig().HttpClient @@ -154,9 +179,8 @@ func (c *Config) ConfigureTLS(t *TLSConfig) error { return nil } -// ReadEnvironment reads configuration information from the -// environment. If there is an error, no configuration value -// is updated. +// ReadEnvironment reads configuration information from the environment. If +// there is an error, no configuration value is updated. func (c *Config) ReadEnvironment() error { var envAddress string var envCACert string @@ -218,6 +242,10 @@ func (c *Config) ReadEnvironment() error { TLSServerName: envTLSServerName, Insecure: envInsecure, } + + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + if err := c.ConfigureTLS(t); err != nil { return err } @@ -237,10 +265,9 @@ func (c *Config) ReadEnvironment() error { return nil } -// Client is the client to the Vault API. Create a client with NewClient. Note: -// it is not safe to modify client configuration from multiple goroutines at -// once. Set configuration first, then run requests. +// Client is the client to the Vault API. Create a client with NewClient. type Client struct { + modifyLock sync.RWMutex addr *url.URL config *Config token string @@ -250,77 +277,46 @@ type Client struct { policyOverride bool } -// SetMFACreds sets the MFA credentials supplied either via the environment -// variable or via the command line. -func (c *Client) SetMFACreds(creds []string) { - c.mfaCreds = creds -} - // NewClient returns a new client for the given configuration. // +// If the configuration is nil, Vault will use configuration from +// DefaultConfig(), which is the recommended starting configuration. +// // If the environment variable `VAULT_TOKEN` is present, the token will be // automatically added to the client. Otherwise, you must manually call // `SetToken()`. 
func NewClient(c *Config) (*Client, error) { - if c == nil { - c = DefaultConfig() - if err := c.ReadEnvironment(); err != nil { - return nil, fmt.Errorf("error reading environment: %v", err) - } + def := DefaultConfig() + if def == nil { + return nil, fmt.Errorf("could not create/read default configuration") } + if c == nil { + c = def + } + + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + u, err := url.Parse(c.Address) if err != nil { return nil, err } if c.HttpClient == nil { - c.HttpClient = DefaultConfig().HttpClient + c.HttpClient = def.HttpClient } if c.HttpClient.Transport == nil { - c.HttpClient.Transport = cleanhttp.DefaultTransport() + c.HttpClient.Transport = def.HttpClient.Transport } - if tp, ok := c.HttpClient.Transport.(*http.Transport); ok { - if tcc := tp.TLSClientConfig; tcc != nil { - var found bool - for _, proto := range tcc.NextProtos { - if proto == "h2" { - found = true - break - } - } - if !found { - if err := http2.ConfigureTransport(tp); err != nil { - return nil, err - } - } - } - } - - redirFunc := func() { - // Ensure redirects are not automatically followed - // Note that this is sane for the API client as it has its own - // redirect handling logic (and thus also for command/meta), - // but in e.g. http_test actual redirect handling is necessary - c.HttpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error { - // Returning this value causes the Go net library to not close the - // response body and to nil out the error. Otherwise pester tries - // three times on every redirect because it sees an error from this - // function (to prevent redirects) passing through to it. - return http.ErrUseLastResponse - } - } - - c.redirectSetup.Do(redirFunc) - client := &Client{ addr: u, config: c, } if token := os.Getenv(EnvVaultToken); token != "" { - client.SetToken(token) + client.token = token } return client, nil @@ -330,6 +326,9 @@ func NewClient(c *Config) (*Client, error) { // "://:". Setting this on a client will override the // value of VAULT_ADDR environment variable. func (c *Client) SetAddress(addr string) error { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + var err error if c.addr, err = url.Parse(addr); err != nil { return fmt.Errorf("failed to set address: %v", err) @@ -340,56 +339,112 @@ func (c *Client) SetAddress(addr string) error { // Address returns the Vault URL the client is configured to connect to func (c *Client) Address() string { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + return c.addr.String() } // SetMaxRetries sets the number of retries that will be used in the case of certain errors func (c *Client) SetMaxRetries(retries int) { + c.modifyLock.RLock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + c.modifyLock.RUnlock() + c.config.MaxRetries = retries } // SetClientTimeout sets the client request timeout func (c *Client) SetClientTimeout(timeout time.Duration) { + c.modifyLock.RLock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + c.modifyLock.RUnlock() + c.config.Timeout = timeout } // SetWrappingLookupFunc sets a lookup function that returns desired wrap TTLs // for a given operation and path func (c *Client) SetWrappingLookupFunc(lookupFunc WrappingLookupFunc) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.wrappingLookupFunc = lookupFunc } +// SetMFACreds sets the MFA credentials supplied either via the environment +// variable or via the command line. 
+func (c *Client) SetMFACreds(creds []string) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + + c.mfaCreds = creds +} + // Token returns the access token being used by this client. It will // return the empty string if there is no token set. func (c *Client) Token() string { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + return c.token } // SetToken sets the token directly. This won't perform any auth // verification, it simply sets the token properly for future requests. func (c *Client) SetToken(v string) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.token = v } // ClearToken deletes the token if it is set or does nothing otherwise. func (c *Client) ClearToken() { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.token = "" } // SetHeaders sets the headers to be used for future requests. func (c *Client) SetHeaders(headers http.Header) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.headers = headers } -// Clone creates a copy of this client. +// Clone creates a new client with the same configuration. Note that the same +// underlying http.Client is used; modifying the client from more than one +// goroutine at once may not be safe, so modify the client as needed and then +// clone. func (c *Client) Clone() (*Client, error) { - return NewClient(c.config) + c.modifyLock.RLock() + c.config.modifyLock.RLock() + config := c.config + c.modifyLock.RUnlock() + + newConfig := &Config{ + Address: config.Address, + HttpClient: config.HttpClient, + MaxRetries: config.MaxRetries, + Timeout: config.Timeout, + } + config.modifyLock.RUnlock() + + return NewClient(newConfig) } // SetPolicyOverride sets whether requests should be sent with the policy // override flag to request overriding soft-mandatory Sentinel policies (both // RGPs and EGPs) func (c *Client) SetPolicyOverride(override bool) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.policyOverride = override } @@ -397,6 +452,9 @@ func (c *Client) SetPolicyOverride(override bool) { // configured for this client. This is an advanced method and generally // doesn't need to be called externally. func (c *Client) NewRequest(method, requestPath string) *Request { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + // if SRV records exist (see https://tools.ietf.org/html/draft-andrews-http-srv-02), lookup the SRV // record and take the highest match; this is not designed for high-availability, just discovery var host string = c.addr.Host @@ -453,6 +511,11 @@ func (c *Client) NewRequest(method, requestPath string) *Request { // a Vault server not configured with this client. This is an advanced operation // that generally won't need to be called externally. 
func (c *Client) RawRequest(r *Request) (*Response, error) { + c.modifyLock.RLock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + c.modifyLock.RUnlock() + redirectCount := 0 START: req, err := r.ToHTTP() diff --git a/command/server.go b/command/server.go index 59c1c5770d..63b73ed422 100644 --- a/command/server.go +++ b/command/server.go @@ -18,8 +18,6 @@ import ( "syscall" "time" - "golang.org/x/net/http2" - colorable "github.com/mattn/go-colorable" log "github.com/mgutz/logxi/v1" testing "github.com/mitchellh/go-testing-interface" @@ -228,6 +226,8 @@ func (c *ServerCommand) Run(args []string) int { infoKeys := make([]string, 0, 10) info := make(map[string]string) + info["log level"] = logLevel + infoKeys = append(infoKeys, "log level") var seal vault.Seal = &vault.DefaultSeal{} @@ -430,11 +430,10 @@ CLUSTER_SYNTHESIS_COMPLETE: // Compile server information for output later info["storage"] = config.Storage.Type - info["log level"] = logLevel info["mlock"] = fmt.Sprintf( "supported: %v, enabled: %v", mlock.Supported(), !config.DisableMlock && mlock.Supported()) - infoKeys = append(infoKeys, "log level", "mlock", "storage") + infoKeys = append(infoKeys, "mlock", "storage") if coreConfig.ClusterAddr != "" { info["cluster address"] = coreConfig.ClusterAddr @@ -666,14 +665,11 @@ CLUSTER_SYNTHESIS_COMPLETE: )) } - // Initialize the HTTP server - server := &http.Server{} - if err := http2.ConfigureServer(server, nil); err != nil { - c.Ui.Output(fmt.Sprintf("Error configuring server for HTTP/2: %s", err)) - return 1 - } - server.Handler = handler + // Initialize the HTTP servers for _, ln := range lns { + server := &http.Server{ + Handler: handler, + } go server.Serve(ln) } @@ -858,7 +854,6 @@ func (c *ServerCommand) enableThreeNodeDevCluster(base *vault.CoreConfig, info m defer c.cleanupGuard.Do(testCluster.Cleanup) info["cluster parameters path"] = testCluster.TempDir - info["log level"] = "trace" infoKeys = append(infoKeys, "cluster parameters path", "log level") for i, core := range testCluster.Cores { diff --git a/http/forwarding_test.go b/http/forwarding_test.go index 4f1aefef9c..9fc40ffeab 100644 --- a/http/forwarding_test.go +++ b/http/forwarding_test.go @@ -52,8 +52,8 @@ func TestHTTP_Fallback_Bad_Address(t *testing.T) { for _, addr := range addrs { config := api.DefaultConfig() config.Address = addr - config.HttpClient = cleanhttp.DefaultClient() config.HttpClient.Transport.(*http.Transport).TLSClientConfig = cores[0].TLSConfig + client, err := api.NewClient(config) if err != nil { t.Fatal(err) @@ -100,8 +100,8 @@ func TestHTTP_Fallback_Disabled(t *testing.T) { for _, addr := range addrs { config := api.DefaultConfig() config.Address = addr - config.HttpClient = cleanhttp.DefaultClient() config.HttpClient.Transport.(*http.Transport).TLSClientConfig = cores[0].TLSConfig + client, err := api.NewClient(config) if err != nil { t.Fatal(err) @@ -505,6 +505,9 @@ func TestHTTP_Forwarding_ClientTLS(t *testing.T) { transport = cleanhttp.DefaultTransport() transport.TLSClientConfig = cores[0].TLSConfig + if err := http2.ConfigureTransport(transport); err != nil { + t.Fatal(err) + } client = &http.Client{ Transport: transport, diff --git a/http/testing.go b/http/testing.go index 543b3e6670..bda4819d4b 100644 --- a/http/testing.go +++ b/http/testing.go @@ -6,8 +6,6 @@ import ( "net/http" "testing" - "golang.org/x/net/http2" - "github.com/hashicorp/vault/vault" ) @@ -38,9 +36,6 @@ func TestServerWithListener(t *testing.T, ln net.Listener, addr string, core *va Addr: 
ln.Addr().String(), Handler: mux, } - if err := http2.ConfigureServer(server, nil); err != nil { - t.Fatal(err) - } go server.Serve(ln) } diff --git a/vault/testing.go b/vault/testing.go index 7a25e6f53b..2373e61417 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -1009,9 +1009,6 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te Handler: handler, } servers = append(servers, server) - if err := http2.ConfigureServer(server, nil); err != nil { - t.Fatal(err) - } } // Create three cores with the same physical and different redirect/cluster @@ -1251,7 +1248,10 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te getAPIClient := func(port int, tlsConfig *tls.Config) *api.Client { transport := cleanhttp.DefaultPooledTransport() - transport.TLSClientConfig = tlsConfig + transport.TLSClientConfig = tlsConfig.Clone() + if err := http2.ConfigureTransport(transport); err != nil { + t.Fatal(err) + } client := &http.Client{ Transport: transport, CheckRedirect: func(*http.Request, []*http.Request) error { From d7e48b4aaa24c5ba76ece59ee86bff5e56d42719 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Thu, 2 Nov 2017 10:38:43 -0400 Subject: [PATCH 064/329] changelog++ --- CHANGELOG.md | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d6501097b8..308fd5bd8b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,11 @@ DEPRECATIONS/CHANGES: + * API HTTP client behavior: When calling `NewClient` the API no longer + modifies the provided client/transport. In particular this means it will no + longer enable redirection limiting and HTTP/2 support on custom clients. It + is suggested that if you want to make changes to an HTTP client that you use + one created by `DefaultConfig` as a starting point. * AWS EC2 client nonce behavior: The client nonce generated by the backend that gets returned along with the authentication response will be audited in plaintext. 
If this is undesired, the clients can choose to supply a custom @@ -32,10 +37,13 @@ IMPROVEMENTS: BUG FIXES: * api: Fix panic when setting a custom HTTP client but with a nil transport - [GH-3437] + [GH-3435] [GH-3437] * auth/radius: Fix logging in in some situations [GH-3461] * core: Fix memleak when a connection would connect to the cluster port and then go away [GH-3513] + * core: Fix panic if a single-use token is used to step-down or seal [GH-3497] + * core: Set rather than add headers to prevent some duplicated headers in + responses when requests were forwarded to the active node [GH-3485] * physical/etcd3: Fix some listing issues due to how etcd3 does prefix matching [GH-3406] * physical/file: Fix listing when underscores are the first component of a From 6eb744e3796c8b178ddfce741c06958ad32c6217 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Thu, 2 Nov 2017 15:35:06 -0400 Subject: [PATCH 065/329] Fix some tests --- http/sys_seal_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/http/sys_seal_test.go b/http/sys_seal_test.go index 82ec71956d..82d40a2e41 100644 --- a/http/sys_seal_test.go +++ b/http/sys_seal_test.go @@ -31,6 +31,7 @@ func TestSysSealStatus(t *testing.T) { "n": json.Number("3"), "progress": json.Number("0"), "nonce": "", + "type": "shamir", } testResponseStatus(t, resp, 200) testResponseBody(t, resp, &actual) @@ -119,6 +120,7 @@ func TestSysUnseal(t *testing.T) { "n": json.Number("3"), "progress": json.Number(fmt.Sprintf("%d", i+1)), "nonce": "", + "type": "shamir", } if i == len(keys)-1 { expected["sealed"] = false @@ -196,6 +198,7 @@ func TestSysUnseal_Reset(t *testing.T) { "t": json.Number("3"), "n": json.Number("5"), "progress": json.Number(strconv.Itoa(i + 1)), + "type": "shamir", } testResponseStatus(t, resp, 200) testResponseBody(t, resp, &actual) From 66642a093532ed5d2a722c16003b74420a533517 Mon Sep 17 00:00:00 2001 From: Vishal Nayak Date: Thu, 2 Nov 2017 16:05:48 -0400 Subject: [PATCH 066/329] External identity groups (#3447) * external identity groups * add local LDAP groups as well to group aliases * add group aliases for okta credential backend * Fix panic in tests * fix build failure * remove duplicated struct tag * add test steps to test out removal of group member during renewals * Add comment for having a prefix check in router * fix tests * s/parent_id/canonical_id * s/parent/canonical in comments and errors --- builtin/credential/github/path_login.go | 43 +- builtin/credential/ldap/backend.go | 26 +- builtin/credential/ldap/path_login.go | 26 +- builtin/credential/okta/backend.go | 18 +- builtin/credential/okta/path_login.go | 28 +- command/identity_group_aliases_integ_test.go | 368 +++++++++++++ helper/identity/sentinel.go | 2 +- helper/identity/types.pb.go | 121 ++-- helper/identity/types.proto | 25 +- logical/auth.go | 8 +- vault/core.go | 4 +- vault/identity_lookup.go | 178 +++++- vault/identity_lookup_test.go | 219 ++++++++ vault/identity_store.go | 27 +- vault/identity_store_aliases.go | 152 ++++- vault/identity_store_aliases_test.go | 50 +- vault/identity_store_entities.go | 33 +- vault/identity_store_entities_test.go | 44 +- vault/identity_store_group_aliases.go | 279 ++++++++++ vault/identity_store_group_aliases_test.go | 163 ++++++ vault/identity_store_groups.go | 69 ++- vault/identity_store_groups_test.go | 118 +++- vault/identity_store_schema.go | 71 ++- vault/identity_store_structs.go | 7 + vault/identity_store_test.go | 2 +- vault/identity_store_util.go | 550 ++++++++++++++----- vault/request_handling.go | 24 +- 
vault/router.go | 20 +- 28 files changed, 2287 insertions(+), 388 deletions(-) create mode 100644 command/identity_group_aliases_integ_test.go create mode 100644 vault/identity_lookup_test.go create mode 100644 vault/identity_store_group_aliases.go create mode 100644 vault/identity_store_group_aliases_test.go diff --git a/builtin/credential/github/path_login.go b/builtin/credential/github/path_login.go index a6cc13614f..148bbc3e52 100644 --- a/builtin/credential/github/path_login.go +++ b/builtin/credential/github/path_login.go @@ -74,7 +74,7 @@ func (b *backend) pathLogin( return logical.ErrorResponse(fmt.Sprintf("error sanitizing TTLs: %s", err)), nil } - return &logical.Response{ + resp := &logical.Response{ Auth: &logical.Auth{ InternalData: map[string]interface{}{ "token": token, @@ -93,7 +93,15 @@ func (b *backend) pathLogin( Name: *verifyResp.User.Login, }, }, - }, nil + } + + for _, teamName := range verifyResp.TeamNames { + resp.Auth.GroupAliases = append(resp.Auth.GroupAliases, &logical.Alias{ + Name: teamName, + }) + } + + return resp, nil } func (b *backend) pathLoginRenew( @@ -125,7 +133,22 @@ func (b *backend) pathLoginRenew( if err != nil { return nil, err } - return framework.LeaseExtend(config.TTL, config.MaxTTL, b.System())(req, d) + + resp, err := framework.LeaseExtend(config.TTL, config.MaxTTL, b.System())(req, d) + if err != nil { + return nil, err + } + + // Remove old aliases + resp.Auth.GroupAliases = nil + + for _, teamName := range verifyResp.TeamNames { + resp.Auth.GroupAliases = append(resp.Auth.GroupAliases, &logical.Alias{ + Name: teamName, + }) + } + + return resp, nil } func (b *backend) verifyCredentials(req *logical.Request, token string) (*verifyCredentialsResp, *logical.Response, error) { @@ -233,14 +256,16 @@ func (b *backend) verifyCredentials(req *logical.Request, token string) (*verify } return &verifyCredentialsResp{ - User: user, - Org: org, - Policies: append(groupPoliciesList, userPoliciesList...), + User: user, + Org: org, + Policies: append(groupPoliciesList, userPoliciesList...), + TeamNames: teamNames, }, nil, nil } type verifyCredentialsResp struct { - User *github.User - Org *github.Organization - Policies []string + User *github.User + Org *github.Organization + Policies []string + TeamNames []string } diff --git a/builtin/credential/ldap/backend.go b/builtin/credential/ldap/backend.go index 835b4a6df2..08af4c77c4 100644 --- a/builtin/credential/ldap/backend.go +++ b/builtin/credential/ldap/backend.go @@ -88,22 +88,22 @@ func EscapeLDAPValue(input string) string { return input } -func (b *backend) Login(req *logical.Request, username string, password string) ([]string, *logical.Response, error) { +func (b *backend) Login(req *logical.Request, username string, password string) ([]string, *logical.Response, []string, error) { cfg, err := b.Config(req) if err != nil { - return nil, nil, err + return nil, nil, nil, err } if cfg == nil { - return nil, logical.ErrorResponse("ldap backend not configured"), nil + return nil, logical.ErrorResponse("ldap backend not configured"), nil, nil } c, err := cfg.DialLDAP() if err != nil { - return nil, logical.ErrorResponse(err.Error()), nil + return nil, logical.ErrorResponse(err.Error()), nil, nil } if c == nil { - return nil, logical.ErrorResponse("invalid connection returned from LDAP dial"), nil + return nil, logical.ErrorResponse("invalid connection returned from LDAP dial"), nil, nil } // Clean connection @@ -111,7 +111,7 @@ func (b *backend) Login(req *logical.Request, username string, password 
string) userBindDN, err := b.getUserBindDN(cfg, c, username) if err != nil { - return nil, logical.ErrorResponse(err.Error()), nil + return nil, logical.ErrorResponse(err.Error()), nil, nil } if b.Logger().IsDebug() { @@ -119,7 +119,7 @@ func (b *backend) Login(req *logical.Request, username string, password string) } if cfg.DenyNullBind && len(password) == 0 { - return nil, logical.ErrorResponse("password cannot be of zero length when passwordless binds are being denied"), nil + return nil, logical.ErrorResponse("password cannot be of zero length when passwordless binds are being denied"), nil, nil } // Try to bind as the login user. This is where the actual authentication takes place. @@ -129,14 +129,14 @@ func (b *backend) Login(req *logical.Request, username string, password string) err = c.UnauthenticatedBind(userBindDN) } if err != nil { - return nil, logical.ErrorResponse(fmt.Sprintf("LDAP bind failed: %v", err)), nil + return nil, logical.ErrorResponse(fmt.Sprintf("LDAP bind failed: %v", err)), nil, nil } // We re-bind to the BindDN if it's defined because we assume // the BindDN should be the one to search, not the user logging in. if cfg.BindDN != "" && cfg.BindPassword != "" { if err := c.Bind(cfg.BindDN, cfg.BindPassword); err != nil { - return nil, logical.ErrorResponse(fmt.Sprintf("Encountered an error while attempting to re-bind with the BindDN User: %s", err.Error())), nil + return nil, logical.ErrorResponse(fmt.Sprintf("Encountered an error while attempting to re-bind with the BindDN User: %s", err.Error())), nil, nil } if b.Logger().IsDebug() { b.Logger().Debug("auth/ldap: Re-Bound to original BindDN") @@ -145,12 +145,12 @@ func (b *backend) Login(req *logical.Request, username string, password string) userDN, err := b.getUserDN(cfg, c, userBindDN) if err != nil { - return nil, logical.ErrorResponse(err.Error()), nil + return nil, logical.ErrorResponse(err.Error()), nil, nil } ldapGroups, err := b.getLdapGroups(cfg, c, userDN, username) if err != nil { - return nil, logical.ErrorResponse(err.Error()), nil + return nil, logical.ErrorResponse(err.Error()), nil, nil } if b.Logger().IsDebug() { b.Logger().Debug("auth/ldap: Groups fetched from server", "num_server_groups", len(ldapGroups), "server_groups", ldapGroups) @@ -199,10 +199,10 @@ func (b *backend) Login(req *logical.Request, username string, password string) } ldapResponse.Data["error"] = errStr - return nil, ldapResponse, nil + return nil, ldapResponse, nil, nil } - return policies, ldapResponse, nil + return policies, ldapResponse, allGroups, nil } /* diff --git a/builtin/credential/ldap/path_login.go b/builtin/credential/ldap/path_login.go index 00014a6cba..c42015fe26 100644 --- a/builtin/credential/ldap/path_login.go +++ b/builtin/credential/ldap/path_login.go @@ -55,7 +55,7 @@ func (b *backend) pathLogin( username := d.Get("username").(string) password := d.Get("password").(string) - policies, resp, err := b.Login(req, username, password) + policies, resp, groupNames, err := b.Login(req, username, password) // Handle an internal error if err != nil { return nil, err @@ -87,6 +87,12 @@ func (b *backend) pathLogin( Name: username, }, } + + for _, groupName := range groupNames { + resp.Auth.GroupAliases = append(resp.Auth.GroupAliases, &logical.Alias{ + Name: groupName, + }) + } return resp, nil } @@ -96,7 +102,7 @@ func (b *backend) pathLoginRenew( username := req.Auth.Metadata["username"] password := req.Auth.InternalData["password"].(string) - loginPolicies, resp, err := b.Login(req, username, password) + 
loginPolicies, resp, groupNames, err := b.Login(req, username, password) if len(loginPolicies) == 0 { return resp, err } @@ -105,7 +111,21 @@ func (b *backend) pathLoginRenew( return nil, fmt.Errorf("policies have changed, not renewing") } - return framework.LeaseExtend(0, 0, b.System())(req, d) + resp, err = framework.LeaseExtend(0, 0, b.System())(req, d) + if err != nil { + return nil, err + } + + // Remove old aliases + resp.Auth.GroupAliases = nil + + for _, groupName := range groupNames { + resp.Auth.GroupAliases = append(resp.Auth.GroupAliases, &logical.Alias{ + Name: groupName, + }) + } + + return resp, nil } const pathLoginSyn = ` diff --git a/builtin/credential/okta/backend.go b/builtin/credential/okta/backend.go index 951d190973..eab2c7ce58 100644 --- a/builtin/credential/okta/backend.go +++ b/builtin/credential/okta/backend.go @@ -47,13 +47,13 @@ type backend struct { *framework.Backend } -func (b *backend) Login(req *logical.Request, username string, password string) ([]string, *logical.Response, error) { +func (b *backend) Login(req *logical.Request, username string, password string) ([]string, *logical.Response, []string, error) { cfg, err := b.Config(req.Storage) if err != nil { - return nil, nil, err + return nil, nil, nil, err } if cfg == nil { - return nil, logical.ErrorResponse("Okta backend not configured"), nil + return nil, logical.ErrorResponse("Okta backend not configured"), nil, nil } client := cfg.OktaClient() @@ -71,16 +71,16 @@ func (b *backend) Login(req *logical.Request, username string, password string) "password": password, }) if err != nil { - return nil, nil, err + return nil, nil, nil, err } var result authResult rsp, err := client.Do(authReq, &result) if err != nil { - return nil, logical.ErrorResponse(fmt.Sprintf("Okta auth failed: %v", err)), nil + return nil, logical.ErrorResponse(fmt.Sprintf("Okta auth failed: %v", err)), nil, nil } if rsp == nil { - return nil, logical.ErrorResponse("okta auth backend unexpected failure"), nil + return nil, logical.ErrorResponse("okta auth backend unexpected failure"), nil, nil } oktaResponse := &logical.Response{ @@ -92,7 +92,7 @@ func (b *backend) Login(req *logical.Request, username string, password string) if cfg.Token != "" { oktaGroups, err := b.getOktaGroups(client, &result.Embedded.User) if err != nil { - return nil, logical.ErrorResponse(fmt.Sprintf("okta failure retrieving groups: %v", err)), nil + return nil, logical.ErrorResponse(fmt.Sprintf("okta failure retrieving groups: %v", err)), nil, nil } if len(oktaGroups) == 0 { errString := fmt.Sprintf( @@ -142,10 +142,10 @@ func (b *backend) Login(req *logical.Request, username string, password string) } oktaResponse.Data["error"] = errStr - return nil, oktaResponse, nil + return nil, oktaResponse, nil, nil } - return policies, oktaResponse, nil + return policies, oktaResponse, allGroups, nil } func (b *backend) getOktaGroups(client *okta.Client, user *okta.User) ([]string, error) { diff --git a/builtin/credential/okta/path_login.go b/builtin/credential/okta/path_login.go index b32a9b7a9b..f8cb8be579 100644 --- a/builtin/credential/okta/path_login.go +++ b/builtin/credential/okta/path_login.go @@ -57,7 +57,7 @@ func (b *backend) pathLogin( username := d.Get("username").(string) password := d.Get("password").(string) - policies, resp, err := b.Login(req, username, password) + policies, resp, groupNames, err := b.Login(req, username, password) // Handle an internal error if err != nil { return nil, err @@ -96,6 +96,13 @@ func (b *backend) pathLogin( Name: 
username, }, } + + for _, groupName := range groupNames { + resp.Auth.GroupAliases = append(resp.Auth.GroupAliases, &logical.Alias{ + Name: groupName, + }) + } + return resp, nil } @@ -105,7 +112,7 @@ func (b *backend) pathLoginRenew( username := req.Auth.Metadata["username"] password := req.Auth.InternalData["password"].(string) - loginPolicies, resp, err := b.Login(req, username, password) + loginPolicies, resp, groupNames, err := b.Login(req, username, password) if len(loginPolicies) == 0 { return resp, err } @@ -119,7 +126,22 @@ func (b *backend) pathLoginRenew( return nil, err } - return framework.LeaseExtend(cfg.TTL, cfg.MaxTTL, b.System())(req, d) + resp, err = framework.LeaseExtend(cfg.TTL, cfg.MaxTTL, b.System())(req, d) + if err != nil { + return nil, err + } + + // Remove old aliases + resp.Auth.GroupAliases = nil + + for _, groupName := range groupNames { + resp.Auth.GroupAliases = append(resp.Auth.GroupAliases, &logical.Alias{ + Name: groupName, + }) + } + + return resp, nil + } func (b *backend) getConfig(req *logical.Request) (*ConfigEntry, error) { diff --git a/command/identity_group_aliases_integ_test.go b/command/identity_group_aliases_integ_test.go new file mode 100644 index 0000000000..8fc832e81c --- /dev/null +++ b/command/identity_group_aliases_integ_test.go @@ -0,0 +1,368 @@ +package command + +import ( + "testing" + + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/credential/ldap" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/vault" + logxi "github.com/mgutz/logxi/v1" +) + +func TestIdentityStore_Integ_GroupAliases(t *testing.T) { + var err error + coreConfig := &vault.CoreConfig{ + DisableMlock: true, + DisableCache: true, + Logger: logxi.NullLog, + CredentialBackends: map[string]logical.Factory{ + "ldap": ldap.Factory, + }, + } + + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + + cluster.Start() + defer cluster.Cleanup() + + cores := cluster.Cores + + vault.TestWaitActive(t, cores[0].Core) + + client := cores[0].Client + + err = client.Sys().EnableAuthWithOptions("ldap", &api.EnableAuthOptions{ + Type: "ldap", + }) + if err != nil { + t.Fatal(err) + } + + auth, err := client.Sys().ListAuth() + if err != nil { + t.Fatal(err) + } + + accessor := auth["ldap/"].Accessor + + secret, err := client.Logical().Write("identity/group", map[string]interface{}{ + "type": "external", + "name": "ldap_Italians", + }) + if err != nil { + t.Fatal(err) + } + italiansGroupID := secret.Data["id"].(string) + + secret, err = client.Logical().Write("identity/group", map[string]interface{}{ + "type": "external", + "name": "ldap_Scientists", + }) + if err != nil { + t.Fatal(err) + } + scientistsGroupID := secret.Data["id"].(string) + + secret, err = client.Logical().Write("identity/group", map[string]interface{}{ + "type": "external", + "name": "ldap_devops", + }) + if err != nil { + t.Fatal(err) + } + devopsGroupID := secret.Data["id"].(string) + + secret, err = client.Logical().Write("identity/group-alias", map[string]interface{}{ + "name": "Italians", + "canonical_id": italiansGroupID, + "mount_accessor": accessor, + }) + if err != nil { + t.Fatal(err) + } + + secret, err = client.Logical().Write("identity/group-alias", map[string]interface{}{ + "name": "Scientists", + "canonical_id": scientistsGroupID, + "mount_accessor": accessor, + }) + if err != nil { + t.Fatal(err) + } + + secret, err = 
client.Logical().Write("identity/group-alias", map[string]interface{}{ + "name": "devops", + "canonical_id": devopsGroupID, + "mount_accessor": accessor, + }) + if err != nil { + t.Fatal(err) + } + + secret, err = client.Logical().Read("identity/group/id/" + italiansGroupID) + if err != nil { + t.Fatal(err) + } + aliasMap := secret.Data["alias"].(map[string]interface{}) + if aliasMap["canonical_id"] != italiansGroupID || + aliasMap["name"] != "Italians" || + aliasMap["mount_accessor"] != accessor { + t.Fatalf("bad: group alias: %#v\n", aliasMap) + } + + secret, err = client.Logical().Read("identity/group/id/" + scientistsGroupID) + if err != nil { + t.Fatal(err) + } + aliasMap = secret.Data["alias"].(map[string]interface{}) + if aliasMap["canonical_id"] != scientistsGroupID || + aliasMap["name"] != "Scientists" || + aliasMap["mount_accessor"] != accessor { + t.Fatalf("bad: group alias: %#v\n", aliasMap) + } + + // Configure LDAP auth backend + secret, err = client.Logical().Write("auth/ldap/config", map[string]interface{}{ + "url": "ldap://ldap.forumsys.com", + "userattr": "uid", + "userdn": "dc=example,dc=com", + "groupdn": "dc=example,dc=com", + "binddn": "cn=read-only-admin,dc=example,dc=com", + }) + if err != nil { + t.Fatal(err) + } + + // Create a local group in LDAP backend + secret, err = client.Logical().Write("auth/ldap/groups/devops", map[string]interface{}{ + "policies": "default", + }) + if err != nil { + t.Fatal(err) + } + + // Create a local group in LDAP backend + secret, err = client.Logical().Write("auth/ldap/groups/engineers", map[string]interface{}{ + "policies": "default", + }) + if err != nil { + t.Fatal(err) + } + + // Create a local user in LDAP + secret, err = client.Logical().Write("auth/ldap/users/tesla", map[string]interface{}{ + "policies": "default", + "groups": "engineers,devops", + }) + if err != nil { + t.Fatal(err) + } + + // Login with LDAP and create a token + secret, err = client.Logical().Write("auth/ldap/login/tesla", map[string]interface{}{ + "password": "password", + }) + if err != nil { + t.Fatal(err) + } + token := secret.Auth.ClientToken + + // Lookup the token to get the entity ID + secret, err = client.Auth().Token().Lookup(token) + if err != nil { + t.Fatal(err) + } + entityID := secret.Data["entity_id"].(string) + + // Re-read the Scientists, Italians and devops group. This entity ID should have + // been added to both of these groups by now. 
+ secret, err = client.Logical().Read("identity/group/id/" + italiansGroupID) + if err != nil { + t.Fatal(err) + } + groupMap := secret.Data + found := false + for _, entityIDRaw := range groupMap["member_entity_ids"].([]interface{}) { + if entityIDRaw.(string) == entityID { + found = true + } + } + if !found { + t.Fatalf("expected entity ID %q to be part of Italians group") + } + + secret, err = client.Logical().Read("identity/group/id/" + scientistsGroupID) + if err != nil { + t.Fatal(err) + } + groupMap = secret.Data + found = false + for _, entityIDRaw := range groupMap["member_entity_ids"].([]interface{}) { + if entityIDRaw.(string) == entityID { + found = true + } + } + if !found { + t.Fatalf("expected entity ID %q to be part of Scientists group") + } + + secret, err = client.Logical().Read("identity/group/id/" + devopsGroupID) + if err != nil { + t.Fatal(err) + } + groupMap = secret.Data + found = false + for _, entityIDRaw := range groupMap["member_entity_ids"].([]interface{}) { + if entityIDRaw.(string) == entityID { + found = true + } + } + if !found { + t.Fatalf("expected entity ID %q to be part of devops group") + } + + identityStore := cores[0].IdentityStore() + + group, err := identityStore.MemDBGroupByID(italiansGroupID, true) + if err != nil { + t.Fatal(err) + } + + // Remove its member entities + group.MemberEntityIDs = nil + + err = identityStore.UpsertGroup(group, true) + if err != nil { + t.Fatal(err) + } + + group, err = identityStore.MemDBGroupByID(italiansGroupID, true) + if err != nil { + t.Fatal(err) + } + if group.MemberEntityIDs != nil { + t.Fatalf("failed to remove entity ID from the group") + } + + group, err = identityStore.MemDBGroupByID(scientistsGroupID, true) + if err != nil { + t.Fatal(err) + } + + // Remove its member entities + group.MemberEntityIDs = nil + + err = identityStore.UpsertGroup(group, true) + if err != nil { + t.Fatal(err) + } + + group, err = identityStore.MemDBGroupByID(scientistsGroupID, true) + if err != nil { + t.Fatal(err) + } + if group.MemberEntityIDs != nil { + t.Fatalf("failed to remove entity ID from the group") + } + + group, err = identityStore.MemDBGroupByID(devopsGroupID, true) + if err != nil { + t.Fatal(err) + } + + // Remove its member entities + group.MemberEntityIDs = nil + + err = identityStore.UpsertGroup(group, true) + if err != nil { + t.Fatal(err) + } + + group, err = identityStore.MemDBGroupByID(devopsGroupID, true) + if err != nil { + t.Fatal(err) + } + if group.MemberEntityIDs != nil { + t.Fatalf("failed to remove entity ID from the group") + } + + _, err = client.Auth().Token().Renew(token, 0) + if err != nil { + t.Fatal(err) + } + + // EntityIDs should have been added to the groups again during renewal + secret, err = client.Logical().Read("identity/group/id/" + italiansGroupID) + if err != nil { + t.Fatal(err) + } + groupMap = secret.Data + found = false + for _, entityIDRaw := range groupMap["member_entity_ids"].([]interface{}) { + if entityIDRaw.(string) == entityID { + found = true + } + } + if !found { + t.Fatalf("expected entity ID %q to be part of Italians group") + } + + secret, err = client.Logical().Read("identity/group/id/" + scientistsGroupID) + if err != nil { + t.Fatal(err) + } + groupMap = secret.Data + found = false + for _, entityIDRaw := range groupMap["member_entity_ids"].([]interface{}) { + if entityIDRaw.(string) == entityID { + found = true + } + } + if !found { + t.Fatalf("expected entity ID %q to be part of Italians group") + } + + secret, err = client.Logical().Read("identity/group/id/" 
+ devopsGroupID) + if err != nil { + t.Fatal(err) + } + + groupMap = secret.Data + found = false + for _, entityIDRaw := range groupMap["member_entity_ids"].([]interface{}) { + if entityIDRaw.(string) == entityID { + found = true + } + } + if !found { + t.Fatalf("expected entity ID %q to be part of devops group") + } + + // Remove user tesla from the devops group in LDAP backend + secret, err = client.Logical().Write("auth/ldap/users/tesla", map[string]interface{}{ + "policies": "default", + "groups": "engineers", + }) + if err != nil { + t.Fatal(err) + } + + // Renewing the token now should remove its entity ID from the devops + // group + _, err = client.Auth().Token().Renew(token, 0) + if err != nil { + t.Fatal(err) + } + + group, err = identityStore.MemDBGroupByID(devopsGroupID, true) + if err != nil { + t.Fatal(err) + } + if group.MemberEntityIDs != nil { + t.Fatalf("failed to remove entity ID from the group") + } +} diff --git a/helper/identity/sentinel.go b/helper/identity/sentinel.go index 5ed4587ee8..45f4b807f4 100644 --- a/helper/identity/sentinel.go +++ b/helper/identity/sentinel.go @@ -50,7 +50,7 @@ func (p *Alias) SentinelGet(key string) (interface{}, error) { case "last_update_time": return ptypes.TimestampString(p.LastUpdateTime), nil case "merged_from_entity_ids": - return p.MergedFromEntityIDs, nil + return p.MergedFromCanonicalIDs, nil } return nil, nil diff --git a/helper/identity/types.pb.go b/helper/identity/types.pb.go index 9411a96160..386e7e2ed0 100644 --- a/helper/identity/types.pb.go +++ b/helper/identity/types.pb.go @@ -59,6 +59,15 @@ type Group struct { // the groups belonging to a particular bucket during invalidation of the // storage key. BucketKeyHash string `sentinel:"" protobuf:"bytes,10,opt,name=bucket_key_hash,json=bucketKeyHash" json:"bucket_key_hash,omitempty"` + // Alias is used to mark this group as an internal mapping of a group that + // is external to the identity store. Alias can only be set if the 'type' + // is set to 'external'. + Alias *Alias `sentinel:"" protobuf:"bytes,11,opt,name=alias" json:"alias,omitempty"` + // Type indicates if this group is an internal group or an external group. + // Memberships of the internal groups can be managed over the API whereas + // the memberships on the external group --for which a corresponding alias + // will be set-- will be managed automatically. + Type string `sentinel:"" protobuf:"bytes,12,opt,name=type" json:"type,omitempty"` } func (m *Group) Reset() { *m = Group{} } @@ -136,6 +145,20 @@ func (m *Group) GetBucketKeyHash() string { return "" } +func (m *Group) GetAlias() *Alias { + if m != nil { + return m.Alias + } + return nil +} + +func (m *Group) GetType() string { + if m != nil { + return m.Type + } + return "" +} + // Entity represents an entity that gets persisted and indexed. // Entity is fundamentally composed of zero or many aliases. 
type Entity struct { @@ -253,8 +276,8 @@ func (m *Entity) GetBucketKeyHash() string { type Alias struct { // ID is the unique identifier that represents this alias ID string `sentinel:"" protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - // EntityID is the entity identifier to which this alias belongs to - EntityID string `sentinel:"" protobuf:"bytes,2,opt,name=entity_id,json=entityId" json:"entity_id,omitempty"` + // CanonicalID is the entity identifier to which this alias belongs to + CanonicalID string `sentinel:"" protobuf:"bytes,2,opt,name=canonical_id,json=canonicalId" json:"canonical_id,omitempty"` // MountType is the backend mount's type to which this alias belongs to. // This enables categorically querying aliases of specific backend types. MountType string `sentinel:"" protobuf:"bytes,3,opt,name=mount_type,json=mountType" json:"mount_type,omitempty"` @@ -270,8 +293,8 @@ type Alias struct { // against their metadata. Metadata map[string]string `sentinel:"" protobuf:"bytes,6,rep,name=metadata" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // Name is the identifier of this alias in its authentication source. - // This does not uniquely identify a alias in Vault. This in conjunction - // with MountAccessor form to be the factors that represent a alias in a + // This does not uniquely identify an alias in Vault. This in conjunction + // with MountAccessor form to be the factors that represent an alias in a // unique way. Aliases will be indexed based on this combined uniqueness // factor. Name string `sentinel:"" protobuf:"bytes,7,opt,name=name" json:"name,omitempty"` @@ -281,10 +304,8 @@ type Alias struct { // alias got modified. This is helpful in filtering out aliases based // on its age and to take action on them, if desired. LastUpdateTime *google_protobuf.Timestamp `sentinel:"" protobuf:"bytes,9,opt,name=last_update_time,json=lastUpdateTime" json:"last_update_time,omitempty"` - // MergedFromEntityIDs is the FIFO history of merging activity by entity IDs from - // which this alias is transfered over to the entity to which it - // currently belongs to. 
- MergedFromEntityIDs []string `sentinel:"" protobuf:"bytes,10,rep,name=merged_from_entity_ids,json=mergedFromEntityIDs" json:"merged_from_entity_ids,omitempty"` + // MergedFromCanonicalIDs is the FIFO history of merging activity + MergedFromCanonicalIDs []string `sentinel:"" protobuf:"bytes,10,rep,name=merged_from_canonical_ids,json=mergedFromCanonicalIds" json:"merged_from_canonical_ids,omitempty"` } func (m *Alias) Reset() { *m = Alias{} } @@ -299,9 +320,9 @@ func (m *Alias) GetID() string { return "" } -func (m *Alias) GetEntityID() string { +func (m *Alias) GetCanonicalID() string { if m != nil { - return m.EntityID + return m.CanonicalID } return "" } @@ -355,9 +376,9 @@ func (m *Alias) GetLastUpdateTime() *google_protobuf.Timestamp { return nil } -func (m *Alias) GetMergedFromEntityIDs() []string { +func (m *Alias) GetMergedFromCanonicalIDs() []string { if m != nil { - return m.MergedFromEntityIDs + return m.MergedFromCanonicalIDs } return nil } @@ -371,41 +392,43 @@ func init() { func init() { proto.RegisterFile("types.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 570 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xcd, 0x6e, 0xd3, 0x40, - 0x10, 0xc7, 0xe5, 0x38, 0x1f, 0xf6, 0xa4, 0x4d, 0xcb, 0x82, 0x90, 0x15, 0x54, 0x08, 0x95, 0x40, - 0x86, 0x83, 0x2b, 0xb5, 0x17, 0x28, 0x07, 0x54, 0x89, 0x02, 0x15, 0x42, 0x42, 0x56, 0x39, 0x5b, - 0x9b, 0x78, 0x9a, 0xac, 0x1a, 0x7b, 0x2d, 0xef, 0x1a, 0xe1, 0x27, 0xe4, 0x39, 0x38, 0xf1, 0x1a, - 0xc8, 0xb3, 0x76, 0x62, 0x08, 0x5f, 0x15, 0xb9, 0xd9, 0xff, 0x99, 0x1d, 0xcf, 0xce, 0xff, 0x37, - 0x86, 0xa1, 0x2e, 0x33, 0x54, 0x41, 0x96, 0x4b, 0x2d, 0x99, 0x23, 0x62, 0x4c, 0xb5, 0xd0, 0xe5, - 0xf8, 0xc1, 0x5c, 0xca, 0xf9, 0x12, 0x8f, 0x48, 0x9f, 0x16, 0x57, 0x47, 0x5a, 0x24, 0xa8, 0x34, - 0x4f, 0x32, 0x93, 0x7a, 0xf8, 0xcd, 0x86, 0xde, 0x9b, 0x5c, 0x16, 0x19, 0x1b, 0x41, 0x47, 0xc4, - 0x9e, 0x35, 0xb1, 0x7c, 0x37, 0xec, 0x88, 0x98, 0x31, 0xe8, 0xa6, 0x3c, 0x41, 0xaf, 0x43, 0x0a, - 0x3d, 0xb3, 0x31, 0x38, 0x99, 0x5c, 0x8a, 0x99, 0x40, 0xe5, 0xd9, 0x13, 0xdb, 0x77, 0xc3, 0xd5, - 0x3b, 0xf3, 0x61, 0x3f, 0xe3, 0x39, 0xa6, 0x3a, 0x9a, 0x57, 0xf5, 0x22, 0x11, 0x2b, 0xaf, 0x4b, - 0x39, 0x23, 0xa3, 0xd3, 0x67, 0x2e, 0x62, 0xc5, 0x9e, 0xc2, 0xad, 0x04, 0x93, 0x29, 0xe6, 0x91, - 0xe9, 0x92, 0x52, 0x7b, 0x94, 0xba, 0x67, 0x02, 0xe7, 0xa4, 0x57, 0xb9, 0xcf, 0xc1, 0x49, 0x50, - 0xf3, 0x98, 0x6b, 0xee, 0xf5, 0x27, 0xb6, 0x3f, 0x3c, 0x3e, 0x08, 0x9a, 0xdb, 0x05, 0x54, 0x31, - 0x78, 0x5f, 0xc7, 0xcf, 0x53, 0x9d, 0x97, 0xe1, 0x2a, 0x9d, 0xbd, 0x84, 0xdd, 0x59, 0x8e, 0x5c, - 0x0b, 0x99, 0x46, 0xd5, 0xb5, 0xbd, 0xc1, 0xc4, 0xf2, 0x87, 0xc7, 0xe3, 0xc0, 0xcc, 0x24, 0x68, - 0x66, 0x12, 0x5c, 0x36, 0x33, 0x09, 0x77, 0x9a, 0x03, 0x95, 0xc4, 0x5e, 0xc1, 0xfe, 0x92, 0x2b, - 0x1d, 0x15, 0x59, 0xcc, 0x35, 0x9a, 0x1a, 0xce, 0x5f, 0x6b, 0x8c, 0xaa, 0x33, 0x1f, 0xe9, 0x08, - 0x55, 0x79, 0x08, 0x3b, 0x89, 0x8c, 0xc5, 0x55, 0x19, 0x89, 0x34, 0xc6, 0xcf, 0x9e, 0x3b, 0xb1, - 0xfc, 0x6e, 0x38, 0x34, 0xda, 0x45, 0x25, 0xb1, 0xc7, 0xb0, 0x37, 0x2d, 0x66, 0xd7, 0xa8, 0xa3, - 0x6b, 0x2c, 0xa3, 0x05, 0x57, 0x0b, 0x0f, 0x68, 0xea, 0xbb, 0x46, 0x7e, 0x87, 0xe5, 0x5b, 0xae, - 0x16, 0xe3, 0x17, 0xb0, 0xfb, 0xc3, 0x65, 0xd9, 0x3e, 0xd8, 0xd7, 0x58, 0xd6, 0xa6, 0x55, 0x8f, - 0xec, 0x0e, 0xf4, 0x3e, 0xf1, 0x65, 0xd1, 0xd8, 0x66, 0x5e, 0x4e, 0x3b, 0xcf, 0xac, 0xc3, 0x2f, - 0x36, 0xf4, 0xcd, 0x5c, 0xd9, 0x13, 0x18, 0xf0, 0xa5, 0xe0, 0x0a, 0x95, 0x67, 0xd1, 0x4c, 0xf7, - 0xd6, 0x33, 0x3d, 0xab, 0x02, 0x61, 0x13, 0xaf, 
0xa9, 0xe8, 0x6c, 0x50, 0x61, 0xb7, 0xa8, 0x38, - 0x6d, 0x79, 0xd4, 0xa5, 0x7a, 0xf7, 0xd7, 0xf5, 0xcc, 0x27, 0xff, 0xdd, 0xa4, 0xde, 0x16, 0x4c, - 0xea, 0xdf, 0xd8, 0x24, 0x42, 0x32, 0x9f, 0x63, 0xdc, 0x46, 0x72, 0xd0, 0x20, 0x59, 0x05, 0xd6, - 0x48, 0xb6, 0x97, 0xc0, 0xf9, 0x69, 0x09, 0x7e, 0xe1, 0xa4, 0xbb, 0x75, 0x27, 0xbf, 0xda, 0xd0, - 0x23, 0x9b, 0x36, 0x76, 0xf6, 0x1e, 0xb8, 0xab, 0xfe, 0xeb, 0x73, 0x0e, 0xd6, 0x8d, 0xb3, 0x03, - 0x80, 0x44, 0x16, 0xa9, 0x8e, 0xaa, 0x5f, 0x45, 0x6d, 0xa0, 0x4b, 0xca, 0x65, 0x99, 0x21, 0x7b, - 0x04, 0x23, 0x13, 0xe6, 0xb3, 0x19, 0x2a, 0x25, 0x73, 0xaf, 0x6b, 0x3a, 0x27, 0xf5, 0xac, 0x16, - 0xd7, 0x55, 0x32, 0xae, 0x17, 0xe4, 0x56, 0x53, 0xe5, 0x03, 0xd7, 0x8b, 0x3f, 0xef, 0x2b, 0x35, - 0xfd, 0x5b, 0x14, 0x1a, 0xb4, 0x06, 0x2d, 0xb4, 0x36, 0xf0, 0x70, 0xb6, 0x80, 0x87, 0x7b, 0x63, - 0x3c, 0x4e, 0xe0, 0x6e, 0x8d, 0xc7, 0x55, 0x2e, 0x93, 0x36, 0x23, 0x40, 0x00, 0xdc, 0x36, 0xd1, - 0xd7, 0xb9, 0x4c, 0x56, 0x9c, 0xfc, 0x97, 0xc7, 0xd3, 0x3e, 0x75, 0x75, 0xf2, 0x3d, 0x00, 0x00, - 0xff, 0xff, 0x17, 0x1c, 0xfc, 0x89, 0xd8, 0x05, 0x00, 0x00, + // 603 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x93, 0xdd, 0x6e, 0xd3, 0x30, + 0x14, 0xc7, 0xd5, 0xa6, 0x9f, 0x27, 0x5d, 0x37, 0x2c, 0x84, 0x4c, 0xa5, 0x41, 0x37, 0x69, 0x28, + 0x70, 0x91, 0x49, 0xe3, 0x86, 0x8d, 0x0b, 0x34, 0xc1, 0x80, 0x09, 0x21, 0xa1, 0x68, 0x5c, 0x47, + 0x6e, 0xe2, 0xb5, 0xd6, 0x92, 0x38, 0x8a, 0x1d, 0x44, 0x5e, 0x87, 0x97, 0xe1, 0x69, 0x78, 0x07, + 0xe4, 0xe3, 0xa6, 0x0d, 0x74, 0x7c, 0x4c, 0xdb, 0x9d, 0xf3, 0x3f, 0xc7, 0xc7, 0x27, 0xe7, 0xff, + 0x3b, 0xe0, 0xea, 0x2a, 0xe7, 0xca, 0xcf, 0x0b, 0xa9, 0x25, 0x19, 0x88, 0x98, 0x67, 0x5a, 0xe8, + 0x6a, 0xf2, 0x78, 0x2e, 0xe5, 0x3c, 0xe1, 0x87, 0xa8, 0xcf, 0xca, 0xcb, 0x43, 0x2d, 0x52, 0xae, + 0x34, 0x4b, 0x73, 0x9b, 0xba, 0xff, 0xad, 0x03, 0xdd, 0x77, 0x85, 0x2c, 0x73, 0x32, 0x86, 0xb6, + 0x88, 0x69, 0x6b, 0xda, 0xf2, 0x86, 0x41, 0x5b, 0xc4, 0x84, 0x40, 0x27, 0x63, 0x29, 0xa7, 0x6d, + 0x54, 0xf0, 0x4c, 0x26, 0x30, 0xc8, 0x65, 0x22, 0x22, 0xc1, 0x15, 0x75, 0xa6, 0x8e, 0x37, 0x0c, + 0x56, 0xdf, 0xc4, 0x83, 0x9d, 0x9c, 0x15, 0x3c, 0xd3, 0xe1, 0xdc, 0xd4, 0x0b, 0x45, 0xac, 0x68, + 0x07, 0x73, 0xc6, 0x56, 0xc7, 0x67, 0xce, 0x63, 0x45, 0x9e, 0xc1, 0xbd, 0x94, 0xa7, 0x33, 0x5e, + 0x84, 0xb6, 0x4b, 0x4c, 0xed, 0x62, 0xea, 0xb6, 0x0d, 0x9c, 0xa1, 0x6e, 0x72, 0x8f, 0x61, 0x90, + 0x72, 0xcd, 0x62, 0xa6, 0x19, 0xed, 0x4d, 0x1d, 0xcf, 0x3d, 0xda, 0xf5, 0xeb, 0xbf, 0xf3, 0xb1, + 0xa2, 0xff, 0x71, 0x19, 0x3f, 0xcb, 0x74, 0x51, 0x05, 0xab, 0x74, 0xf2, 0x0a, 0xb6, 0xa2, 0x82, + 0x33, 0x2d, 0x64, 0x16, 0x9a, 0xdf, 0xa6, 0xfd, 0x69, 0xcb, 0x73, 0x8f, 0x26, 0xbe, 0x9d, 0x89, + 0x5f, 0xcf, 0xc4, 0xbf, 0xa8, 0x67, 0x12, 0x8c, 0xea, 0x0b, 0x46, 0x22, 0x6f, 0x60, 0x27, 0x61, + 0x4a, 0x87, 0x65, 0x1e, 0x33, 0xcd, 0x6d, 0x8d, 0xc1, 0x3f, 0x6b, 0x8c, 0xcd, 0x9d, 0xcf, 0x78, + 0x05, 0xab, 0xec, 0xc1, 0x28, 0x95, 0xb1, 0xb8, 0xac, 0x42, 0x91, 0xc5, 0xfc, 0x2b, 0x1d, 0x4e, + 0x5b, 0x5e, 0x27, 0x70, 0xad, 0x76, 0x6e, 0x24, 0xf2, 0x04, 0xb6, 0x67, 0x65, 0x74, 0xc5, 0x75, + 0x78, 0xc5, 0xab, 0x70, 0xc1, 0xd4, 0x82, 0x02, 0x4e, 0x7d, 0xcb, 0xca, 0x1f, 0x78, 0xf5, 0x9e, + 0xa9, 0x05, 0x39, 0x80, 0x2e, 0x4b, 0x04, 0x53, 0xd4, 0xc5, 0x2e, 0xb6, 0xd7, 0x93, 0x38, 0x35, + 0x72, 0x60, 0xa3, 0xc6, 0x39, 0x43, 0x03, 0x1d, 0x59, 0xe7, 0xcc, 0x79, 0xf2, 0x12, 0xb6, 0x7e, + 0x99, 0x13, 0xd9, 0x01, 0xe7, 0x8a, 0x57, 0x4b, 0xbf, 0xcd, 0x91, 0xdc, 0x87, 0xee, 0x17, 0x96, + 0x94, 0xb5, 0xe3, 0xf6, 0xe3, 0xa4, 0xfd, 0xa2, 0xb5, 0xff, 
0xdd, 0x81, 0x9e, 0xb5, 0x84, 0x3c, + 0x85, 0x3e, 0x3e, 0xc2, 0x15, 0x6d, 0xa1, 0x1d, 0x1b, 0x4d, 0xd4, 0xf1, 0x25, 0x50, 0xed, 0x0d, + 0xa0, 0x9c, 0x06, 0x50, 0x27, 0x0d, 0x7b, 0x3b, 0x58, 0xef, 0xd1, 0xba, 0x9e, 0x7d, 0xf2, 0xff, + 0xfd, 0xed, 0xde, 0x81, 0xbf, 0xbd, 0x1b, 0xfb, 0x8b, 0x34, 0x17, 0x73, 0x1e, 0x37, 0x69, 0xee, + 0xd7, 0x34, 0x9b, 0xc0, 0x9a, 0xe6, 0xe6, 0xfe, 0x0c, 0x7e, 0xdb, 0x9f, 0x6b, 0x20, 0x18, 0x5e, + 0x03, 0xc1, 0xed, 0x9c, 0xfc, 0xe1, 0x40, 0x17, 0x6d, 0xda, 0x58, 0xf7, 0x3d, 0x18, 0x45, 0x2c, + 0x93, 0x99, 0x88, 0x58, 0x12, 0xae, 0x7c, 0x73, 0x57, 0xda, 0x79, 0x4c, 0x76, 0x01, 0x52, 0x59, + 0x66, 0x3a, 0x44, 0xba, 0xac, 0x8d, 0x43, 0x54, 0x2e, 0xaa, 0x9c, 0x93, 0x03, 0x18, 0xdb, 0x30, + 0x8b, 0x22, 0xae, 0x94, 0x2c, 0x68, 0xc7, 0xf6, 0x8f, 0xea, 0xe9, 0x52, 0x5c, 0x57, 0xc9, 0x99, + 0x5e, 0xa0, 0x67, 0x75, 0x95, 0x4f, 0x4c, 0x2f, 0xfe, 0xbe, 0xf0, 0xd8, 0xfa, 0x1f, 0x81, 0xa8, + 0x01, 0xeb, 0x37, 0x00, 0xdb, 0x80, 0x64, 0x70, 0x07, 0x90, 0x0c, 0x6f, 0x0c, 0xc9, 0x31, 0x3c, + 0x5c, 0x42, 0x72, 0x59, 0xc8, 0x34, 0x6c, 0x4e, 0x5a, 0x51, 0x40, 0x12, 0x1e, 0xd8, 0x84, 0xb7, + 0x85, 0x4c, 0x5f, 0xaf, 0x87, 0xae, 0x6e, 0xe5, 0xf7, 0xac, 0x87, 0xbd, 0x3d, 0xff, 0x19, 0x00, + 0x00, 0xff, 0xff, 0x8e, 0x4a, 0xc5, 0xdb, 0x1f, 0x06, 0x00, 0x00, } diff --git a/helper/identity/types.proto b/helper/identity/types.proto index 2c27442989..65385712fb 100644 --- a/helper/identity/types.proto +++ b/helper/identity/types.proto @@ -42,6 +42,17 @@ message Group { // the groups belonging to a particular bucket during invalidation of the // storage key. string bucket_key_hash = 10; + + // Alias is used to mark this group as an internal mapping of a group that + // is external to the identity store. Alias can only be set if the 'type' + // is set to 'external'. + Alias alias = 11; + + // Type indicates if this group is an internal group or an external group. + // Memberships of the internal groups can be managed over the API whereas + // the memberships on the external group --for which a corresponding alias + // will be set-- will be managed automatically. + string type = 12; } @@ -108,8 +119,8 @@ message Alias { // ID is the unique identifier that represents this alias string id = 1; - // EntityID is the entity identifier to which this alias belongs to - string entity_id = 2; + // CanonicalID is the entity identifier to which this alias belongs to + string canonical_id = 2; // MountType is the backend mount's type to which this alias belongs to. // This enables categorically querying aliases of specific backend types. @@ -130,8 +141,8 @@ message Alias { map metadata = 6; // Name is the identifier of this alias in its authentication source. - // This does not uniquely identify a alias in Vault. This in conjunction - // with MountAccessor form to be the factors that represent a alias in a + // This does not uniquely identify an alias in Vault. This in conjunction + // with MountAccessor form to be the factors that represent an alias in a // unique way. Aliases will be indexed based on this combined uniqueness // factor. string name = 7; @@ -144,8 +155,6 @@ message Alias { // on its age and to take action on them, if desired. google.protobuf.Timestamp last_update_time = 9; - // MergedFromEntityIDs is the FIFO history of merging activity by entity IDs from - // which this alias is transfered over to the entity to which it - // currently belongs to. 
- repeated string merged_from_entity_ids = 10; + // MergedFromCanonicalIDs is the FIFO history of merging activity + repeated string merged_from_canonical_ids = 10; } diff --git a/logical/auth.go b/logical/auth.go index c5d184da89..f5310149bc 100644 --- a/logical/auth.go +++ b/logical/auth.go @@ -58,7 +58,13 @@ type Auth struct { // Alias is the information about the authenticated client returned by // the auth backend - Alias *Alias `json:"alias" structs:"alias" mapstructure:"alias"` + Alias *Alias `json:"alias" mapstructure:"alias" structs:"alias"` + + // GroupAliases are the informational mappings of external groups which an + // authenticated user belongs to. This is used to check if there are + // mappings groups for the group aliases in identity store. For all the + // matching groups, the entity ID of the user will be added. + GroupAliases []*Alias `json:"group_aliases" mapstructure:"group_aliases" structs:"group_aliases"` } func (a *Auth) GoString() string { diff --git a/vault/core.go b/vault/core.go index 0b731cd7d4..3a155f16e2 100644 --- a/vault/core.go +++ b/vault/core.go @@ -695,7 +695,7 @@ func (c *Core) fetchACLTokenEntryAndEntity(clientToken string) (*ACL, *TokenEntr if te.EntityID != "" { //c.logger.Debug("core: entity set on the token", "entity_id", te.EntityID) // Fetch entity for the entity ID in the token entry - entity, err = c.identityStore.memDBEntityByID(te.EntityID, false) + entity, err = c.identityStore.MemDBEntityByID(te.EntityID, false) if err != nil { c.logger.Error("core: failed to lookup entity using its ID", "error", err) return nil, nil, nil, ErrInternalError @@ -705,7 +705,7 @@ func (c *Core) fetchACLTokenEntryAndEntity(clientToken string) (*ACL, *TokenEntr // If there was no corresponding entity object found, it is // possible that the entity got merged into another entity. Try // finding entity based on the merged entity index. - entity, err = c.identityStore.memDBEntityByMergedEntityID(te.EntityID, false) + entity, err = c.identityStore.MemDBEntityByMergedEntityID(te.EntityID, false) if err != nil { c.logger.Error("core: failed to lookup entity in merged entity ID index", "error", err) return nil, nil, nil, ErrInternalError diff --git a/vault/identity_lookup.go b/vault/identity_lookup.go index 0bc334eb8f..43f58953b8 100644 --- a/vault/identity_lookup.go +++ b/vault/identity_lookup.go @@ -15,13 +15,13 @@ func lookupPaths(i *IdentityStore) []*framework.Path { Fields: map[string]*framework.FieldSchema{ "type": { Type: framework.TypeString, - Description: "Type of lookup. Current supported values are 'by_id' and 'by_name'", + Description: "Type of lookup. Current supported values are 'id' and 'name'", }, - "group_name": { + "name": { Type: framework.TypeString, Description: "Name of the group.", }, - "group_id": { + "id": { Type: framework.TypeString, Description: "ID of the group.", }, @@ -33,6 +33,68 @@ func lookupPaths(i *IdentityStore) []*framework.Path { HelpSynopsis: strings.TrimSpace(lookupHelp["lookup-group"][0]), HelpDescription: strings.TrimSpace(lookupHelp["lookup-group"][1]), }, + { + Pattern: "lookup/entity-alias$", + Fields: map[string]*framework.FieldSchema{ + "type": { + Type: framework.TypeString, + Description: "Type of lookup. 
Current supported values are 'id', 'canonical_id' and 'factors'.", + }, + "id": { + Type: framework.TypeString, + Description: "ID of the entity.", + }, + "canonical_id": { + Type: framework.TypeString, + Description: "ID of the entity to which the alias belongs to.", + }, + "name": { + Type: framework.TypeString, + Description: "Name of the alias.", + }, + "mount_accessor": { + Type: framework.TypeString, + Description: "Accessor of the mount to which the entity alias belongs to.", + }, + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: i.pathLookupEntityAliasUpdate, + }, + + HelpSynopsis: strings.TrimSpace(lookupHelp["lookup-entity-alias"][0]), + HelpDescription: strings.TrimSpace(lookupHelp["lookup-entity-alias"][1]), + }, + { + Pattern: "lookup/group-alias$", + Fields: map[string]*framework.FieldSchema{ + "type": { + Type: framework.TypeString, + Description: "Type of lookup. Current supported values are 'id', 'canonical_id' and 'factors'.", + }, + "id": { + Type: framework.TypeString, + Description: "ID of the group.", + }, + "canonical_id": { + Type: framework.TypeString, + Description: "ID of the group to which the alias belongs to.", + }, + "name": { + Type: framework.TypeString, + Description: "Name of the alias.", + }, + "mount_accessor": { + Type: framework.TypeString, + Description: "Accessor of the mount to which the group alias belongs to.", + }, + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: i.pathLookupGroupAliasUpdate, + }, + + HelpSynopsis: strings.TrimSpace(lookupHelp["lookup-group-alias"][0]), + HelpDescription: strings.TrimSpace(lookupHelp["lookup-group-alias"][1]), + }, } } @@ -43,22 +105,22 @@ func (i *IdentityStore) pathLookupGroupUpdate(req *logical.Request, d *framework } switch lookupType { - case "by_id": - groupID := d.Get("group_id").(string) + case "id": + groupID := d.Get("id").(string) if groupID == "" { - return logical.ErrorResponse("empty group_id"), nil + return logical.ErrorResponse("empty ID"), nil } - group, err := i.memDBGroupByID(groupID, false) + group, err := i.MemDBGroupByID(groupID, false) if err != nil { return nil, err } return i.handleGroupReadCommon(group) - case "by_name": - groupName := d.Get("group_name").(string) + case "name": + groupName := d.Get("name").(string) if groupName == "" { - return logical.ErrorResponse("empty group_name"), nil + return logical.ErrorResponse("empty name"), nil } - group, err := i.memDBGroupByName(groupName, false) + group, err := i.MemDBGroupByName(groupName, false) if err != nil { return nil, err } @@ -70,9 +132,99 @@ func (i *IdentityStore) pathLookupGroupUpdate(req *logical.Request, d *framework return nil, nil } +func (i *IdentityStore) pathLookupEntityAliasUpdate(req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + return i.handleLookupAliasUpdateCommon(req, d, false) +} + +func (i *IdentityStore) pathLookupGroupAliasUpdate(req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + return i.handleLookupAliasUpdateCommon(req, d, true) +} + +func (i *IdentityStore) handleLookupAliasUpdateCommon(req *logical.Request, d *framework.FieldData, groupAlias bool) (*logical.Response, error) { + lookupType := d.Get("type").(string) + if lookupType == "" { + return logical.ErrorResponse("empty type"), nil + } + + switch lookupType { + case "id": + aliasID := d.Get("id").(string) + if aliasID == "" { + return logical.ErrorResponse("empty ID"), nil + } + + alias, err := 
i.MemDBAliasByID(aliasID, false, groupAlias) + if err != nil { + return nil, err + } + + return i.handleAliasReadCommon(alias) + + case "canonical_id": + canonicalID := d.Get("canonical_id").(string) + if canonicalID == "" { + return logical.ErrorResponse("empty canonical_id"), nil + } + + alias, err := i.MemDBAliasByCanonicalID(canonicalID, false, groupAlias) + if err != nil { + return nil, err + } + + return i.handleAliasReadCommon(alias) + + case "factors": + aliasName := d.Get("name").(string) + if aliasName == "" { + return logical.ErrorResponse("empty name"), nil + } + mountAccessor := d.Get("mount_accessor").(string) + if mountAccessor == "" { + return logical.ErrorResponse("empty 'mount_accessor'"), nil + } + + alias, err := i.MemDBAliasByFactors(mountAccessor, aliasName, false, groupAlias) + if err != nil { + return nil, err + } + + return i.handleAliasReadCommon(alias) + + default: + return logical.ErrorResponse(fmt.Sprintf("unrecognized type %q", lookupType)), nil + } +} + var lookupHelp = map[string][2]string{ "lookup-group": { - "Query groups based on factors.", - "Currently this supports querying groups by its name or ID.", + "Query groups based on types.", + `Supported types: + - 'id' + To query the group by its ID. This requires 'id' parameter to be set. + - 'name' + To query the group by its name. This requires 'name' parameter to be set. + `, + }, + "lookup-group-alias": { + "Query group alias based on types.", + `Supported types: + - 'id' + To query the group alias by its ID. This requires 'id' parameter to be set. + - 'canonical_id' + To query the group alias by the ID of the group it belongs to. This requires the 'canonical_id' parameter to be set. + - 'factors' + To query the group alias using the factors that uniquely identifies a group alias; its name and the mount accessor. This requires the 'name' and 'mount_accessor' parameters to be set. + `, + }, + "lookup-entity-alias": { + "Query entity alias based on types.", + `Supported types: + - 'id' + To query the entity alias by its ID. This requires 'id' parameter to be set. + - 'canonical_id' + To query the entity alias by the ID of the entity it belongs to. This requires the 'canonical_id' parameter to be set. + - 'factors' + To query the entity alias using the factors that uniquely identifies an entity alias; its name and the mount accessor. This requires the 'name' and 'mount_accessor' parameters to be set. 
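[Editor's note] As a usage illustration for the lookup endpoints defined above: both identity/lookup/entity-alias and identity/lookup/group-alias take a write containing a `type` selector plus the parameters that type requires. The sketch below is a hypothetical client-side example, not part of this patch; it assumes a running Vault with the identity store mounted at identity/, and the address, token, alias name, and mount accessor values are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	// Assumes VAULT_ADDR and VAULT_TOKEN are provided via the environment or
	// set explicitly on the client; the values used below are placeholders.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Look up an entity alias by the factors that uniquely identify it:
	// its name and the accessor of the auth mount it came from.
	secret, err := client.Logical().Write("identity/lookup/entity-alias", map[string]interface{}{
		"type":           "factors",
		"name":           "testentityaliasname",
		"mount_accessor": "auth_github_1b8d1f22", // placeholder accessor
	})
	if err != nil {
		log.Fatal(err)
	}
	if secret != nil {
		fmt.Println(secret.Data["id"], secret.Data["canonical_id"])
	}
}
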
+ `, }, } diff --git a/vault/identity_lookup_test.go b/vault/identity_lookup_test.go new file mode 100644 index 0000000000..24ce3f4cdf --- /dev/null +++ b/vault/identity_lookup_test.go @@ -0,0 +1,219 @@ +package vault + +import ( + "testing" + + "github.com/hashicorp/vault/logical" +) + +func TestIdentityStore_Lookup_EntityAlias(t *testing.T) { + var err error + var resp *logical.Response + + i, accessor, _ := testIdentityStoreWithGithubAuth(t) + + entityReq := &logical.Request{ + Path: "entity", + Operation: logical.UpdateOperation, + } + + resp, err = i.HandleRequest(entityReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\n err: %#v\n", resp, err) + } + entityID := resp.Data["id"].(string) + + entityAliasReq := &logical.Request{ + Path: "entity-alias", + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "canonical_id": entityID, + "name": "testentityaliasname", + "mount_type": "ldap", + "mount_accessor": accessor, + }, + } + + resp, err = i.HandleRequest(entityAliasReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\n err: %#v\n", resp, err) + } + entityAliasID := resp.Data["id"].(string) + + lookupReq := &logical.Request{ + Path: "lookup/entity-alias", + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "type": "id", + "id": entityAliasID, + }, + } + resp, err = i.HandleRequest(lookupReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\n err: %#v\n", resp, err) + } + if resp.Data["id"].(string) != entityAliasID { + t.Fatalf("bad: group alias: %#v\n", resp.Data) + } + + lookupReq.Data = map[string]interface{}{ + "type": "factors", + "mount_accessor": accessor, + "name": "testentityaliasname", + } + resp, err = i.HandleRequest(lookupReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\n err: %#v\n", resp, err) + } + if resp.Data["id"].(string) != entityAliasID { + t.Fatalf("bad: entity alias: %#v\n", resp.Data) + } + + entityReq = &logical.Request{ + Path: "entity/id/" + entityID, + Operation: logical.ReadOperation, + } + resp, err = i.HandleRequest(entityReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\n err: %#v\n", resp, err) + } + + lookupReq.Data = map[string]interface{}{ + "type": "canonical_id", + "canonical_id": entityID, + } + resp, err = i.HandleRequest(lookupReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\n err: %#v\n", resp, err) + } + if resp.Data["id"].(string) != entityAliasID { + t.Fatalf("bad: entity alias: %#v\n", resp.Data) + } +} + +func TestIdentityStore_Lookup_GroupAlias(t *testing.T) { + var err error + var resp *logical.Response + + i, accessor, _ := testIdentityStoreWithGithubAuth(t) + + groupReq := &logical.Request{ + Path: "group", + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "type": "external", + }, + } + + resp, err = i.HandleRequest(groupReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\n err: %#v\n", resp, err) + } + groupID := resp.Data["id"].(string) + + groupAliasReq := &logical.Request{ + Path: "group-alias", + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "canonical_id": groupID, + "name": "testgroupaliasname", + "mount_type": "ldap", + "mount_accessor": accessor, + }, + } + + resp, err = i.HandleRequest(groupAliasReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\n err: %#v\n", resp, err) + } + 
groupAliasID := resp.Data["id"].(string) + + lookupReq := &logical.Request{ + Path: "lookup/group-alias", + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "type": "id", + "id": groupAliasID, + }, + } + resp, err = i.HandleRequest(lookupReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\n err: %#v\n", resp, err) + } + if resp.Data["id"].(string) != groupAliasID { + t.Fatalf("bad: group alias: %#v\n", resp.Data) + } + + lookupReq.Data = map[string]interface{}{ + "type": "factors", + "mount_accessor": accessor, + "name": "testgroupaliasname", + } + resp, err = i.HandleRequest(lookupReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\n err: %#v\n", resp, err) + } + if resp.Data["id"].(string) != groupAliasID { + t.Fatalf("bad: group alias: %#v\n", resp.Data) + } + + lookupReq.Data = map[string]interface{}{ + "type": "canonical_id", + "canonical_id": groupID, + } + resp, err = i.HandleRequest(lookupReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\n err: %#v\n", resp, err) + } + if resp.Data["id"].(string) != groupAliasID { + t.Fatalf("bad: group alias: %#v\n", resp.Data) + } +} + +func TestIdentityStore_Lookup_Group(t *testing.T) { + var err error + var resp *logical.Response + + i, _, _ := testIdentityStoreWithGithubAuth(t) + + groupReq := &logical.Request{ + Path: "group", + Operation: logical.UpdateOperation, + } + resp, err = i.HandleRequest(groupReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\n err: %#v\n", resp, err) + } + groupID := resp.Data["id"].(string) + groupName := resp.Data["name"].(string) + + lookupGroupReq := &logical.Request{ + Path: "lookup/group", + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "type": "id", + "id": groupID, + }, + } + + resp, err = i.HandleRequest(lookupGroupReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\n err: %#v\n", resp, err) + } + if resp.Data["id"].(string) != groupID { + t.Fatalf("failed to lookup group") + } + + lookupGroupReq.Data = map[string]interface{}{ + "type": "name", + "name": groupName, + } + + resp, err = i.HandleRequest(lookupGroupReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\n err: %#v\n", resp, err) + } + if resp.Data["id"].(string) != groupID { + t.Fatalf("failed to lookup group") + } +} diff --git a/vault/identity_store.go b/vault/identity_store.go index bf1c78b16a..cf62d5daca 100644 --- a/vault/identity_store.go +++ b/vault/identity_store.go @@ -17,6 +17,10 @@ const ( groupBucketsPrefix = "packer/group/buckets/" ) +func (c *Core) IdentityStore() *IdentityStore { + return c.identityStore +} + // NewIdentityStore creates a new identity store func NewIdentityStore(core *Core, config *logical.BackendConfig) (*IdentityStore, error) { var err error @@ -50,6 +54,7 @@ func NewIdentityStore(core *Core, config *logical.BackendConfig) (*IdentityStore Paths: framework.PathAppend( entityPaths(iStore), aliasPaths(iStore), + groupAliasPaths(iStore), groupPaths(iStore), lookupPaths(iStore), upgradePaths(iStore), @@ -90,7 +95,7 @@ func (i *IdentityStore) Invalidate(key string) { // entry key of the entity bucket. Fetch all the entities that // belong to this bucket using the hash value. Remove these entities // from MemDB along with all the aliases of each entity. 
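[Editor's note] A side effect of this patch worth calling out: the memDB* helpers on IdentityStore are exported (MemDBEntityByID, MemDBAliasByFactors, and so on) and a Core.IdentityStore() accessor is added further down, so code outside the identity store's own files can reach the in-memory indexes. The test-style sketch below is hypothetical and only illustrates that pattern; it assumes it lives in the vault package, that TestCoreUnsealed is available there, and that a freshly unsealed test core carries an empty identity store.

package vault

import "testing"

func TestExample_IdentityStoreAccessor(t *testing.T) {
	core, _, _ := TestCoreUnsealed(t)

	// Core.IdentityStore() exposes the identity store so its exported MemDB
	// lookups can be called directly; on a fresh core a lookup by an unknown
	// name should simply return nil without an error.
	is := core.IdentityStore()
	entity, err := is.MemDBEntityByName("no-such-entity", false)
	if err != nil {
		t.Fatal(err)
	}
	if entity != nil {
		t.Fatalf("expected no entity, got: %#v", entity)
	}
}
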
- entitiesFetched, err := i.memDBEntitiesByBucketEntryKeyHashInTxn(txn, string(bucketKeyHash)) + entitiesFetched, err := i.MemDBEntitiesByBucketEntryKeyHashInTxn(txn, string(bucketKeyHash)) if err != nil { i.logger.Error("failed to fetch entities using the bucket entry key hash", "bucket_entry_key_hash", bucketKeyHash) return @@ -106,7 +111,7 @@ func (i *IdentityStore) Invalidate(key string) { } // Delete the entity using the same transaction - err = i.memDBDeleteEntityByIDInTxn(txn, entity.ID) + err = i.MemDBDeleteEntityByIDInTxn(txn, entity.ID) if err != nil { i.logger.Error("failed to delete entity from MemDB", "entity_id", entity.ID, "error", err) return @@ -160,7 +165,7 @@ func (i *IdentityStore) Invalidate(key string) { txn := i.db.Txn(true) defer txn.Abort() - groupsFetched, err := i.memDBGroupsByBucketEntryKeyHashInTxn(txn, string(bucketKeyHash)) + groupsFetched, err := i.MemDBGroupsByBucketEntryKeyHashInTxn(txn, string(bucketKeyHash)) if err != nil { i.logger.Error("failed to fetch groups using the bucket entry key hash", "bucket_entry_key_hash", bucketKeyHash) return @@ -168,7 +173,7 @@ func (i *IdentityStore) Invalidate(key string) { for _, group := range groupsFetched { // Delete the group using the same transaction - err = i.memDBDeleteGroupByIDInTxn(txn, group.ID) + err = i.MemDBDeleteGroupByIDInTxn(txn, group.ID) if err != nil { i.logger.Error("failed to delete group from MemDB", "group_id", group.ID, "error", err) return @@ -232,9 +237,9 @@ func (i *IdentityStore) parseGroupFromBucketItem(item *storagepacker.Item) (*ide return &group, nil } -// EntityByAliasFactors fetches the entity based on factors of alias, i.e mount +// entityByAliasFactors fetches the entity based on factors of alias, i.e mount // accessor and the alias name. -func (i *IdentityStore) EntityByAliasFactors(mountAccessor, aliasName string, clone bool) (*identity.Entity, error) { +func (i *IdentityStore) entityByAliasFactors(mountAccessor, aliasName string, clone bool) (*identity.Entity, error) { if mountAccessor == "" { return nil, fmt.Errorf("missing mount accessor") } @@ -243,7 +248,7 @@ func (i *IdentityStore) EntityByAliasFactors(mountAccessor, aliasName string, cl return nil, fmt.Errorf("missing alias name") } - alias, err := i.memDBAliasByFactors(mountAccessor, aliasName, false) + alias, err := i.MemDBAliasByFactors(mountAccessor, aliasName, false, false) if err != nil { return nil, err } @@ -252,11 +257,11 @@ func (i *IdentityStore) EntityByAliasFactors(mountAccessor, aliasName string, cl return nil, nil } - return i.memDBEntityByAliasID(alias.ID, clone) + return i.MemDBEntityByAliasID(alias.ID, clone) } // CreateEntity creates a new entity. This is used by core to -// associate each login attempt by a alias to a unified entity in Vault. +// associate each login attempt by an alias to a unified entity in Vault. 
func (i *IdentityStore) CreateEntity(alias *logical.Alias) (*identity.Entity, error) { var entity *identity.Entity var err error @@ -279,7 +284,7 @@ func (i *IdentityStore) CreateEntity(alias *logical.Alias) (*identity.Entity, er } // Check if an entity already exists for the given alais - entity, err = i.EntityByAliasFactors(alias.MountAccessor, alias.Name, false) + entity, err = i.entityByAliasFactors(alias.MountAccessor, alias.Name, false) if err != nil { return nil, err } @@ -296,7 +301,7 @@ func (i *IdentityStore) CreateEntity(alias *logical.Alias) (*identity.Entity, er // Create a new alias newAlias := &identity.Alias{ - EntityID: entity.ID, + CanonicalID: entity.ID, Name: alias.Name, MountAccessor: alias.MountAccessor, MountPath: mountValidationResp.MountPath, diff --git a/vault/identity_store_aliases.go b/vault/identity_store_aliases.go index 2593b562e7..a8501479d7 100644 --- a/vault/identity_store_aliases.go +++ b/vault/identity_store_aliases.go @@ -13,10 +13,47 @@ import ( // aliasPaths returns the API endpoints to operate on aliases. // Following are the paths supported: -// alias - To register/modify a alias +// alias - To register/modify an alias // alias/id - To lookup, delete and list aliases based on ID func aliasPaths(i *IdentityStore) []*framework.Path { return []*framework.Path{ + { + Pattern: "entity-alias$", + Fields: map[string]*framework.FieldSchema{ + "id": { + Type: framework.TypeString, + Description: "ID of the alias", + }, + // entity_id is deprecated + "entity_id": { + Type: framework.TypeString, + Description: "Entity ID to which this alias belongs to", + }, + "canonical_id": { + Type: framework.TypeString, + Description: "Entity ID to which this alias belongs to", + }, + "mount_accessor": { + Type: framework.TypeString, + Description: "Mount accessor to which this alias belongs to", + }, + "name": { + Type: framework.TypeString, + Description: "Name of the alias", + }, + "metadata": { + Type: framework.TypeStringSlice, + Description: "Metadata to be associated with the alias. 
Format should be a list of `key=value` pairs.", + }, + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: i.pathAliasRegister, + }, + + HelpSynopsis: strings.TrimSpace(aliasHelp["alias"][0]), + HelpDescription: strings.TrimSpace(aliasHelp["alias"][1]), + }, + // BC path for identity/entity-alias { Pattern: "alias$", Fields: map[string]*framework.FieldSchema{ @@ -24,10 +61,15 @@ func aliasPaths(i *IdentityStore) []*framework.Path { Type: framework.TypeString, Description: "ID of the alias", }, + // entity_id is deprecated "entity_id": { Type: framework.TypeString, Description: "Entity ID to which this alias belongs to", }, + "canonical_id": { + Type: framework.TypeString, + Description: "Entity ID to which this alias belongs to", + }, "mount_accessor": { Type: framework.TypeString, Description: "Mount accessor to which this alias belongs to", @@ -48,6 +90,45 @@ func aliasPaths(i *IdentityStore) []*framework.Path { HelpSynopsis: strings.TrimSpace(aliasHelp["alias"][0]), HelpDescription: strings.TrimSpace(aliasHelp["alias"][1]), }, + { + Pattern: "entity-alias/id/" + framework.GenericNameRegex("id"), + Fields: map[string]*framework.FieldSchema{ + "id": { + Type: framework.TypeString, + Description: "ID of the alias", + }, + // entity_id is deprecated + "entity_id": { + Type: framework.TypeString, + Description: "Entity ID to which this alias belongs to", + }, + "canonical_id": { + Type: framework.TypeString, + Description: "Entity ID to which this alias should be tied to", + }, + "mount_accessor": { + Type: framework.TypeString, + Description: "Mount accessor to which this alias belongs to", + }, + "name": { + Type: framework.TypeString, + Description: "Name of the alias", + }, + "metadata": { + Type: framework.TypeStringSlice, + Description: "Metadata to be associated with the alias. 
Format should be a comma separated list of `key=value` pairs.", + }, + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: i.pathAliasIDUpdate, + logical.ReadOperation: i.pathAliasIDRead, + logical.DeleteOperation: i.pathAliasIDDelete, + }, + + HelpSynopsis: strings.TrimSpace(aliasHelp["alias-id"][0]), + HelpDescription: strings.TrimSpace(aliasHelp["alias-id"][1]), + }, + // BC path for identity/entity-alias/id/ { Pattern: "alias/id/" + framework.GenericNameRegex("id"), Fields: map[string]*framework.FieldSchema{ @@ -55,10 +136,15 @@ func aliasPaths(i *IdentityStore) []*framework.Path { Type: framework.TypeString, Description: "ID of the alias", }, + // entity_id is deprecated "entity_id": { Type: framework.TypeString, Description: "Entity ID to which this alias should be tied to", }, + "canonical_id": { + Type: framework.TypeString, + Description: "Entity ID to which this alias should be tied to", + }, "mount_accessor": { Type: framework.TypeString, Description: "Mount accessor to which this alias belongs to", @@ -81,6 +167,16 @@ func aliasPaths(i *IdentityStore) []*framework.Path { HelpSynopsis: strings.TrimSpace(aliasHelp["alias-id"][0]), HelpDescription: strings.TrimSpace(aliasHelp["alias-id"][1]), }, + { + Pattern: "entity-alias/id/?$", + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: i.pathAliasIDList, + }, + + HelpSynopsis: strings.TrimSpace(aliasHelp["alias-id-list"][0]), + HelpDescription: strings.TrimSpace(aliasHelp["alias-id-list"][1]), + }, + // BC path for identity/alias/id { Pattern: "alias/id/?$", Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -103,17 +199,17 @@ func (i *IdentityStore) pathAliasRegister(req *logical.Request, d *framework.Fie return i.handleAliasUpdateCommon(req, d, nil) } -// pathAliasIDUpdate is used to update a alias based on the given +// pathAliasIDUpdate is used to update an alias based on the given // alias ID func (i *IdentityStore) pathAliasIDUpdate(req *logical.Request, d *framework.FieldData) (*logical.Response, error) { // Get alias id aliasID := d.Get("id").(string) if aliasID == "" { - return logical.ErrorResponse("missing alias ID"), nil + return logical.ErrorResponse("empty alias ID"), nil } - alias, err := i.memDBAliasByID(aliasID, true) + alias, err := i.MemDBAliasByID(aliasID, true, false) if err != nil { return nil, err } @@ -124,7 +220,7 @@ func (i *IdentityStore) pathAliasIDUpdate(req *logical.Request, d *framework.Fie return i.handleAliasUpdateCommon(req, d, alias) } -// handleAliasUpdateCommon is used to update a alias +// handleAliasUpdateCommon is used to update an alias func (i *IdentityStore) handleAliasUpdateCommon(req *logical.Request, d *framework.FieldData, alias *identity.Alias) (*logical.Response, error) { var err error var newAlias bool @@ -139,9 +235,13 @@ func (i *IdentityStore) handleAliasUpdateCommon(req *logical.Request, d *framewo } // Get entity id - entityID := d.Get("entity_id").(string) - if entityID != "" { - entity, err = i.memDBEntityByID(entityID, true) + canonicalID := d.Get("entity_id").(string) + if canonicalID == "" { + canonicalID = d.Get("canonical_id").(string) + } + + if canonicalID != "" { + entity, err = i.MemDBEntityByID(canonicalID, true) if err != nil { return nil, err } @@ -179,7 +279,7 @@ func (i *IdentityStore) handleAliasUpdateCommon(req *logical.Request, d *framewo } } - aliasByFactors, err := i.memDBAliasByFactors(mountValidationResp.MountAccessor, aliasName, false) + aliasByFactors, err := 
i.MemDBAliasByFactors(mountValidationResp.MountAccessor, aliasName, false, false) if err != nil { return nil, err } @@ -191,7 +291,7 @@ func (i *IdentityStore) handleAliasUpdateCommon(req *logical.Request, d *framewo return logical.ErrorResponse("combination of mount and alias name is already in use"), nil } - // If this is a alias being tied to a non-existent entity, create + // If this is an alias being tied to a non-existent entity, create // a new entity for it. if entity == nil { entity = &identity.Entity{ @@ -210,7 +310,7 @@ func (i *IdentityStore) handleAliasUpdateCommon(req *logical.Request, d *framewo } // Fetch the entity to which the alias is tied to - existingEntity, err := i.memDBEntityByAliasID(alias.ID, true) + existingEntity, err := i.MemDBEntityByAliasID(alias.ID, true) if err != nil { return nil, err } @@ -253,9 +353,9 @@ func (i *IdentityStore) handleAliasUpdateCommon(req *logical.Request, d *framewo alias.MountAccessor = mountValidationResp.MountAccessor alias.MountPath = mountValidationResp.MountPath - // Set the entity ID in the alias index. This should be done after + // Set the canonical ID in the alias index. This should be done after // sanitizing entity. - alias.EntityID = entity.ID + alias.CanonicalID = entity.ID // ID creation and other validations err = i.sanitizeAlias(alias) @@ -274,14 +374,14 @@ func (i *IdentityStore) handleAliasUpdateCommon(req *logical.Request, d *framewo // Return ID of both alias and entity resp.Data = map[string]interface{}{ - "id": alias.ID, - "entity_id": entity.ID, + "id": alias.ID, + "canonical_id": entity.ID, } return resp, nil } -// pathAliasIDRead returns the properties of a alias for a given +// pathAliasIDRead returns the properties of an alias for a given // alias ID func (i *IdentityStore) pathAliasIDRead(req *logical.Request, d *framework.FieldData) (*logical.Response, error) { aliasID := d.Get("id").(string) @@ -289,24 +389,28 @@ func (i *IdentityStore) pathAliasIDRead(req *logical.Request, d *framework.Field return logical.ErrorResponse("missing alias id"), nil } - alias, err := i.memDBAliasByID(aliasID, false) + alias, err := i.MemDBAliasByID(aliasID, false, false) if err != nil { return nil, err } + return i.handleAliasReadCommon(alias) +} + +func (i *IdentityStore) handleAliasReadCommon(alias *identity.Alias) (*logical.Response, error) { if alias == nil { return nil, nil } respData := map[string]interface{}{} respData["id"] = alias.ID - respData["entity_id"] = alias.EntityID + respData["canonical_id"] = alias.CanonicalID respData["mount_type"] = alias.MountType respData["mount_accessor"] = alias.MountAccessor respData["mount_path"] = alias.MountPath respData["metadata"] = alias.Metadata respData["name"] = alias.Name - respData["merged_from_entity_ids"] = alias.MergedFromEntityIDs + respData["merged_from_canonical_ids"] = alias.MergedFromCanonicalIDs // Convert protobuf timestamp into RFC3339 format respData["creation_time"] = ptypes.TimestampString(alias.CreationTime) @@ -317,7 +421,7 @@ func (i *IdentityStore) pathAliasIDRead(req *logical.Request, d *framework.Field }, nil } -// pathAliasIDDelete deleted the alias for a given alias ID +// pathAliasIDDelete deletes the alias for a given alias ID func (i *IdentityStore) pathAliasIDDelete(req *logical.Request, d *framework.FieldData) (*logical.Response, error) { aliasID := d.Get("id").(string) if aliasID == "" { @@ -331,7 +435,7 @@ func (i *IdentityStore) pathAliasIDDelete(req *logical.Request, d *framework.Fie // store func (i *IdentityStore) pathAliasIDList(req 
*logical.Request, d *framework.FieldData) (*logical.Response, error) { ws := memdb.NewWatchSet() - iter, err := i.memDBAliases(ws) + iter, err := i.MemDBAliases(ws, false) if err != nil { return nil, fmt.Errorf("failed to fetch iterator for aliases in memdb: %v", err) } @@ -350,15 +454,15 @@ func (i *IdentityStore) pathAliasIDList(req *logical.Request, d *framework.Field var aliasHelp = map[string][2]string{ "alias": { - "Create a new alias", + "Create a new alias.", "", }, "alias-id": { - "Update, read or delete an entity using alias ID", + "Update, read or delete an alias ID.", "", }, "alias-id-list": { - "List all the entity IDs", + "List all the entity IDs.", "", }, } diff --git a/vault/identity_store_aliases_test.go b/vault/identity_store_aliases_test.go index a4367f5f1f..926416963e 100644 --- a/vault/identity_store_aliases_test.go +++ b/vault/identity_store_aliases_test.go @@ -27,7 +27,7 @@ func TestIdentityStore_ListAlias(t *testing.T) { } entityID := resp.Data["id"].(string) - // Create a alias + // Create an alias aliasData := map[string]interface{}{ "name": "testaliasname", "mount_accessor": githubAccessor, @@ -82,7 +82,7 @@ func TestIdentityStore_AliasSameAliasNames(t *testing.T) { Data: aliasData, } - // Register a alias + // Register an alias resp, err = is.HandleRequest(aliasReq) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("err:%v resp:%#v", err, resp) @@ -118,13 +118,13 @@ func TestIdentityStore_MemDBAliasIndexes(t *testing.T) { entity.BucketKeyHash = is.entityPacker.BucketKeyHashByItemID(entity.ID) - err = is.memDBUpsertEntity(entity) + err = is.MemDBUpsertEntity(entity) if err != nil { t.Fatal(err) } alias := &identity.Alias{ - EntityID: entity.ID, + CanonicalID: entity.ID, ID: "testaliasid", MountAccessor: githubAccessor, MountType: validateMountResp.MountType, @@ -135,12 +135,12 @@ func TestIdentityStore_MemDBAliasIndexes(t *testing.T) { }, } - err = is.memDBUpsertAlias(alias) + err = is.MemDBUpsertAlias(alias, false) if err != nil { t.Fatal(err) } - aliasFetched, err := is.memDBAliasByID("testaliasid", false) + aliasFetched, err := is.MemDBAliasByID("testaliasid", false, false) if err != nil { t.Fatal(err) } @@ -149,7 +149,7 @@ func TestIdentityStore_MemDBAliasIndexes(t *testing.T) { t.Fatalf("bad: mismatched aliases; expected: %#v\n actual: %#v\n", alias, aliasFetched) } - aliasFetched, err = is.memDBAliasByEntityID(entity.ID, false) + aliasFetched, err = is.MemDBAliasByCanonicalID(entity.ID, false, false) if err != nil { t.Fatal(err) } @@ -158,7 +158,7 @@ func TestIdentityStore_MemDBAliasIndexes(t *testing.T) { t.Fatalf("bad: mismatched aliases; expected: %#v\n actual: %#v\n", alias, aliasFetched) } - aliasFetched, err = is.memDBAliasByFactors(validateMountResp.MountAccessor, "testaliasname", false) + aliasFetched, err = is.MemDBAliasByFactors(validateMountResp.MountAccessor, "testaliasname", false, false) if err != nil { t.Fatal(err) } @@ -167,9 +167,9 @@ func TestIdentityStore_MemDBAliasIndexes(t *testing.T) { t.Fatalf("bad: mismatched aliases; expected: %#v\n actual: %#v\n", alias, aliasFetched) } - aliasesFetched, err := is.memDBAliasesByMetadata(map[string]string{ + aliasesFetched, err := is.MemDBAliasesByMetadata(map[string]string{ "testkey1": "testmetadatavalue1", - }, false) + }, false, false) if err != nil { t.Fatal(err) } @@ -182,9 +182,9 @@ func TestIdentityStore_MemDBAliasIndexes(t *testing.T) { t.Fatalf("bad: mismatched aliases; expected: %#v\n actual: %#v\n", alias, aliasFetched) } - aliasesFetched, err = 
is.memDBAliasesByMetadata(map[string]string{ + aliasesFetched, err = is.MemDBAliasesByMetadata(map[string]string{ "testkey2": "testmetadatavalue2", - }, false) + }, false, false) if err != nil { t.Fatal(err) } @@ -197,10 +197,10 @@ func TestIdentityStore_MemDBAliasIndexes(t *testing.T) { t.Fatalf("bad: mismatched aliases; expected: %#v\n actual: %#v\n", alias, aliasFetched) } - aliasesFetched, err = is.memDBAliasesByMetadata(map[string]string{ + aliasesFetched, err = is.MemDBAliasesByMetadata(map[string]string{ "testkey1": "testmetadatavalue1", "testkey2": "testmetadatavalue2", - }, false) + }, false, false) if err != nil { t.Fatal(err) } @@ -214,7 +214,7 @@ func TestIdentityStore_MemDBAliasIndexes(t *testing.T) { } alias2 := &identity.Alias{ - EntityID: entity.ID, + CanonicalID: entity.ID, ID: "testaliasid2", MountAccessor: validateMountResp.MountAccessor, MountType: validateMountResp.MountType, @@ -225,14 +225,14 @@ func TestIdentityStore_MemDBAliasIndexes(t *testing.T) { }, } - err = is.memDBUpsertAlias(alias2) + err = is.MemDBUpsertAlias(alias2, false) if err != nil { t.Fatal(err) } - aliasesFetched, err = is.memDBAliasesByMetadata(map[string]string{ + aliasesFetched, err = is.MemDBAliasesByMetadata(map[string]string{ "testkey1": "testmetadatavalue1", - }, false) + }, false, false) if err != nil { t.Fatal(err) } @@ -241,9 +241,9 @@ func TestIdentityStore_MemDBAliasIndexes(t *testing.T) { t.Fatalf("bad: length of aliases; expected: 2, actual: %d", len(aliasesFetched)) } - aliasesFetched, err = is.memDBAliasesByMetadata(map[string]string{ + aliasesFetched, err = is.MemDBAliasesByMetadata(map[string]string{ "testkey3": "testmetadatavalue3", - }, false) + }, false, false) if err != nil { t.Fatal(err) } @@ -252,12 +252,12 @@ func TestIdentityStore_MemDBAliasIndexes(t *testing.T) { t.Fatalf("bad: length of aliases; expected: 1, actual: %d", len(aliasesFetched)) } - err = is.memDBDeleteAliasByID("testaliasid") + err = is.MemDBDeleteAliasByID("testaliasid", false) if err != nil { t.Fatal(err) } - aliasFetched, err = is.memDBAliasByID("testaliasid", false) + aliasFetched, err = is.MemDBAliasByID("testaliasid", false, false) if err != nil { t.Fatal(err) } @@ -305,7 +305,7 @@ func TestIdentityStore_AliasRegister(t *testing.T) { t.Fatalf("invalid alias id in alias register response") } - entityIDRaw, ok := resp.Data["entity_id"] + entityIDRaw, ok := resp.Data["canonical_id"] if !ok { t.Fatalf("entity id not present in alias register response") } @@ -333,7 +333,7 @@ func TestIdentityStore_AliasUpdate(t *testing.T) { Data: aliasData, } - // This will create a alias and a corresponding entity + // This will create an alias and a corresponding entity resp, err = is.HandleRequest(aliasReq) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("err:%v resp:%#v", err, resp) @@ -508,7 +508,7 @@ func TestIdentityStore_AliasReadDelete(t *testing.T) { } if resp.Data["id"].(string) == "" || - resp.Data["entity_id"].(string) == "" || + resp.Data["canonical_id"].(string) == "" || resp.Data["name"].(string) != registerData["name"] || resp.Data["mount_type"].(string) != "github" { t.Fatalf("bad: alias read response; \nexpected: %#v \nactual: %#v\n", registerData, resp.Data) diff --git a/vault/identity_store_entities.go b/vault/identity_store_entities.go index c691ee755e..aee241a918 100644 --- a/vault/identity_store_entities.go +++ b/vault/identity_store_entities.go @@ -125,7 +125,7 @@ func (i *IdentityStore) pathEntityMergeID(req *logical.Request, d *framework.Fie force := d.Get("force").(bool) - 
toEntityForLocking, err := i.memDBEntityByID(toEntityID, false) + toEntityForLocking, err := i.MemDBEntityByID(toEntityID, false) if err != nil { return nil, err } @@ -144,7 +144,7 @@ func (i *IdentityStore) pathEntityMergeID(req *logical.Request, d *framework.Fie defer txn.Abort() // Re-read post lock acquisition - toEntity, err := i.memDBEntityByID(toEntityID, true) + toEntity, err := i.MemDBEntityByID(toEntityID, true) if err != nil { return nil, err } @@ -163,7 +163,7 @@ func (i *IdentityStore) pathEntityMergeID(req *logical.Request, d *framework.Fie return logical.ErrorResponse("to_entity_id should not be present in from_entity_ids"), nil } - lockFromEntity, err := i.memDBEntityByID(fromEntityID, false) + lockFromEntity, err := i.MemDBEntityByID(fromEntityID, false) if err != nil { return nil, err } @@ -186,7 +186,7 @@ func (i *IdentityStore) pathEntityMergeID(req *logical.Request, d *framework.Fie } // Re-read the entities post lock acquisition - fromEntity, err := i.memDBEntityByID(fromEntityID, false) + fromEntity, err := i.MemDBEntityByID(fromEntityID, false) if err != nil { if fromLockHeld { fromEntityLock.Unlock() @@ -209,13 +209,12 @@ func (i *IdentityStore) pathEntityMergeID(req *logical.Request, d *framework.Fie } for _, alias := range fromEntity.Aliases { - // Set the desired entity id - alias.EntityID = toEntity.ID + // Set the desired canonical ID + alias.CanonicalID = toEntity.ID - // Set the entity id of which this alias is now an alias to - alias.MergedFromEntityIDs = append(alias.MergedFromEntityIDs, fromEntity.ID) + alias.MergedFromCanonicalIDs = append(alias.MergedFromCanonicalIDs, fromEntity.ID) - err = i.memDBUpsertAliasInTxn(txn, alias) + err = i.MemDBUpsertAliasInTxn(txn, alias, false) if err != nil { if fromLockHeld { fromEntityLock.Unlock() @@ -237,7 +236,7 @@ func (i *IdentityStore) pathEntityMergeID(req *logical.Request, d *framework.Fie toEntity.MergedEntityIDs = append(toEntity.MergedEntityIDs, fromEntity.ID) // Delete the entity which we are merging from in MemDB using the same transaction - err = i.memDBDeleteEntityByIDInTxn(txn, fromEntity.ID) + err = i.MemDBDeleteEntityByIDInTxn(txn, fromEntity.ID) if err != nil { if fromLockHeld { fromEntityLock.Unlock() @@ -264,7 +263,7 @@ func (i *IdentityStore) pathEntityMergeID(req *logical.Request, d *framework.Fie } // Update MemDB with changes to the entity we are merging to - err = i.memDBUpsertEntityInTxn(txn, toEntity) + err = i.MemDBUpsertEntityInTxn(txn, toEntity) if err != nil { return nil, err } @@ -310,7 +309,7 @@ func (i *IdentityStore) pathEntityIDUpdate(req *logical.Request, d *framework.Fi return logical.ErrorResponse("missing entity id"), nil } - entity, err := i.memDBEntityByID(entityID, true) + entity, err := i.MemDBEntityByID(entityID, true) if err != nil { return nil, err } @@ -342,7 +341,7 @@ func (i *IdentityStore) handleEntityUpdateCommon(req *logical.Request, d *framew // Get the name entityName := d.Get("name").(string) if entityName != "" { - entityByName, err := i.memDBEntityByName(entityName, false) + entityByName, err := i.MemDBEntityByName(entityName, false) if err != nil { return nil, err } @@ -403,7 +402,7 @@ func (i *IdentityStore) pathEntityIDRead(req *logical.Request, d *framework.Fiel return logical.ErrorResponse("missing entity id"), nil } - entity, err := i.memDBEntityByID(entityID, false) + entity, err := i.MemDBEntityByID(entityID, false) if err != nil { return nil, err } @@ -427,13 +426,13 @@ func (i *IdentityStore) pathEntityIDRead(req *logical.Request, d *framework.Fiel 
for aliasIdx, alias := range entity.Aliases { aliasMap := map[string]interface{}{} aliasMap["id"] = alias.ID - aliasMap["entity_id"] = alias.EntityID + aliasMap["canonical_id"] = alias.CanonicalID aliasMap["mount_type"] = alias.MountType aliasMap["mount_accessor"] = alias.MountAccessor aliasMap["mount_path"] = alias.MountPath aliasMap["metadata"] = alias.Metadata aliasMap["name"] = alias.Name - aliasMap["merged_from_entity_ids"] = alias.MergedFromEntityIDs + aliasMap["merged_from_canonical_ids"] = alias.MergedFromCanonicalIDs aliasMap["creation_time"] = ptypes.TimestampString(alias.CreationTime) aliasMap["last_update_time"] = ptypes.TimestampString(alias.LastUpdateTime) aliasesToReturn[aliasIdx] = aliasMap @@ -464,7 +463,7 @@ func (i *IdentityStore) pathEntityIDDelete(req *logical.Request, d *framework.Fi // store func (i *IdentityStore) pathEntityIDList(req *logical.Request, d *framework.FieldData) (*logical.Response, error) { ws := memdb.NewWatchSet() - iter, err := i.memDBEntities(ws) + iter, err := i.MemDBEntities(ws) if err != nil { return nil, fmt.Errorf("failed to fetch iterator for entities in memdb: %v", err) } diff --git a/vault/identity_store_entities_test.go b/vault/identity_store_entities_test.go index b217456e7e..10225fd444 100644 --- a/vault/identity_store_entities_test.go +++ b/vault/identity_store_entities_test.go @@ -70,9 +70,9 @@ func TestIdentityStore_EntityCreateUpdate(t *testing.T) { func TestIdentityStore_CloneImmutability(t *testing.T) { alias := &identity.Alias{ - ID: "testaliasid", - Name: "testaliasname", - MergedFromEntityIDs: []string{"entityid1"}, + ID: "testaliasid", + Name: "testaliasname", + MergedFromCanonicalIDs: []string{"entityid1"}, } entity := &identity.Entity{ @@ -100,9 +100,9 @@ func TestIdentityStore_CloneImmutability(t *testing.T) { t.Fatal(err) } - alias.MergedFromEntityIDs[0] = "invalidid" + alias.MergedFromCanonicalIDs[0] = "invalidid" - if clonedAlias.MergedFromEntityIDs[0] == "invalidid" { + if clonedAlias.MergedFromCanonicalIDs[0] == "invalidid" { t.Fatalf("cloned alias is mutated") } } @@ -117,7 +117,7 @@ func TestIdentityStore_MemDBImmutability(t *testing.T) { } alias1 := &identity.Alias{ - EntityID: "testentityid", + CanonicalID: "testentityid", ID: "testaliasid", MountAccessor: githubAccessor, MountType: validateMountResp.MountType, @@ -141,12 +141,12 @@ func TestIdentityStore_MemDBImmutability(t *testing.T) { entity.BucketKeyHash = is.entityPacker.BucketKeyHashByItemID(entity.ID) - err = is.memDBUpsertEntity(entity) + err = is.MemDBUpsertEntity(entity) if err != nil { t.Fatal(err) } - entityFetched, err := is.memDBEntityByID(entity.ID, true) + entityFetched, err := is.MemDBEntityByID(entity.ID, true) if err != nil { t.Fatal(err) } @@ -154,7 +154,7 @@ func TestIdentityStore_MemDBImmutability(t *testing.T) { // Modify the fetched entity outside of a transaction entityFetched.Aliases[0].ID = "invalidaliasid" - entityFetched, err = is.memDBEntityByID(entity.ID, false) + entityFetched, err = is.MemDBEntityByID(entity.ID, false) if err != nil { t.Fatal(err) } @@ -360,7 +360,7 @@ func TestIdentityStore_MemDBEntityIndexes(t *testing.T) { } alias1 := &identity.Alias{ - EntityID: "testentityid", + CanonicalID: "testentityid", ID: "testaliasid", MountAccessor: githubAccessor, MountType: validateMountResp.MountType, @@ -372,7 +372,7 @@ func TestIdentityStore_MemDBEntityIndexes(t *testing.T) { } alias2 := &identity.Alias{ - EntityID: "testentityid", + CanonicalID: "testentityid", ID: "testaliasid2", MountAccessor: validateMountResp.MountAccessor, 
MountType: validateMountResp.MountType, @@ -397,13 +397,13 @@ func TestIdentityStore_MemDBEntityIndexes(t *testing.T) { entity.BucketKeyHash = is.entityPacker.BucketKeyHashByItemID(entity.ID) - err = is.memDBUpsertEntity(entity) + err = is.MemDBUpsertEntity(entity) if err != nil { t.Fatal(err) } // Fetch the entity using its ID - entityFetched, err := is.memDBEntityByID(entity.ID, false) + entityFetched, err := is.MemDBEntityByID(entity.ID, false) if err != nil { t.Fatal(err) } @@ -413,7 +413,7 @@ func TestIdentityStore_MemDBEntityIndexes(t *testing.T) { } // Fetch the entity using its name - entityFetched, err = is.memDBEntityByName(entity.Name, false) + entityFetched, err = is.MemDBEntityByName(entity.Name, false) if err != nil { t.Fatal(err) } @@ -423,7 +423,7 @@ func TestIdentityStore_MemDBEntityIndexes(t *testing.T) { } // Fetch entities using the metadata - entitiesFetched, err := is.memDBEntitiesByMetadata(map[string]string{ + entitiesFetched, err := is.MemDBEntitiesByMetadata(map[string]string{ "someusefulkey": "someusefulvalue", }, false) if err != nil { @@ -438,7 +438,7 @@ func TestIdentityStore_MemDBEntityIndexes(t *testing.T) { t.Fatalf("entity mismatch; entity: %#v\n entitiesFetched[0]: %#v\n", entity, entitiesFetched[0]) } - entitiesFetched, err = is.memDBEntitiesByBucketEntryKeyHash(entity.BucketKeyHash) + entitiesFetched, err = is.MemDBEntitiesByBucketEntryKeyHash(entity.BucketKeyHash) if err != nil { t.Fatal(err) } @@ -447,12 +447,12 @@ func TestIdentityStore_MemDBEntityIndexes(t *testing.T) { t.Fatalf("bad: length of entities; expected: 1, actual: %d", len(entitiesFetched)) } - err = is.memDBDeleteEntityByID(entity.ID) + err = is.MemDBDeleteEntityByID(entity.ID) if err != nil { t.Fatal(err) } - entityFetched, err = is.memDBEntityByID(entity.ID, false) + entityFetched, err = is.MemDBEntityByID(entity.ID, false) if err != nil { t.Fatal(err) } @@ -461,7 +461,7 @@ func TestIdentityStore_MemDBEntityIndexes(t *testing.T) { t.Fatalf("bad: entity; expected: nil, actual: %#v\n", entityFetched) } - entityFetched, err = is.memDBEntityByName(entity.Name, false) + entityFetched, err = is.MemDBEntityByName(entity.Name, false) if err != nil { t.Fatal(err) } @@ -678,7 +678,7 @@ func TestIdentityStore_MergeEntitiesByID(t *testing.T) { t.Fatalf("err:%v resp:%#v", err, resp) } - entity1, err := is.memDBEntityByID(entityID1, false) + entity1, err := is.MemDBEntityByID(entityID1, false) if err != nil { t.Fatal(err) } @@ -720,7 +720,7 @@ func TestIdentityStore_MergeEntitiesByID(t *testing.T) { t.Fatalf("err:%v resp:%#v", err, resp) } - entity2, err := is.memDBEntityByID(entityID2, false) + entity2, err := is.MemDBEntityByID(entityID2, false) if err != nil { t.Fatal(err) } @@ -772,7 +772,7 @@ func TestIdentityStore_MergeEntitiesByID(t *testing.T) { for _, aliasRaw := range entity2Aliases { alias := aliasRaw.(map[string]interface{}) - aliasLookedUp, err := is.memDBAliasByID(alias["id"].(string), false) + aliasLookedUp, err := is.MemDBAliasByID(alias["id"].(string), false, false) if err != nil { t.Fatal(err) } diff --git a/vault/identity_store_group_aliases.go b/vault/identity_store_group_aliases.go new file mode 100644 index 0000000000..0f05ff082f --- /dev/null +++ b/vault/identity_store_group_aliases.go @@ -0,0 +1,279 @@ +package vault + +import ( + "fmt" + "strings" + + memdb "github.com/hashicorp/go-memdb" + "github.com/hashicorp/vault/helper/identity" + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/logical/framework" +) + +func groupAliasPaths(i *IdentityStore) 
[]*framework.Path { + return []*framework.Path{ + { + Pattern: "group-alias$", + Fields: map[string]*framework.FieldSchema{ + "id": { + Type: framework.TypeString, + Description: "ID of the group alias.", + }, + "name": { + Type: framework.TypeString, + Description: "Alias of the group.", + }, + "mount_accessor": { + Type: framework.TypeString, + Description: "Mount accessor to which this alias belongs to.", + }, + "canonical_id": { + Type: framework.TypeString, + Description: "ID of the group to which this is an alias.", + }, + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: i.pathGroupAliasRegister, + }, + + HelpSynopsis: strings.TrimSpace(groupAliasHelp["group-alias"][0]), + HelpDescription: strings.TrimSpace(groupAliasHelp["group-alias"][1]), + }, + { + Pattern: "group-alias/id/" + framework.GenericNameRegex("id"), + Fields: map[string]*framework.FieldSchema{ + "id": { + Type: framework.TypeString, + Description: "ID of the group alias.", + }, + "name": { + Type: framework.TypeString, + Description: "Alias of the group.", + }, + "mount_accessor": { + Type: framework.TypeString, + Description: "Mount accessor to which this alias belongs to.", + }, + "canonical_id": { + Type: framework.TypeString, + Description: "ID of the group to which this is an alias.", + }, + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: i.pathGroupAliasIDUpdate, + logical.ReadOperation: i.pathGroupAliasIDRead, + logical.DeleteOperation: i.pathGroupAliasIDDelete, + }, + + HelpSynopsis: strings.TrimSpace(groupAliasHelp["group-alias-by-id"][0]), + HelpDescription: strings.TrimSpace(groupHelp["group-alias-by-id"][1]), + }, + { + Pattern: "group-alias/id/?$", + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: i.pathGroupAliasIDList, + }, + + HelpSynopsis: strings.TrimSpace(entityHelp["group-alias-id-list"][0]), + HelpDescription: strings.TrimSpace(entityHelp["group-alias-id-list"][1]), + }, + } +} + +func (i *IdentityStore) pathGroupAliasRegister(req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + _, ok := d.GetOk("id") + if ok { + return i.pathGroupAliasIDUpdate(req, d) + } + + i.groupLock.Lock() + defer i.groupLock.Unlock() + + return i.handleGroupAliasUpdateCommon(req, d, nil) +} + +func (i *IdentityStore) pathGroupAliasIDUpdate(req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + groupAliasID := d.Get("id").(string) + if groupAliasID == "" { + return logical.ErrorResponse("empty group alias ID"), nil + } + + i.groupLock.Lock() + defer i.groupLock.Unlock() + + groupAlias, err := i.MemDBAliasByID(groupAliasID, true, true) + if err != nil { + return nil, err + } + if groupAlias == nil { + return logical.ErrorResponse("invalid group alias ID"), nil + } + + return i.handleGroupAliasUpdateCommon(req, d, groupAlias) +} + +func (i *IdentityStore) handleGroupAliasUpdateCommon(req *logical.Request, d *framework.FieldData, groupAlias *identity.Alias) (*logical.Response, error) { + var err error + var newGroupAlias bool + var group *identity.Group + + if groupAlias == nil { + groupAlias = &identity.Alias{} + newGroupAlias = true + } + + groupID := d.Get("canonical_id").(string) + if groupID != "" { + group, err = i.MemDBGroupByID(groupID, true) + if err != nil { + return nil, err + } + if group == nil { + return logical.ErrorResponse("invalid group ID"), nil + } + if group.Type != groupTypeExternal { + return logical.ErrorResponse("alias can't be set on an 
internal group"), nil + } + } + + // Get group alias name + groupAliasName := d.Get("name").(string) + if groupAliasName == "" { + return logical.ErrorResponse("missing alias name"), nil + } + + mountAccessor := d.Get("mount_accessor").(string) + if mountAccessor == "" { + return logical.ErrorResponse("missing mount_accessor"), nil + } + + mountValidationResp := i.validateMountAccessorFunc(mountAccessor) + if mountValidationResp == nil { + return logical.ErrorResponse(fmt.Sprintf("invalid mount accessor %q", mountAccessor)), nil + } + + groupAliasByFactors, err := i.MemDBAliasByFactors(mountValidationResp.MountAccessor, groupAliasName, false, true) + if err != nil { + return nil, err + } + + resp := &logical.Response{} + + if newGroupAlias { + if groupAliasByFactors != nil { + return logical.ErrorResponse("combination of mount and group alias name is already in use"), nil + } + + // If this is an alias being tied to a non-existent group, create + // a new group for it. + if group == nil { + group = &identity.Group{ + Type: groupTypeExternal, + Alias: groupAlias, + } + } else { + group.Alias = groupAlias + } + } else { + // Verify that the combination of group alias name and mount is not + // already tied to a different alias + if groupAliasByFactors != nil && groupAliasByFactors.ID != groupAlias.ID { + return logical.ErrorResponse("combination of mount and group alias name is already in use"), nil + } + + // Fetch the group to which the alias is tied to + existingGroup, err := i.MemDBGroupByAliasID(groupAlias.ID, true) + if err != nil { + return nil, err + } + + if existingGroup == nil { + return nil, fmt.Errorf("group alias is not associated with a group") + } + + if group != nil && group.ID != existingGroup.ID { + return logical.ErrorResponse("alias is already tied to a different group"), nil + } + + group = existingGroup + group.Alias = groupAlias + } + + group.Alias.Name = groupAliasName + group.Alias.MountType = mountValidationResp.MountType + group.Alias.MountAccessor = mountValidationResp.MountAccessor + + err = i.sanitizeAndUpsertGroup(group, nil) + if err != nil { + return nil, err + } + + resp.Data = map[string]interface{}{ + "id": groupAlias.ID, + "canonical_id": group.ID, + } + + return resp, nil +} + +// pathGroupAliasIDRead returns the properties of an alias for a given +// alias ID +func (i *IdentityStore) pathGroupAliasIDRead(req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + groupAliasID := d.Get("id").(string) + if groupAliasID == "" { + return logical.ErrorResponse("empty group alias id"), nil + } + + groupAlias, err := i.MemDBAliasByID(groupAliasID, false, true) + if err != nil { + return nil, err + } + + return i.handleAliasReadCommon(groupAlias) +} + +// pathGroupAliasIDDelete deletes the group's alias for a given group alias ID +func (i *IdentityStore) pathGroupAliasIDDelete(req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + groupAliasID := d.Get("id").(string) + if groupAliasID == "" { + return logical.ErrorResponse("missing group alias ID"), nil + } + + return nil, i.deleteGroupAlias(groupAliasID) +} + +// pathGroupAliasIDList lists the IDs of all the valid group aliases in the +// identity store +func (i *IdentityStore) pathGroupAliasIDList(req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + ws := memdb.NewWatchSet() + iter, err := i.MemDBAliases(ws, true) + if err != nil { + return nil, fmt.Errorf("failed to fetch iterator for group aliases in memdb: %v", err) + } + + var groupAliasIDs 
[]string + for { + raw := iter.Next() + if raw == nil { + break + } + groupAliasIDs = append(groupAliasIDs, raw.(*identity.Alias).ID) + } + + return logical.ListResponse(groupAliasIDs), nil +} + +var groupAliasHelp = map[string][2]string{ + "group-alias": { + "Creates a new group alias, or updates an existing one.", + "", + }, + "group-alias-id": { + "Update, read or delete a group alias using ID.", + "", + }, + "group-alias-id-list": { + "List all the entity IDs.", + "", + }, +} diff --git a/vault/identity_store_group_aliases_test.go b/vault/identity_store_group_aliases_test.go new file mode 100644 index 0000000000..0a5a9aa768 --- /dev/null +++ b/vault/identity_store_group_aliases_test.go @@ -0,0 +1,163 @@ +package vault + +import ( + "testing" + + "github.com/hashicorp/vault/helper/identity" + "github.com/hashicorp/vault/logical" +) + +func TestIdentityStore_GroupAliases_CRUD(t *testing.T) { + var resp *logical.Response + var err error + i, accessor, _ := testIdentityStoreWithGithubAuth(t) + + groupReq := &logical.Request{ + Path: "group", + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "type": "external", + }, + } + resp, err = i.HandleRequest(groupReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v\n", resp, err) + } + groupID := resp.Data["id"].(string) + + groupAliasReq := &logical.Request{ + Path: "group-alias", + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "name": "testgroupalias", + "mount_accessor": accessor, + "canonical_id": groupID, + "mount_type": "ldap", + }, + } + resp, err = i.HandleRequest(groupAliasReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v\n", resp, err) + } + groupAliasID := resp.Data["id"].(string) + + groupAliasReq.Path = "group-alias/id/" + groupAliasID + groupAliasReq.Operation = logical.ReadOperation + resp, err = i.HandleRequest(groupAliasReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v\n", resp, err) + } + + if resp.Data["id"].(string) != groupAliasID { + t.Fatalf("bad: group alias: %#v\n", resp.Data) + } + + groupAliasReq.Operation = logical.DeleteOperation + resp, err = i.HandleRequest(groupAliasReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v\n", resp, err) + } + + groupAliasReq.Operation = logical.ReadOperation + resp, err = i.HandleRequest(groupAliasReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v\n", resp, err) + } + + if resp != nil { + t.Fatalf("failed to delete group alias") + } +} + +func TestIdentityStore_GroupAliases_MemDBIndexes(t *testing.T) { + var err error + i, accessor, _ := testIdentityStoreWithGithubAuth(t) + + group := &identity.Group{ + ID: "testgroupid", + Name: "testgroupname", + Metadata: map[string]string{ + "testmetadatakey1": "testmetadatavalue1", + "testmetadatakey2": "testmetadatavalue2", + }, + Alias: &identity.Alias{ + ID: "testgroupaliasid", + Name: "testalias", + MountAccessor: accessor, + CanonicalID: "testgroupid", + MountType: "ldap", + }, + ParentGroupIDs: []string{"testparentgroupid1", "testparentgroupid2"}, + MemberEntityIDs: []string{"testentityid1", "testentityid2"}, + Policies: []string{"testpolicy1", "testpolicy2"}, + BucketKeyHash: i.groupPacker.BucketKeyHashByItemID("testgroupid"), + } + + err = i.MemDBUpsertAlias(group.Alias, true) + if err != nil { + t.Fatal(err) + } + + err = i.MemDBUpsertGroup(group) + if err != nil { + t.Fatal(err) + } + + 
alias, err := i.MemDBAliasByID("testgroupaliasid", false, true) + if err != nil { + t.Fatal(err) + } + if alias.ID != "testgroupaliasid" { + t.Fatalf("bad: group alias: %#v\n", alias) + } + + group, err = i.MemDBGroupByAliasID("testgroupaliasid", false) + if err != nil { + t.Fatal(err) + } + if group.ID != "testgroupid" { + t.Fatalf("bad: group: %#v\n", group) + } + + aliasByFactors, err := i.MemDBAliasByFactors(group.Alias.MountAccessor, group.Alias.Name, false, true) + if err != nil { + t.Fatal(err) + } + if aliasByFactors.ID != "testgroupaliasid" { + t.Fatalf("bad: group alias: %#v\n", aliasByFactors) + } +} + +func TestIdentityStore_GroupAliases_AliasOnInternalGroup(t *testing.T) { + var err error + var resp *logical.Response + + i, accessor, _ := testIdentityStoreWithGithubAuth(t) + + groupReq := &logical.Request{ + Path: "group", + Operation: logical.UpdateOperation, + } + resp, err = i.HandleRequest(groupReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v; err: %v", resp, err) + } + groupID := resp.Data["id"].(string) + + aliasReq := &logical.Request{ + Path: "group-alias", + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "name": "testname", + "mount_accessor": accessor, + "canonical_id": groupID, + }, + } + resp, err = i.HandleRequest(aliasReq) + if err != nil { + t.Fatal(err) + } + if !resp.IsError() { + t.Fatalf("expected an error") + } +} diff --git a/vault/identity_store_groups.go b/vault/identity_store_groups.go index 31618f044a..8f78ddd86c 100644 --- a/vault/identity_store_groups.go +++ b/vault/identity_store_groups.go @@ -11,6 +11,11 @@ import ( "github.com/hashicorp/vault/logical/framework" ) +const ( + groupTypeInternal = "internal" + groupTypeExternal = "external" +) + func groupPaths(i *IdentityStore) []*framework.Path { return []*framework.Path{ { @@ -20,6 +25,10 @@ func groupPaths(i *IdentityStore) []*framework.Path { Type: framework.TypeString, Description: "ID of the group.", }, + "type": { + Type: framework.TypeString, + Description: "Type of the group, 'internal' or 'external'. Defaults to 'internal'", + }, "name": { Type: framework.TypeString, Description: "Name of the group.", @@ -55,6 +64,11 @@ func groupPaths(i *IdentityStore) []*framework.Path { Type: framework.TypeString, Description: "ID of the group.", }, + "type": { + Type: framework.TypeString, + Default: groupTypeInternal, + Description: "Type of the group, 'internal' or 'external'. 
Defaults to 'internal'", + }, "name": { Type: framework.TypeString, Description: "Name of the group.", @@ -118,7 +132,7 @@ func (i *IdentityStore) pathGroupIDUpdate(req *logical.Request, d *framework.Fie i.groupLock.Lock() defer i.groupLock.Unlock() - group, err := i.memDBGroupByID(groupID, true) + group, err := i.MemDBGroupByID(groupID, true) if err != nil { return nil, err } @@ -143,11 +157,30 @@ func (i *IdentityStore) handleGroupUpdateCommon(req *logical.Request, d *framewo group.Policies = policiesRaw.([]string) } + groupTypeRaw, ok := d.GetOk("type") + if ok { + groupType := groupTypeRaw.(string) + if group.Type != "" && groupType != group.Type { + return logical.ErrorResponse(fmt.Sprintf("group type cannot be changed")), nil + } + + group.Type = groupType + } + + // If group type is not set, default to internal type + if group.Type == "" { + group.Type = groupTypeInternal + } + + if group.Type != groupTypeInternal && group.Type != groupTypeExternal { + return logical.ErrorResponse(fmt.Sprintf("invalid group type %q", group.Type)), nil + } + // Get the name groupName := d.Get("name").(string) if groupName != "" { // Check if there is a group already existing for the given name - groupByName, err := i.memDBGroupByName(groupName, false) + groupByName, err := i.MemDBGroupByName(groupName, false) if err != nil { return nil, err } @@ -173,6 +206,9 @@ func (i *IdentityStore) handleGroupUpdateCommon(req *logical.Request, d *framewo memberEntityIDsRaw, ok := d.GetOk("member_entity_ids") if ok { + if group.Type == groupTypeExternal { + return logical.ErrorResponse("member entities can't be set manually for external groups"), nil + } group.MemberEntityIDs = memberEntityIDsRaw.([]string) if len(group.MemberEntityIDs) > 512 { return logical.ErrorResponse("member entity IDs exceeding the limit of 512"), nil @@ -182,6 +218,9 @@ func (i *IdentityStore) handleGroupUpdateCommon(req *logical.Request, d *framewo memberGroupIDsRaw, ok := d.GetOk("member_group_ids") var memberGroupIDs []string if ok { + if group.Type == groupTypeExternal { + return logical.ErrorResponse("member groups can't be set for external groups"), nil + } memberGroupIDs = memberGroupIDsRaw.([]string) } @@ -205,20 +244,17 @@ func (i *IdentityStore) pathGroupIDRead(req *logical.Request, d *framework.Field return logical.ErrorResponse("empty group id"), nil } - group, err := i.memDBGroupByID(groupID, false) + group, err := i.MemDBGroupByID(groupID, false) if err != nil { return nil, err } - if group == nil { - return nil, nil - } return i.handleGroupReadCommon(group) } func (i *IdentityStore) handleGroupReadCommon(group *identity.Group) (*logical.Response, error) { if group == nil { - return nil, fmt.Errorf("nil group") + return nil, nil } respData := map[string]interface{}{} @@ -230,6 +266,23 @@ func (i *IdentityStore) handleGroupReadCommon(group *identity.Group) (*logical.R respData["creation_time"] = ptypes.TimestampString(group.CreationTime) respData["last_update_time"] = ptypes.TimestampString(group.LastUpdateTime) respData["modify_index"] = group.ModifyIndex + respData["type"] = group.Type + + aliasMap := map[string]interface{}{} + if group.Alias != nil { + aliasMap["id"] = group.Alias.ID + aliasMap["canonical_id"] = group.Alias.CanonicalID + aliasMap["mount_type"] = group.Alias.MountType + aliasMap["mount_accessor"] = group.Alias.MountAccessor + aliasMap["mount_path"] = group.Alias.MountPath + aliasMap["metadata"] = group.Alias.Metadata + aliasMap["name"] = group.Alias.Name + aliasMap["merged_from_canonical_ids"] = 
group.Alias.MergedFromCanonicalIDs + aliasMap["creation_time"] = ptypes.TimestampString(group.Alias.CreationTime) + aliasMap["last_update_time"] = ptypes.TimestampString(group.Alias.LastUpdateTime) + } + + respData["alias"] = aliasMap memberGroupIDs, err := i.memberGroupIDsByID(group.ID) if err != nil { @@ -253,7 +306,7 @@ func (i *IdentityStore) pathGroupIDDelete(req *logical.Request, d *framework.Fie // pathGroupIDList lists the IDs of all the groups in the identity store func (i *IdentityStore) pathGroupIDList(req *logical.Request, d *framework.FieldData) (*logical.Response, error) { ws := memdb.NewWatchSet() - iter, err := i.memDBGroupIterator(ws) + iter, err := i.MemDBGroupIterator(ws) if err != nil { return nil, fmt.Errorf("failed to fetch iterator for group in memdb: %v", err) } diff --git a/vault/identity_store_groups_test.go b/vault/identity_store_groups_test.go index 3886f8ef4b..21e382ba79 100644 --- a/vault/identity_store_groups_test.go +++ b/vault/identity_store_groups_test.go @@ -9,6 +9,94 @@ import ( "github.com/hashicorp/vault/logical" ) +func TestIdentityStore_Groups_TypeMembershipAdditions(t *testing.T) { + var err error + var resp *logical.Response + + i, _, _ := testIdentityStoreWithGithubAuth(t) + groupReq := &logical.Request{ + Path: "group", + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "type": "external", + "member_entity_ids": "sampleentityid", + }, + } + + resp, err = i.HandleRequest(groupReq) + if err != nil { + t.Fatal(err) + } + if !resp.IsError() { + t.Fatalf("expected an error") + } + + groupReq.Data = map[string]interface{}{ + "type": "external", + "member_group_ids": "samplegroupid", + } + + resp, err = i.HandleRequest(groupReq) + if err != nil { + t.Fatal(err) + } + if !resp.IsError() { + t.Fatalf("expected an error") + } +} + +func TestIdentityStore_Groups_TypeImmutability(t *testing.T) { + var err error + var resp *logical.Response + + i, _, _ := testIdentityStoreWithGithubAuth(t) + groupReq := &logical.Request{ + Path: "group", + Operation: logical.UpdateOperation, + } + + resp, err = i.HandleRequest(groupReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v, err: %v", resp, err) + } + internalGroupID := resp.Data["id"].(string) + + groupReq.Data = map[string]interface{}{ + "type": "external", + } + resp, err = i.HandleRequest(groupReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v, err: %v", resp, err) + } + externalGroupID := resp.Data["id"].(string) + + // Try to mark internal group as external + groupReq.Data = map[string]interface{}{ + "type": "external", + } + groupReq.Path = "group/id/" + internalGroupID + resp, err = i.HandleRequest(groupReq) + if err != nil { + t.Fatal(err) + } + if !resp.IsError() { + t.Fatalf("expected an error") + } + + // Try to mark internal group as external + groupReq.Data = map[string]interface{}{ + "type": "internal", + } + groupReq.Path = "group/id/" + externalGroupID + resp, err = i.HandleRequest(groupReq) + if err != nil { + t.Fatal(err) + } + if !resp.IsError() { + t.Fatalf("expected an error") + } +} + func TestIdentityStore_MemDBGroupIndexes(t *testing.T) { var err error i, _, _ := testIdentityStoreWithGithubAuth(t) @@ -28,7 +116,7 @@ func TestIdentityStore_MemDBGroupIndexes(t *testing.T) { } // Insert it into memdb - err = i.memDBUpsertGroup(group) + err = i.MemDBUpsertGroup(group) if err != nil { t.Fatal(err) } @@ -48,7 +136,7 @@ func TestIdentityStore_MemDBGroupIndexes(t *testing.T) { } // Insert it into memdb - err = 
i.memDBUpsertGroup(group) + err = i.MemDBUpsertGroup(group) if err != nil { t.Fatal(err) } @@ -56,7 +144,7 @@ func TestIdentityStore_MemDBGroupIndexes(t *testing.T) { var fetchedGroup *identity.Group // Fetch group given the name - fetchedGroup, err = i.memDBGroupByName("testgroupname", false) + fetchedGroup, err = i.MemDBGroupByName("testgroupname", false) if err != nil { t.Fatal(err) } @@ -65,7 +153,7 @@ func TestIdentityStore_MemDBGroupIndexes(t *testing.T) { } // Fetch group given the ID - fetchedGroup, err = i.memDBGroupByID("testgroupid", false) + fetchedGroup, err = i.MemDBGroupByID("testgroupid", false) if err != nil { t.Fatal(err) } @@ -75,7 +163,7 @@ func TestIdentityStore_MemDBGroupIndexes(t *testing.T) { var fetchedGroups []*identity.Group // Fetch the subgroups of a given group ID - fetchedGroups, err = i.memDBGroupsByParentGroupID("testparentgroupid1", false) + fetchedGroups, err = i.MemDBGroupsByParentGroupID("testparentgroupid1", false) if err != nil { t.Fatal(err) } @@ -83,7 +171,7 @@ func TestIdentityStore_MemDBGroupIndexes(t *testing.T) { t.Fatalf("failed to fetch an indexed group") } - fetchedGroups, err = i.memDBGroupsByParentGroupID("testparentgroupid2", false) + fetchedGroups, err = i.MemDBGroupsByParentGroupID("testparentgroupid2", false) if err != nil { t.Fatal(err) } @@ -92,7 +180,7 @@ func TestIdentityStore_MemDBGroupIndexes(t *testing.T) { } // Fetch groups based on policy name - fetchedGroups, err = i.memDBGroupsByPolicy("testpolicy1", false) + fetchedGroups, err = i.MemDBGroupsByPolicy("testpolicy1", false) if err != nil { t.Fatal(err) } @@ -100,7 +188,7 @@ func TestIdentityStore_MemDBGroupIndexes(t *testing.T) { t.Fatalf("failed to fetch an indexed group") } - fetchedGroups, err = i.memDBGroupsByPolicy("testpolicy2", false) + fetchedGroups, err = i.MemDBGroupsByPolicy("testpolicy2", false) if err != nil { t.Fatal(err) } @@ -109,7 +197,7 @@ func TestIdentityStore_MemDBGroupIndexes(t *testing.T) { } // Fetch groups based on member entity ID - fetchedGroups, err = i.memDBGroupsByMemberEntityID("testentityid1", false) + fetchedGroups, err = i.MemDBGroupsByMemberEntityID("testentityid1", false, false) if err != nil { t.Fatal(err) } @@ -117,7 +205,7 @@ func TestIdentityStore_MemDBGroupIndexes(t *testing.T) { t.Fatalf("failed to fetch an indexed group") } - fetchedGroups, err = i.memDBGroupsByMemberEntityID("testentityid2", false) + fetchedGroups, err = i.MemDBGroupsByMemberEntityID("testentityid2", false, false) if err != nil { t.Fatal(err) } @@ -203,12 +291,14 @@ func TestIdentityStore_GroupsCreateUpdate(t *testing.T) { }, } expectedData["id"] = resp.Data["id"] + expectedData["type"] = resp.Data["type"] expectedData["name"] = resp.Data["name"] expectedData["member_group_ids"] = resp.Data["member_group_ids"] expectedData["member_entity_ids"] = resp.Data["member_entity_ids"] expectedData["creation_time"] = resp.Data["creation_time"] expectedData["last_update_time"] = resp.Data["last_update_time"] expectedData["modify_index"] = resp.Data["modify_index"] + expectedData["alias"] = resp.Data["alias"] if !reflect.DeepEqual(expectedData, resp.Data) { t.Fatalf("bad: group data;\nexpected: %#v\n actual: %#v\n", expectedData, resp.Data) @@ -321,12 +411,14 @@ func TestIdentityStore_GroupsCRUD_ByID(t *testing.T) { }, } expectedData["id"] = resp.Data["id"] + expectedData["type"] = resp.Data["type"] expectedData["name"] = resp.Data["name"] expectedData["member_group_ids"] = resp.Data["member_group_ids"] expectedData["member_entity_ids"] = resp.Data["member_entity_ids"] 
expectedData["creation_time"] = resp.Data["creation_time"] expectedData["last_update_time"] = resp.Data["last_update_time"] expectedData["modify_index"] = resp.Data["modify_index"] + expectedData["alias"] = resp.Data["alias"] if !reflect.DeepEqual(expectedData, resp.Data) { t.Fatalf("bad: group data;\nexpected: %#v\n actual: %#v\n", expectedData, resp.Data) @@ -496,7 +588,7 @@ func TestIdentityStore_GroupHierarchyCases(t *testing.T) { var memberGroupIDs []string // Fetch 'eng' group - engGroup, err := is.memDBGroupByID(engGroupID, false) + engGroup, err := is.MemDBGroupByID(engGroupID, false) if err != nil { t.Fatal(err) } @@ -510,7 +602,7 @@ func TestIdentityStore_GroupHierarchyCases(t *testing.T) { t.Fatalf("bad: group membership IDs; expected: %#v\n actual: %#v\n", engMemberGroupIDs, memberGroupIDs) } - vaultGroup, err := is.memDBGroupByID(vaultGroupID, false) + vaultGroup, err := is.MemDBGroupByID(vaultGroupID, false) if err != nil { t.Fatal(err) } @@ -524,7 +616,7 @@ func TestIdentityStore_GroupHierarchyCases(t *testing.T) { t.Fatalf("bad: group membership IDs; expected: %#v\n actual: %#v\n", vaultMemberGroupIDs, memberGroupIDs) } - opsGroup, err := is.memDBGroupByID(opsGroupID, false) + opsGroup, err := is.MemDBGroupByID(opsGroupID, false) if err != nil { t.Fatal(err) } diff --git a/vault/identity_store_schema.go b/vault/identity_store_schema.go index f3026f0e23..33bbae4d8b 100644 --- a/vault/identity_store_schema.go +++ b/vault/identity_store_schema.go @@ -6,15 +6,23 @@ import ( memdb "github.com/hashicorp/go-memdb" ) +const ( + entitiesTable = "entities" + entityAliasesTable = "entity_aliases" + groupsTable = "groups" + groupAliasesTable = "group_aliases" +) + func identityStoreSchema() *memdb.DBSchema { iStoreSchema := &memdb.DBSchema{ Tables: make(map[string]*memdb.TableSchema), } schemas := []func() *memdb.TableSchema{ - entityTableSchema, + entitiesTableSchema, aliasesTableSchema, - groupTableSchema, + groupsTableSchema, + groupAliasesTableSchema, } for _, schemaFunc := range schemas { @@ -30,7 +38,7 @@ func identityStoreSchema() *memdb.DBSchema { func aliasesTableSchema() *memdb.TableSchema { return &memdb.TableSchema{ - Name: "aliases", + Name: entityAliasesTable, Indexes: map[string]*memdb.IndexSchema{ "id": &memdb.IndexSchema{ Name: "id", @@ -39,11 +47,11 @@ func aliasesTableSchema() *memdb.TableSchema { Field: "ID", }, }, - "entity_id": &memdb.IndexSchema{ - Name: "entity_id", + "canonical_id": &memdb.IndexSchema{ + Name: "canonical_id", Unique: false, Indexer: &memdb.StringFieldIndex{ - Field: "EntityID", + Field: "CanonicalID", }, }, "mount_type": &memdb.IndexSchema{ @@ -79,9 +87,9 @@ func aliasesTableSchema() *memdb.TableSchema { } } -func entityTableSchema() *memdb.TableSchema { +func entitiesTableSchema() *memdb.TableSchema { return &memdb.TableSchema{ - Name: "entities", + Name: entitiesTable, Indexes: map[string]*memdb.IndexSchema{ "id": &memdb.IndexSchema{ Name: "id", @@ -125,9 +133,9 @@ func entityTableSchema() *memdb.TableSchema { } } -func groupTableSchema() *memdb.TableSchema { +func groupsTableSchema() *memdb.TableSchema { return &memdb.TableSchema{ - Name: "groups", + Name: groupsTable, Indexes: map[string]*memdb.IndexSchema{ "id": { Name: "id", @@ -178,3 +186,46 @@ func groupTableSchema() *memdb.TableSchema { }, } } + +func groupAliasesTableSchema() *memdb.TableSchema { + return &memdb.TableSchema{ + Name: groupAliasesTable, + Indexes: map[string]*memdb.IndexSchema{ + "id": &memdb.IndexSchema{ + Name: "id", + Unique: true, + Indexer: &memdb.StringFieldIndex{ 
+ Field: "ID", + }, + }, + "canonical_id": &memdb.IndexSchema{ + Name: "canonical_id", + Unique: false, + Indexer: &memdb.StringFieldIndex{ + Field: "CanonicalID", + }, + }, + "mount_type": &memdb.IndexSchema{ + Name: "mount_type", + Unique: false, + Indexer: &memdb.StringFieldIndex{ + Field: "MountType", + }, + }, + "factors": &memdb.IndexSchema{ + Name: "factors", + Unique: true, + Indexer: &memdb.CompoundIndex{ + Indexes: []memdb.Indexer{ + &memdb.StringFieldIndex{ + Field: "MountAccessor", + }, + &memdb.StringFieldIndex{ + Field: "Name", + }, + }, + }, + }, + }, + } +} diff --git a/vault/identity_store_structs.go b/vault/identity_store_structs.go index b9020c0289..bc0f07af18 100644 --- a/vault/identity_store_structs.go +++ b/vault/identity_store_structs.go @@ -5,6 +5,7 @@ import ( "sync" memdb "github.com/hashicorp/go-memdb" + "github.com/hashicorp/vault/helper/identity" "github.com/hashicorp/vault/helper/locksutil" "github.com/hashicorp/vault/helper/storagepacker" "github.com/hashicorp/vault/logical" @@ -73,3 +74,9 @@ type IdentityStore struct { // buckets groupPacker *storagepacker.StoragePacker } + +type groupDiff struct { + New []*identity.Group + Deleted []*identity.Group + Unmodified []*identity.Group +} diff --git a/vault/identity_store_test.go b/vault/identity_store_test.go index 9fce0b79d0..239a7b4c1c 100644 --- a/vault/identity_store_test.go +++ b/vault/identity_store_test.go @@ -90,7 +90,7 @@ func TestIdentityStore_EntityByAliasFactors(t *testing.T) { t.Fatalf("expected a non-nil response") } - entity, err := is.EntityByAliasFactors(ghAccessor, "alias_name", false) + entity, err := is.entityByAliasFactors(ghAccessor, "alias_name", false) if err != nil { t.Fatal(err) } diff --git a/vault/identity_store_util.go b/vault/identity_store_util.go index 0444bf6f8a..b6b5313607 100644 --- a/vault/identity_store_util.go +++ b/vault/identity_store_util.go @@ -13,6 +13,7 @@ import ( "github.com/hashicorp/vault/helper/locksutil" "github.com/hashicorp/vault/helper/storagepacker" "github.com/hashicorp/vault/helper/strutil" + "github.com/hashicorp/vault/logical" ) // parseMetadata takes in a slice of string and parses each item as a key value pair separated by an '=' sign. @@ -229,7 +230,7 @@ func (i *IdentityStore) LockForEntityID(entityID string) *locksutil.LockEntry { // upsertEntityInTxn either creates or updates an existing entity. The // operations will be updated in both MemDB and storage. If 'persist' is set to -// false, then storage will not be updated. When a alias is transferred from +// false, then storage will not be updated. When an alias is transferred from // one entity to another, both the source and destination entities should get // updated, in which case, callers should send in both entity and // previousEntity. 
@@ -253,17 +254,17 @@ func (i *IdentityStore) upsertEntityInTxn(txn *memdb.Txn, entity *identity.Entit for _, alias := range entity.Aliases { // Verify that alias is not associated to a different one already - aliasByFactors, err := i.memDBAliasByFactors(alias.MountAccessor, alias.Name, false) + aliasByFactors, err := i.MemDBAliasByFactors(alias.MountAccessor, alias.Name, false, false) if err != nil { return err } - if aliasByFactors != nil && aliasByFactors.EntityID != entity.ID { - return fmt.Errorf("alias %q in already tied to a different entity %q", alias.ID, aliasByFactors.EntityID) + if aliasByFactors != nil && aliasByFactors.CanonicalID != entity.ID { + return fmt.Errorf("alias %q in already tied to a different entity %q", alias.ID, aliasByFactors.CanonicalID) } // Insert or update alias in MemDB using the transaction created above - err = i.memDBUpsertAliasInTxn(txn, alias) + err = i.MemDBUpsertAliasInTxn(txn, alias, false) if err != nil { return err } @@ -271,7 +272,7 @@ func (i *IdentityStore) upsertEntityInTxn(txn *memdb.Txn, entity *identity.Entit // If previous entity is set, update it in MemDB and persist it if previousEntity != nil && persist { - err = i.memDBUpsertEntityInTxn(txn, previousEntity) + err = i.MemDBUpsertEntityInTxn(txn, previousEntity) if err != nil { return err } @@ -291,7 +292,7 @@ func (i *IdentityStore) upsertEntityInTxn(txn *memdb.Txn, entity *identity.Entit } // Insert or update entity in MemDB using the transaction created above - err = i.memDBUpsertEntityInTxn(txn, entity) + err = i.MemDBUpsertEntityInTxn(txn, entity) if err != nil { return err } @@ -318,7 +319,7 @@ func (i *IdentityStore) upsertEntityInTxn(txn *memdb.Txn, entity *identity.Entit // upsertEntity either creates or updates an existing entity. The operations // will be updated in both MemDB and storage. If 'persist' is set to false, -// then storage will not be updated. When a alias is transferred from one +// then storage will not be updated. When an alias is transferred from one // entity to another, both the source and destination entities should get // updated, in which case, callers should send in both entity and // previousEntity. 
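[Editor's note, not part of the patch] The comment above spells out upsertEntity's contract: when an alias is transferred between entities, the caller must pass both the destination entity and previousEntity so both are re-indexed in MemDB and persisted. A condensed, hypothetical caller is sketched below; it assumes it lives in package vault and that upsertEntity has the signature (entity, previousEntity *identity.Entity, persist bool) error, which is not shown in this hunk. Field names (Aliases, CanonicalID) follow the surrounding code.

    // transferAlias is a hypothetical helper illustrating the contract above;
    // it is not part of this patch.
    func transferAlias(i *IdentityStore, alias *identity.Alias, prev, dest *identity.Entity) error {
    	// Detach the alias from the source entity.
    	remaining := prev.Aliases[:0]
    	for _, a := range prev.Aliases {
    		if a.ID != alias.ID {
    			remaining = append(remaining, a)
    		}
    	}
    	prev.Aliases = remaining

    	// Attach it to the destination and repoint its canonical ID.
    	alias.CanonicalID = dest.ID
    	dest.Aliases = append(dest.Aliases, alias)

    	// Per the comment above, hand in both entities so the source is
    	// updated alongside the destination; persist=true writes storage too.
    	return i.upsertEntity(dest, prev, true)
    }
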
@@ -366,7 +367,7 @@ func (i *IdentityStore) deleteEntity(entityID string) error { // Since an entity ID is required to acquire the lock to modify the // storage, fetch the entity without acquiring the lock - lockEntity, err := i.memDBEntityByID(entityID, false) + lockEntity, err := i.MemDBEntityByID(entityID, false) if err != nil { return err } @@ -385,7 +386,7 @@ func (i *IdentityStore) deleteEntity(entityID string) error { defer txn.Abort() // Fetch the entity using its ID - entity, err = i.memDBEntityByIDInTxn(txn, entityID, true) + entity, err = i.MemDBEntityByIDInTxn(txn, entityID, true) if err != nil { return err } @@ -403,7 +404,7 @@ func (i *IdentityStore) deleteEntity(entityID string) error { } // Delete the entity using the same transaction - err = i.memDBDeleteEntityByIDInTxn(txn, entity.ID) + err = i.MemDBDeleteEntityByIDInTxn(txn, entity.ID) if err != nil { return err } @@ -434,7 +435,7 @@ func (i *IdentityStore) deleteAlias(aliasID string) error { // Fetch the alias using its ID - alias, err = i.memDBAliasByID(aliasID, false) + alias, err = i.MemDBAliasByID(aliasID, false, false) if err != nil { return err } @@ -445,7 +446,7 @@ func (i *IdentityStore) deleteAlias(aliasID string) error { } // Find the entity to which the alias is tied to - lockEntity, err := i.memDBEntityByAliasID(alias.ID, false) + lockEntity, err := i.MemDBEntityByAliasID(alias.ID, false) if err != nil { return err } @@ -466,7 +467,7 @@ func (i *IdentityStore) deleteAlias(aliasID string) error { // Fetch the alias again after acquiring the lock using the transaction // created above - alias, err = i.memDBAliasByIDInTxn(txn, aliasID, false) + alias, err = i.MemDBAliasByIDInTxn(txn, aliasID, false, false) if err != nil { return err } @@ -478,7 +479,7 @@ func (i *IdentityStore) deleteAlias(aliasID string) error { // Fetch the entity again after acquiring the lock using the transaction // created above - entity, err = i.memDBEntityByAliasIDInTxn(txn, alias.ID, true) + entity, err = i.MemDBEntityByAliasIDInTxn(txn, alias.ID, true) if err != nil { return err } @@ -505,7 +506,7 @@ func (i *IdentityStore) deleteAlias(aliasID string) error { } // Update the entity index in the entities table - err = i.memDBUpsertEntityInTxn(txn, entity) + err = i.MemDBUpsertEntityInTxn(txn, entity) if err != nil { return err } @@ -532,7 +533,7 @@ func (i *IdentityStore) deleteAlias(aliasID string) error { return nil } -func (i *IdentityStore) memDBUpsertAliasInTxn(txn *memdb.Txn, alias *identity.Alias) error { +func (i *IdentityStore) MemDBUpsertAliasInTxn(txn *memdb.Txn, alias *identity.Alias, groupAlias bool) error { if txn == nil { return fmt.Errorf("nil txn") } @@ -541,26 +542,31 @@ func (i *IdentityStore) memDBUpsertAliasInTxn(txn *memdb.Txn, alias *identity.Al return fmt.Errorf("alias is nil") } - aliasRaw, err := txn.First("aliases", "id", alias.ID) + tableName := entityAliasesTable + if groupAlias { + tableName = groupAliasesTable + } + + aliasRaw, err := txn.First(tableName, "id", alias.ID) if err != nil { return fmt.Errorf("failed to lookup alias from memdb using alias ID: %v", err) } if aliasRaw != nil { - err = txn.Delete("aliases", aliasRaw) + err = txn.Delete(tableName, aliasRaw) if err != nil { return fmt.Errorf("failed to delete alias from memdb: %v", err) } } - if err := txn.Insert("aliases", alias); err != nil { + if err := txn.Insert(tableName, alias); err != nil { return fmt.Errorf("failed to update alias into memdb: %v", err) } return nil } -func (i *IdentityStore) memDBUpsertAlias(alias *identity.Alias) error { 
+func (i *IdentityStore) MemDBUpsertAlias(alias *identity.Alias, groupAlias bool) error { if alias == nil { return fmt.Errorf("alias is nil") } @@ -568,7 +574,7 @@ func (i *IdentityStore) memDBUpsertAlias(alias *identity.Alias) error { txn := i.db.Txn(true) defer txn.Abort() - err := i.memDBUpsertAliasInTxn(txn, alias) + err := i.MemDBUpsertAliasInTxn(txn, alias, groupAlias) if err != nil { return err } @@ -578,18 +584,23 @@ func (i *IdentityStore) memDBUpsertAlias(alias *identity.Alias) error { return nil } -func (i *IdentityStore) memDBAliasByEntityIDInTxn(txn *memdb.Txn, entityID string, clone bool) (*identity.Alias, error) { - if entityID == "" { - return nil, fmt.Errorf("missing entity id") +func (i *IdentityStore) MemDBAliasByCanonicalIDInTxn(txn *memdb.Txn, canonicalID string, clone bool, groupAlias bool) (*identity.Alias, error) { + if canonicalID == "" { + return nil, fmt.Errorf("missing canonical ID") } if txn == nil { return nil, fmt.Errorf("txn is nil") } - aliasRaw, err := txn.First("aliases", "entity_id", entityID) + tableName := entityAliasesTable + if groupAlias { + tableName = groupAliasesTable + } + + aliasRaw, err := txn.First(tableName, "canonical_id", canonicalID) if err != nil { - return nil, fmt.Errorf("failed to fetch alias from memdb using entity id: %v", err) + return nil, fmt.Errorf("failed to fetch alias from memdb using canonical ID: %v", err) } if aliasRaw == nil { @@ -608,17 +619,17 @@ func (i *IdentityStore) memDBAliasByEntityIDInTxn(txn *memdb.Txn, entityID strin return alias, nil } -func (i *IdentityStore) memDBAliasByEntityID(entityID string, clone bool) (*identity.Alias, error) { - if entityID == "" { - return nil, fmt.Errorf("missing entity id") +func (i *IdentityStore) MemDBAliasByCanonicalID(canonicalID string, clone bool, groupAlias bool) (*identity.Alias, error) { + if canonicalID == "" { + return nil, fmt.Errorf("missing canonical ID") } txn := i.db.Txn(false) - return i.memDBAliasByEntityIDInTxn(txn, entityID, clone) + return i.MemDBAliasByCanonicalIDInTxn(txn, canonicalID, clone, groupAlias) } -func (i *IdentityStore) memDBAliasByIDInTxn(txn *memdb.Txn, aliasID string, clone bool) (*identity.Alias, error) { +func (i *IdentityStore) MemDBAliasByIDInTxn(txn *memdb.Txn, aliasID string, clone bool, groupAlias bool) (*identity.Alias, error) { if aliasID == "" { return nil, fmt.Errorf("missing alias ID") } @@ -627,7 +638,12 @@ func (i *IdentityStore) memDBAliasByIDInTxn(txn *memdb.Txn, aliasID string, clon return nil, fmt.Errorf("txn is nil") } - aliasRaw, err := txn.First("aliases", "id", aliasID) + tableName := entityAliasesTable + if groupAlias { + tableName = groupAliasesTable + } + + aliasRaw, err := txn.First(tableName, "id", aliasID) if err != nil { return nil, fmt.Errorf("failed to fetch alias from memdb using alias ID: %v", err) } @@ -648,17 +664,17 @@ func (i *IdentityStore) memDBAliasByIDInTxn(txn *memdb.Txn, aliasID string, clon return alias, nil } -func (i *IdentityStore) memDBAliasByID(aliasID string, clone bool) (*identity.Alias, error) { +func (i *IdentityStore) MemDBAliasByID(aliasID string, clone bool, groupAlias bool) (*identity.Alias, error) { if aliasID == "" { return nil, fmt.Errorf("missing alias ID") } txn := i.db.Txn(false) - return i.memDBAliasByIDInTxn(txn, aliasID, clone) + return i.MemDBAliasByIDInTxn(txn, aliasID, clone, groupAlias) } -func (i *IdentityStore) memDBAliasByFactors(mountAccessor, aliasName string, clone bool) (*identity.Alias, error) { +func (i *IdentityStore) MemDBAliasByFactors(mountAccessor, aliasName 
string, clone bool, groupAlias bool) (*identity.Alias, error) { if aliasName == "" { return nil, fmt.Errorf("missing alias name") } @@ -667,8 +683,13 @@ func (i *IdentityStore) memDBAliasByFactors(mountAccessor, aliasName string, clo return nil, fmt.Errorf("missing mount accessor") } + tableName := entityAliasesTable + if groupAlias { + tableName = groupAliasesTable + } + txn := i.db.Txn(false) - aliasRaw, err := txn.First("aliases", "factors", mountAccessor, aliasName) + aliasRaw, err := txn.First(tableName, "factors", mountAccessor, aliasName) if err != nil { return nil, fmt.Errorf("failed to fetch alias from memdb using factors: %v", err) } @@ -689,7 +710,7 @@ func (i *IdentityStore) memDBAliasByFactors(mountAccessor, aliasName string, clo return alias, nil } -func (i *IdentityStore) memDBAliasesByMetadata(filters map[string]string, clone bool) ([]*identity.Alias, error) { +func (i *IdentityStore) MemDBAliasesByMetadata(filters map[string]string, clone bool, groupAlias bool) ([]*identity.Alias, error) { if filters == nil { return nil, fmt.Errorf("map filter is nil") } @@ -703,7 +724,12 @@ func (i *IdentityStore) memDBAliasesByMetadata(filters map[string]string, clone break } - aliasesIter, err := txn.Get("aliases", "metadata", args...) + tableName := entityAliasesTable + if groupAlias { + tableName = groupAliasesTable + } + + aliasesIter, err := txn.Get(tableName, "metadata", args...) if err != nil { return nil, fmt.Errorf("failed to lookup aliases using metadata: %v", err) } @@ -724,7 +750,7 @@ func (i *IdentityStore) memDBAliasesByMetadata(filters map[string]string, clone return aliases, nil } -func (i *IdentityStore) memDBDeleteAliasByID(aliasID string) error { +func (i *IdentityStore) MemDBDeleteAliasByID(aliasID string, groupAlias bool) error { if aliasID == "" { return nil } @@ -732,7 +758,7 @@ func (i *IdentityStore) memDBDeleteAliasByID(aliasID string) error { txn := i.db.Txn(true) defer txn.Abort() - err := i.memDBDeleteAliasByIDInTxn(txn, aliasID) + err := i.MemDBDeleteAliasByIDInTxn(txn, aliasID, groupAlias) if err != nil { return err } @@ -742,7 +768,7 @@ func (i *IdentityStore) memDBDeleteAliasByID(aliasID string) error { return nil } -func (i *IdentityStore) memDBDeleteAliasByIDInTxn(txn *memdb.Txn, aliasID string) error { +func (i *IdentityStore) MemDBDeleteAliasByIDInTxn(txn *memdb.Txn, aliasID string, groupAlias bool) error { if aliasID == "" { return nil } @@ -751,7 +777,7 @@ func (i *IdentityStore) memDBDeleteAliasByIDInTxn(txn *memdb.Txn, aliasID string return fmt.Errorf("txn is nil") } - alias, err := i.memDBAliasByIDInTxn(txn, aliasID, false) + alias, err := i.MemDBAliasByIDInTxn(txn, aliasID, false, groupAlias) if err != nil { return err } @@ -760,7 +786,12 @@ func (i *IdentityStore) memDBDeleteAliasByIDInTxn(txn *memdb.Txn, aliasID string return nil } - err = txn.Delete("aliases", alias) + tableName := entityAliasesTable + if groupAlias { + tableName = groupAliasesTable + } + + err = txn.Delete(tableName, alias) if err != nil { return fmt.Errorf("failed to delete alias from memdb: %v", err) } @@ -768,10 +799,15 @@ func (i *IdentityStore) memDBDeleteAliasByIDInTxn(txn *memdb.Txn, aliasID string return nil } -func (i *IdentityStore) memDBAliases(ws memdb.WatchSet) (memdb.ResultIterator, error) { +func (i *IdentityStore) MemDBAliases(ws memdb.WatchSet, groupAlias bool) (memdb.ResultIterator, error) { txn := i.db.Txn(false) - iter, err := txn.Get("aliases", "id") + tableName := entityAliasesTable + if groupAlias { + tableName = groupAliasesTable + } + + iter, err := 
txn.Get(tableName, "id") if err != nil { return nil, err } @@ -781,7 +817,7 @@ func (i *IdentityStore) memDBAliases(ws memdb.WatchSet) (memdb.ResultIterator, e return iter, nil } -func (i *IdentityStore) memDBUpsertEntityInTxn(txn *memdb.Txn, entity *identity.Entity) error { +func (i *IdentityStore) MemDBUpsertEntityInTxn(txn *memdb.Txn, entity *identity.Entity) error { if txn == nil { return fmt.Errorf("nil txn") } @@ -790,26 +826,26 @@ func (i *IdentityStore) memDBUpsertEntityInTxn(txn *memdb.Txn, entity *identity. return fmt.Errorf("entity is nil") } - entityRaw, err := txn.First("entities", "id", entity.ID) + entityRaw, err := txn.First(entitiesTable, "id", entity.ID) if err != nil { return fmt.Errorf("failed to lookup entity from memdb using entity id: %v", err) } if entityRaw != nil { - err = txn.Delete("entities", entityRaw) + err = txn.Delete(entitiesTable, entityRaw) if err != nil { return fmt.Errorf("failed to delete entity from memdb: %v", err) } } - if err := txn.Insert("entities", entity); err != nil { + if err := txn.Insert(entitiesTable, entity); err != nil { return fmt.Errorf("failed to update entity into memdb: %v", err) } return nil } -func (i *IdentityStore) memDBUpsertEntity(entity *identity.Entity) error { +func (i *IdentityStore) MemDBUpsertEntity(entity *identity.Entity) error { if entity == nil { return fmt.Errorf("entity to upsert is nil") } @@ -817,7 +853,7 @@ func (i *IdentityStore) memDBUpsertEntity(entity *identity.Entity) error { txn := i.db.Txn(true) defer txn.Abort() - err := i.memDBUpsertEntityInTxn(txn, entity) + err := i.MemDBUpsertEntityInTxn(txn, entity) if err != nil { return err } @@ -827,7 +863,7 @@ func (i *IdentityStore) memDBUpsertEntity(entity *identity.Entity) error { return nil } -func (i *IdentityStore) memDBEntityByIDInTxn(txn *memdb.Txn, entityID string, clone bool) (*identity.Entity, error) { +func (i *IdentityStore) MemDBEntityByIDInTxn(txn *memdb.Txn, entityID string, clone bool) (*identity.Entity, error) { if entityID == "" { return nil, fmt.Errorf("missing entity id") } @@ -836,7 +872,7 @@ func (i *IdentityStore) memDBEntityByIDInTxn(txn *memdb.Txn, entityID string, cl return nil, fmt.Errorf("txn is nil") } - entityRaw, err := txn.First("entities", "id", entityID) + entityRaw, err := txn.First(entitiesTable, "id", entityID) if err != nil { return nil, fmt.Errorf("failed to fetch entity from memdb using entity id: %v", err) } @@ -857,17 +893,17 @@ func (i *IdentityStore) memDBEntityByIDInTxn(txn *memdb.Txn, entityID string, cl return entity, nil } -func (i *IdentityStore) memDBEntityByID(entityID string, clone bool) (*identity.Entity, error) { +func (i *IdentityStore) MemDBEntityByID(entityID string, clone bool) (*identity.Entity, error) { if entityID == "" { return nil, fmt.Errorf("missing entity id") } txn := i.db.Txn(false) - return i.memDBEntityByIDInTxn(txn, entityID, clone) + return i.MemDBEntityByIDInTxn(txn, entityID, clone) } -func (i *IdentityStore) memDBEntityByNameInTxn(txn *memdb.Txn, entityName string, clone bool) (*identity.Entity, error) { +func (i *IdentityStore) MemDBEntityByNameInTxn(txn *memdb.Txn, entityName string, clone bool) (*identity.Entity, error) { if entityName == "" { return nil, fmt.Errorf("missing entity name") } @@ -876,7 +912,7 @@ func (i *IdentityStore) memDBEntityByNameInTxn(txn *memdb.Txn, entityName string return nil, fmt.Errorf("txn is nil") } - entityRaw, err := txn.First("entities", "name", entityName) + entityRaw, err := txn.First(entitiesTable, "name", entityName) if err != nil { return nil, 
fmt.Errorf("failed to fetch entity from memdb using entity name: %v", err) } @@ -897,17 +933,17 @@ func (i *IdentityStore) memDBEntityByNameInTxn(txn *memdb.Txn, entityName string return entity, nil } -func (i *IdentityStore) memDBEntityByName(entityName string, clone bool) (*identity.Entity, error) { +func (i *IdentityStore) MemDBEntityByName(entityName string, clone bool) (*identity.Entity, error) { if entityName == "" { return nil, fmt.Errorf("missing entity name") } txn := i.db.Txn(false) - return i.memDBEntityByNameInTxn(txn, entityName, clone) + return i.MemDBEntityByNameInTxn(txn, entityName, clone) } -func (i *IdentityStore) memDBEntitiesByMetadata(filters map[string]string, clone bool) ([]*identity.Entity, error) { +func (i *IdentityStore) MemDBEntitiesByMetadata(filters map[string]string, clone bool) ([]*identity.Entity, error) { if filters == nil { return nil, fmt.Errorf("map filter is nil") } @@ -921,7 +957,7 @@ func (i *IdentityStore) memDBEntitiesByMetadata(filters map[string]string, clone break } - entitiesIter, err := txn.Get("entities", "metadata", args...) + entitiesIter, err := txn.Get(entitiesTable, "metadata", args...) if err != nil { return nil, fmt.Errorf("failed to lookup entities using metadata: %v", err) } @@ -942,7 +978,7 @@ func (i *IdentityStore) memDBEntitiesByMetadata(filters map[string]string, clone return entities, nil } -func (i *IdentityStore) memDBEntitiesByBucketEntryKeyHash(hashValue string) ([]*identity.Entity, error) { +func (i *IdentityStore) MemDBEntitiesByBucketEntryKeyHash(hashValue string) ([]*identity.Entity, error) { if hashValue == "" { return nil, fmt.Errorf("empty hash value") } @@ -950,10 +986,10 @@ func (i *IdentityStore) memDBEntitiesByBucketEntryKeyHash(hashValue string) ([]* txn := i.db.Txn(false) defer txn.Abort() - return i.memDBEntitiesByBucketEntryKeyHashInTxn(txn, hashValue) + return i.MemDBEntitiesByBucketEntryKeyHashInTxn(txn, hashValue) } -func (i *IdentityStore) memDBEntitiesByBucketEntryKeyHashInTxn(txn *memdb.Txn, hashValue string) ([]*identity.Entity, error) { +func (i *IdentityStore) MemDBEntitiesByBucketEntryKeyHashInTxn(txn *memdb.Txn, hashValue string) ([]*identity.Entity, error) { if txn == nil { return nil, fmt.Errorf("nil txn") } @@ -962,7 +998,7 @@ func (i *IdentityStore) memDBEntitiesByBucketEntryKeyHashInTxn(txn *memdb.Txn, h return nil, fmt.Errorf("empty hash value") } - entitiesIter, err := txn.Get("entities", "bucket_key_hash", hashValue) + entitiesIter, err := txn.Get(entitiesTable, "bucket_key_hash", hashValue) if err != nil { return nil, fmt.Errorf("failed to lookup entities using bucket entry key hash: %v", err) } @@ -975,7 +1011,7 @@ func (i *IdentityStore) memDBEntitiesByBucketEntryKeyHashInTxn(txn *memdb.Txn, h return entities, nil } -func (i *IdentityStore) memDBEntityByMergedEntityIDInTxn(txn *memdb.Txn, mergedEntityID string, clone bool) (*identity.Entity, error) { +func (i *IdentityStore) MemDBEntityByMergedEntityIDInTxn(txn *memdb.Txn, mergedEntityID string, clone bool) (*identity.Entity, error) { if mergedEntityID == "" { return nil, fmt.Errorf("missing merged entity id") } @@ -984,7 +1020,7 @@ func (i *IdentityStore) memDBEntityByMergedEntityIDInTxn(txn *memdb.Txn, mergedE return nil, fmt.Errorf("txn is nil") } - entityRaw, err := txn.First("entities", "merged_entity_ids", mergedEntityID) + entityRaw, err := txn.First(entitiesTable, "merged_entity_ids", mergedEntityID) if err != nil { return nil, fmt.Errorf("failed to fetch entity from memdb using merged entity id: %v", err) } @@ -1005,17 +1041,17 
@@ func (i *IdentityStore) memDBEntityByMergedEntityIDInTxn(txn *memdb.Txn, mergedE return entity, nil } -func (i *IdentityStore) memDBEntityByMergedEntityID(mergedEntityID string, clone bool) (*identity.Entity, error) { +func (i *IdentityStore) MemDBEntityByMergedEntityID(mergedEntityID string, clone bool) (*identity.Entity, error) { if mergedEntityID == "" { return nil, fmt.Errorf("missing merged entity id") } txn := i.db.Txn(false) - return i.memDBEntityByMergedEntityIDInTxn(txn, mergedEntityID, clone) + return i.MemDBEntityByMergedEntityIDInTxn(txn, mergedEntityID, clone) } -func (i *IdentityStore) memDBEntityByAliasIDInTxn(txn *memdb.Txn, aliasID string, clone bool) (*identity.Entity, error) { +func (i *IdentityStore) MemDBEntityByAliasIDInTxn(txn *memdb.Txn, aliasID string, clone bool) (*identity.Entity, error) { if aliasID == "" { return nil, fmt.Errorf("missing alias ID") } @@ -1024,7 +1060,7 @@ func (i *IdentityStore) memDBEntityByAliasIDInTxn(txn *memdb.Txn, aliasID string return nil, fmt.Errorf("txn is nil") } - alias, err := i.memDBAliasByIDInTxn(txn, aliasID, false) + alias, err := i.MemDBAliasByIDInTxn(txn, aliasID, false, false) if err != nil { return nil, err } @@ -1033,20 +1069,20 @@ func (i *IdentityStore) memDBEntityByAliasIDInTxn(txn *memdb.Txn, aliasID string return nil, nil } - return i.memDBEntityByIDInTxn(txn, alias.EntityID, clone) + return i.MemDBEntityByIDInTxn(txn, alias.CanonicalID, clone) } -func (i *IdentityStore) memDBEntityByAliasID(aliasID string, clone bool) (*identity.Entity, error) { +func (i *IdentityStore) MemDBEntityByAliasID(aliasID string, clone bool) (*identity.Entity, error) { if aliasID == "" { return nil, fmt.Errorf("missing alias ID") } txn := i.db.Txn(false) - return i.memDBEntityByAliasIDInTxn(txn, aliasID, clone) + return i.MemDBEntityByAliasIDInTxn(txn, aliasID, clone) } -func (i *IdentityStore) memDBDeleteEntityByID(entityID string) error { +func (i *IdentityStore) MemDBDeleteEntityByID(entityID string) error { if entityID == "" { return nil } @@ -1054,7 +1090,7 @@ func (i *IdentityStore) memDBDeleteEntityByID(entityID string) error { txn := i.db.Txn(true) defer txn.Abort() - err := i.memDBDeleteEntityByIDInTxn(txn, entityID) + err := i.MemDBDeleteEntityByIDInTxn(txn, entityID) if err != nil { return err } @@ -1064,7 +1100,7 @@ func (i *IdentityStore) memDBDeleteEntityByID(entityID string) error { return nil } -func (i *IdentityStore) memDBDeleteEntityByIDInTxn(txn *memdb.Txn, entityID string) error { +func (i *IdentityStore) MemDBDeleteEntityByIDInTxn(txn *memdb.Txn, entityID string) error { if entityID == "" { return nil } @@ -1073,7 +1109,7 @@ func (i *IdentityStore) memDBDeleteEntityByIDInTxn(txn *memdb.Txn, entityID stri return fmt.Errorf("txn is nil") } - entity, err := i.memDBEntityByIDInTxn(txn, entityID, false) + entity, err := i.MemDBEntityByIDInTxn(txn, entityID, false) if err != nil { return err } @@ -1082,7 +1118,7 @@ func (i *IdentityStore) memDBDeleteEntityByIDInTxn(txn *memdb.Txn, entityID stri return nil } - err = txn.Delete("entities", entity) + err = txn.Delete(entitiesTable, entity) if err != nil { return fmt.Errorf("failed to delete entity from memdb: %v", err) } @@ -1090,10 +1126,10 @@ func (i *IdentityStore) memDBDeleteEntityByIDInTxn(txn *memdb.Txn, entityID stri return nil } -func (i *IdentityStore) memDBEntities(ws memdb.WatchSet) (memdb.ResultIterator, error) { +func (i *IdentityStore) MemDBEntities(ws memdb.WatchSet) (memdb.ResultIterator, error) { txn := i.db.Txn(false) - iter, err := txn.Get("entities", "id") 
+ iter, err := txn.Get(entitiesTable, "id") if err != nil { return nil, err } @@ -1110,9 +1146,9 @@ func (i *IdentityStore) sanitizeAlias(alias *identity.Alias) error { return fmt.Errorf("alias is nil") } - // Alias must always be tied to an entity - if alias.EntityID == "" { - return fmt.Errorf("missing entity ID") + // Alias must always be tied to a canonical object + if alias.CanonicalID == "" { + return fmt.Errorf("missing canonical ID") } // Alias must have a name @@ -1244,7 +1280,7 @@ func (i *IdentityStore) sanitizeAndUpsertGroup(group *identity.Group, memberGrou // After the group lock is held, make membership updates to all the // relevant groups for _, memberGroupID := range memberGroupIDs { - memberGroup, err := i.memDBGroupByID(memberGroupID, true) + memberGroup, err := i.MemDBGroupByID(memberGroupID, true) if err != nil { return err } @@ -1277,6 +1313,21 @@ func (i *IdentityStore) sanitizeAndUpsertGroup(group *identity.Group, memberGrou } } + // Sanitize the group alias + if group.Alias != nil { + group.Alias.CanonicalID = group.ID + + err = i.sanitizeAlias(group.Alias) + if err != nil { + return err + } + + err = i.MemDBUpsertAliasInTxn(txn, group.Alias, true) + if err != nil { + return err + } + } + err = i.upsertGroupInTxn(txn, group, true) if err != nil { return err @@ -1288,7 +1339,7 @@ func (i *IdentityStore) sanitizeAndUpsertGroup(group *identity.Group, memberGrou } func (i *IdentityStore) validateMemberGroupID(groupID string, memberGroupID string) error { - group, err := i.memDBGroupByID(groupID, true) + group, err := i.MemDBGroupByID(groupID, true) if err != nil { return err } @@ -1322,7 +1373,7 @@ func (i *IdentityStore) validateMemberGroupID(groupID string, memberGroupID stri } func (i *IdentityStore) validateEntityID(entityID string) error { - entity, err := i.memDBEntityByID(entityID, false) + entity, err := i.MemDBEntityByID(entityID, false) if err != nil { return fmt.Errorf("failed to validate entity ID %q: %v", entityID, err) } @@ -1333,7 +1384,7 @@ func (i *IdentityStore) validateEntityID(entityID string) error { } func (i *IdentityStore) validateGroupID(groupID string) error { - group, err := i.memDBGroupByID(groupID, false) + group, err := i.MemDBGroupByID(groupID, false) if err != nil { return fmt.Errorf("failed to validate group ID %q: %v", groupID, err) } @@ -1368,14 +1419,14 @@ func (i *IdentityStore) deleteAliasesInEntityInTxn(txn *memdb.Txn, entity *ident // Remove identity indices from aliases table for those that needs to // be removed for _, alias := range removeList { - aliasToBeRemoved, err := i.memDBAliasByIDInTxn(txn, alias.ID, false) + aliasToBeRemoved, err := i.MemDBAliasByIDInTxn(txn, alias.ID, false, false) if err != nil { return err } if aliasToBeRemoved == nil { return fmt.Errorf("alias was not indexed") } - err = i.memDBDeleteAliasByIDInTxn(txn, aliasToBeRemoved.ID) + err = i.MemDBDeleteAliasByIDInTxn(txn, aliasToBeRemoved.ID, false) if err != nil { return err } @@ -1475,7 +1526,7 @@ func satisfiesMetadataFilters(meta map[string]string, filters map[string]string) return true } -func (i *IdentityStore) memDBGroupByNameInTxn(txn *memdb.Txn, groupName string, clone bool) (*identity.Group, error) { +func (i *IdentityStore) MemDBGroupByNameInTxn(txn *memdb.Txn, groupName string, clone bool) (*identity.Group, error) { if groupName == "" { return nil, fmt.Errorf("missing group name") } @@ -1484,7 +1535,7 @@ func (i *IdentityStore) memDBGroupByNameInTxn(txn *memdb.Txn, groupName string, return nil, fmt.Errorf("txn is nil") } - groupRaw, err := 
txn.First("groups", "name", groupName) + groupRaw, err := txn.First(groupsTable, "name", groupName) if err != nil { return nil, fmt.Errorf("failed to fetch group from memdb using group name: %v", err) } @@ -1505,14 +1556,27 @@ func (i *IdentityStore) memDBGroupByNameInTxn(txn *memdb.Txn, groupName string, return group, nil } -func (i *IdentityStore) memDBGroupByName(groupName string, clone bool) (*identity.Group, error) { +func (i *IdentityStore) MemDBGroupByName(groupName string, clone bool) (*identity.Group, error) { if groupName == "" { return nil, fmt.Errorf("missing group name") } txn := i.db.Txn(false) - return i.memDBGroupByNameInTxn(txn, groupName, clone) + return i.MemDBGroupByNameInTxn(txn, groupName, clone) +} + +func (i *IdentityStore) UpsertGroup(group *identity.Group, persist bool) error { + txn := i.db.Txn(true) + defer txn.Abort() + + err := i.upsertGroupInTxn(txn, group, persist) + if err != nil { + return err + } + + txn.Commit() + return nil } func (i *IdentityStore) upsertGroupInTxn(txn *memdb.Txn, group *identity.Group, persist bool) error { @@ -1530,7 +1594,7 @@ func (i *IdentityStore) upsertGroupInTxn(txn *memdb.Txn, group *identity.Group, group.ModifyIndex++ // Insert or update group in MemDB using the transaction created above - err = i.memDBUpsertGroupInTxn(txn, group) + err = i.MemDBUpsertGroupInTxn(txn, group) if err != nil { return err } @@ -1555,11 +1619,11 @@ func (i *IdentityStore) upsertGroupInTxn(txn *memdb.Txn, group *identity.Group, return nil } -func (i *IdentityStore) memDBUpsertGroup(group *identity.Group) error { +func (i *IdentityStore) MemDBUpsertGroup(group *identity.Group) error { txn := i.db.Txn(true) defer txn.Abort() - err := i.memDBUpsertGroupInTxn(txn, group) + err := i.MemDBUpsertGroupInTxn(txn, group) if err != nil { return err } @@ -1569,7 +1633,7 @@ func (i *IdentityStore) memDBUpsertGroup(group *identity.Group) error { return nil } -func (i *IdentityStore) memDBUpsertGroupInTxn(txn *memdb.Txn, group *identity.Group) error { +func (i *IdentityStore) MemDBUpsertGroupInTxn(txn *memdb.Txn, group *identity.Group) error { if txn == nil { return fmt.Errorf("nil txn") } @@ -1578,19 +1642,19 @@ func (i *IdentityStore) memDBUpsertGroupInTxn(txn *memdb.Txn, group *identity.Gr return fmt.Errorf("group is nil") } - groupRaw, err := txn.First("groups", "id", group.ID) + groupRaw, err := txn.First(groupsTable, "id", group.ID) if err != nil { return fmt.Errorf("failed to lookup group from memdb using group id: %v", err) } if groupRaw != nil { - err = txn.Delete("groups", groupRaw) + err = txn.Delete(groupsTable, groupRaw) if err != nil { return fmt.Errorf("failed to delete group from memdb: %v", err) } } - if err := txn.Insert("groups", group); err != nil { + if err := txn.Insert(groupsTable, group); err != nil { return fmt.Errorf("failed to update group into memdb: %v", err) } @@ -1613,7 +1677,7 @@ func (i *IdentityStore) deleteGroupByID(groupID string) error { txn := i.db.Txn(true) defer txn.Abort() - group, err = i.memDBGroupByIDInTxn(txn, groupID, false) + group, err = i.MemDBGroupByIDInTxn(txn, groupID, false) if err != nil { return err } @@ -1624,7 +1688,7 @@ func (i *IdentityStore) deleteGroupByID(groupID string) error { } // Delete the group using the same transaction - err = i.memDBDeleteGroupByIDInTxn(txn, group.ID) + err = i.MemDBDeleteGroupByIDInTxn(txn, group.ID) if err != nil { return err } @@ -1641,7 +1705,7 @@ func (i *IdentityStore) deleteGroupByID(groupID string) error { return nil } -func (i *IdentityStore) 
memDBDeleteGroupByIDInTxn(txn *memdb.Txn, groupID string) error { +func (i *IdentityStore) MemDBDeleteGroupByIDInTxn(txn *memdb.Txn, groupID string) error { if groupID == "" { return nil } @@ -1650,7 +1714,7 @@ func (i *IdentityStore) memDBDeleteGroupByIDInTxn(txn *memdb.Txn, groupID string return fmt.Errorf("txn is nil") } - group, err := i.memDBGroupByIDInTxn(txn, groupID, false) + group, err := i.MemDBGroupByIDInTxn(txn, groupID, false) if err != nil { return err } @@ -1684,7 +1748,7 @@ func (i *IdentityStore) deleteGroupByName(groupName string) error { defer txn.Abort() // Fetch the group using its ID - group, err = i.memDBGroupByNameInTxn(txn, groupName, false) + group, err = i.MemDBGroupByNameInTxn(txn, groupName, false) if err != nil { return err } @@ -1695,7 +1759,7 @@ func (i *IdentityStore) deleteGroupByName(groupName string) error { } // Delete the group using the same transaction - err = i.memDBDeleteGroupByNameInTxn(txn, group.Name) + err = i.MemDBDeleteGroupByNameInTxn(txn, group.Name) if err != nil { return err } @@ -1712,7 +1776,7 @@ func (i *IdentityStore) deleteGroupByName(groupName string) error { return nil } -func (i *IdentityStore) memDBDeleteGroupByNameInTxn(txn *memdb.Txn, groupName string) error { +func (i *IdentityStore) MemDBDeleteGroupByNameInTxn(txn *memdb.Txn, groupName string) error { if groupName == "" { return nil } @@ -1721,7 +1785,7 @@ func (i *IdentityStore) memDBDeleteGroupByNameInTxn(txn *memdb.Txn, groupName st return fmt.Errorf("txn is nil") } - group, err := i.memDBGroupByNameInTxn(txn, groupName, false) + group, err := i.MemDBGroupByNameInTxn(txn, groupName, false) if err != nil { return err } @@ -1730,7 +1794,7 @@ func (i *IdentityStore) memDBDeleteGroupByNameInTxn(txn *memdb.Txn, groupName st return nil } - err = txn.Delete("groups", group) + err = txn.Delete(groupsTable, group) if err != nil { return fmt.Errorf("failed to delete group from memdb: %v", err) } @@ -1738,7 +1802,7 @@ func (i *IdentityStore) memDBDeleteGroupByNameInTxn(txn *memdb.Txn, groupName st return nil } -func (i *IdentityStore) memDBGroupByIDInTxn(txn *memdb.Txn, groupID string, clone bool) (*identity.Group, error) { +func (i *IdentityStore) MemDBGroupByIDInTxn(txn *memdb.Txn, groupID string, clone bool) (*identity.Group, error) { if groupID == "" { return nil, fmt.Errorf("missing group ID") } @@ -1747,7 +1811,7 @@ func (i *IdentityStore) memDBGroupByIDInTxn(txn *memdb.Txn, groupID string, clon return nil, fmt.Errorf("txn is nil") } - groupRaw, err := txn.First("groups", "id", groupID) + groupRaw, err := txn.First(groupsTable, "id", groupID) if err != nil { return nil, fmt.Errorf("failed to fetch group from memdb using group ID: %v", err) } @@ -1768,22 +1832,22 @@ func (i *IdentityStore) memDBGroupByIDInTxn(txn *memdb.Txn, groupID string, clon return group, nil } -func (i *IdentityStore) memDBGroupByID(groupID string, clone bool) (*identity.Group, error) { +func (i *IdentityStore) MemDBGroupByID(groupID string, clone bool) (*identity.Group, error) { if groupID == "" { return nil, fmt.Errorf("missing group ID") } txn := i.db.Txn(false) - return i.memDBGroupByIDInTxn(txn, groupID, clone) + return i.MemDBGroupByIDInTxn(txn, groupID, clone) } -func (i *IdentityStore) memDBGroupsByPolicyInTxn(txn *memdb.Txn, policyName string, clone bool) ([]*identity.Group, error) { +func (i *IdentityStore) MemDBGroupsByPolicyInTxn(txn *memdb.Txn, policyName string, clone bool) ([]*identity.Group, error) { if policyName == "" { return nil, fmt.Errorf("missing policy name") } - groupsIter, err := 
txn.Get("groups", "policies", policyName) + groupsIter, err := txn.Get(groupsTable, "policies", policyName) if err != nil { return nil, fmt.Errorf("failed to lookup groups using policy name: %v", err) } @@ -1803,22 +1867,22 @@ func (i *IdentityStore) memDBGroupsByPolicyInTxn(txn *memdb.Txn, policyName stri return groups, nil } -func (i *IdentityStore) memDBGroupsByPolicy(policyName string, clone bool) ([]*identity.Group, error) { +func (i *IdentityStore) MemDBGroupsByPolicy(policyName string, clone bool) ([]*identity.Group, error) { if policyName == "" { return nil, fmt.Errorf("missing policy name") } txn := i.db.Txn(false) - return i.memDBGroupsByPolicyInTxn(txn, policyName, clone) + return i.MemDBGroupsByPolicyInTxn(txn, policyName, clone) } -func (i *IdentityStore) memDBGroupsByParentGroupIDInTxn(txn *memdb.Txn, memberGroupID string, clone bool) ([]*identity.Group, error) { +func (i *IdentityStore) MemDBGroupsByParentGroupIDInTxn(txn *memdb.Txn, memberGroupID string, clone bool) ([]*identity.Group, error) { if memberGroupID == "" { return nil, fmt.Errorf("missing member group ID") } - groupsIter, err := txn.Get("groups", "parent_group_ids", memberGroupID) + groupsIter, err := txn.Get(groupsTable, "parent_group_ids", memberGroupID) if err != nil { return nil, fmt.Errorf("failed to lookup groups using member group ID: %v", err) } @@ -1838,25 +1902,29 @@ func (i *IdentityStore) memDBGroupsByParentGroupIDInTxn(txn *memdb.Txn, memberGr return groups, nil } -func (i *IdentityStore) memDBGroupsByParentGroupID(memberGroupID string, clone bool) ([]*identity.Group, error) { +func (i *IdentityStore) MemDBGroupsByParentGroupID(memberGroupID string, clone bool) ([]*identity.Group, error) { if memberGroupID == "" { return nil, fmt.Errorf("missing member group ID") } txn := i.db.Txn(false) - return i.memDBGroupsByParentGroupIDInTxn(txn, memberGroupID, clone) + return i.MemDBGroupsByParentGroupIDInTxn(txn, memberGroupID, clone) } -func (i *IdentityStore) memDBGroupsByMemberEntityID(entityID string, clone bool) ([]*identity.Group, error) { +func (i *IdentityStore) MemDBGroupsByMemberEntityID(entityID string, clone bool, externalOnly bool) ([]*identity.Group, error) { + txn := i.db.Txn(false) + defer txn.Abort() + + return i.MemDBGroupsByMemberEntityIDInTxn(txn, entityID, clone, externalOnly) +} + +func (i *IdentityStore) MemDBGroupsByMemberEntityIDInTxn(txn *memdb.Txn, entityID string, clone bool, externalOnly bool) ([]*identity.Group, error) { if entityID == "" { return nil, fmt.Errorf("missing entity ID") } - txn := i.db.Txn(false) - defer txn.Abort() - - groupsIter, err := txn.Get("groups", "member_entity_ids", entityID) + groupsIter, err := txn.Get(groupsTable, "member_entity_ids", entityID) if err != nil { return nil, fmt.Errorf("failed to lookup groups using entity ID: %v", err) } @@ -1864,6 +1932,9 @@ func (i *IdentityStore) memDBGroupsByMemberEntityID(entityID string, clone bool) var groups []*identity.Group for group := groupsIter.Next(); group != nil; group = groupsIter.Next() { entry := group.(*identity.Group) + if externalOnly && entry.Type == groupTypeInternal { + continue + } if clone { entry, err = entry.Clone() if err != nil { @@ -1881,7 +1952,7 @@ func (i *IdentityStore) groupPoliciesByEntityID(entityID string) ([]string, erro return nil, fmt.Errorf("empty entity ID") } - groups, err := i.memDBGroupsByMemberEntityID(entityID, false) + groups, err := i.MemDBGroupsByMemberEntityID(entityID, false, false) if err != nil { return nil, err } @@ -1903,7 +1974,7 @@ func (i *IdentityStore) 
transitiveGroupsByEntityID(entityID string) ([]*identity return nil, fmt.Errorf("empty entity ID") } - groups, err := i.memDBGroupsByMemberEntityID(entityID, false) + groups, err := i.MemDBGroupsByMemberEntityID(entityID, false, false) if err != nil { return nil, err } @@ -1946,7 +2017,7 @@ func (i *IdentityStore) collectGroupsReverseDFS(group *identity.Group, visited m // Traverse all the parent groups for _, parentGroupID := range group.ParentGroupIDs { - parentGroup, err := i.memDBGroupByID(parentGroupID, false) + parentGroup, err := i.MemDBGroupByID(parentGroupID, false) if err != nil { return nil, err } @@ -1974,7 +2045,7 @@ func (i *IdentityStore) collectPoliciesReverseDFS(group *identity.Group, visited // Traverse all the parent groups for _, parentGroupID := range group.ParentGroupIDs { - parentGroup, err := i.memDBGroupByID(parentGroupID, false) + parentGroup, err := i.MemDBGroupByID(parentGroupID, false) if err != nil { return nil, err } @@ -1999,7 +2070,7 @@ func (i *IdentityStore) detectCycleDFS(visited map[string]bool, startingGroupID, } visited[groupID] = true - group, err := i.memDBGroupByID(groupID, true) + group, err := i.MemDBGroupByID(groupID, true) if err != nil { return false, err } @@ -2009,7 +2080,7 @@ func (i *IdentityStore) detectCycleDFS(visited map[string]bool, startingGroupID, // Fetch all groups in which groupID is present as a ParentGroupID. In // other words, find all the subgroups of groupID. - memberGroups, err := i.memDBGroupsByParentGroupID(groupID, false) + memberGroups, err := i.MemDBGroupsByParentGroupID(groupID, false) if err != nil { return false, err } @@ -2030,7 +2101,7 @@ func (i *IdentityStore) detectCycleDFS(visited map[string]bool, startingGroupID, func (i *IdentityStore) memberGroupIDsByID(groupID string) ([]string, error) { var memberGroupIDs []string - memberGroups, err := i.memDBGroupsByParentGroupID(groupID, false) + memberGroups, err := i.MemDBGroupsByParentGroupID(groupID, false) if err != nil { return nil, err } @@ -2040,10 +2111,10 @@ func (i *IdentityStore) memberGroupIDsByID(groupID string) ([]string, error) { return memberGroupIDs, nil } -func (i *IdentityStore) memDBGroupIterator(ws memdb.WatchSet) (memdb.ResultIterator, error) { +func (i *IdentityStore) MemDBGroupIterator(ws memdb.WatchSet) (memdb.ResultIterator, error) { txn := i.db.Txn(false) - iter, err := txn.Get("groups", "id") + iter, err := txn.Get(groupsTable, "id") if err != nil { return nil, err } @@ -2065,7 +2136,7 @@ OUTER: switch entryType { case "entity": - entity, err := i.memDBEntityByName(name, false) + entity, err := i.MemDBEntityByName(name, false) if err != nil { return "", err } @@ -2073,7 +2144,7 @@ OUTER: break OUTER } case "group": - group, err := i.memDBGroupByName(name, false) + group, err := i.MemDBGroupByName(name, false) if err != nil { return "", err } @@ -2088,7 +2159,7 @@ OUTER: return name, nil } -func (i *IdentityStore) memDBGroupsByBucketEntryKeyHash(hashValue string) ([]*identity.Group, error) { +func (i *IdentityStore) MemDBGroupsByBucketEntryKeyHash(hashValue string) ([]*identity.Group, error) { if hashValue == "" { return nil, fmt.Errorf("empty hash value") } @@ -2096,10 +2167,10 @@ func (i *IdentityStore) memDBGroupsByBucketEntryKeyHash(hashValue string) ([]*id txn := i.db.Txn(false) defer txn.Abort() - return i.memDBGroupsByBucketEntryKeyHashInTxn(txn, hashValue) + return i.MemDBGroupsByBucketEntryKeyHashInTxn(txn, hashValue) } -func (i *IdentityStore) memDBGroupsByBucketEntryKeyHashInTxn(txn *memdb.Txn, hashValue string) 
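detectCycleDFS above guards against a group being made, directly or transitively, a subgroup of itself. A minimal sketch of the same reachability check over an invented parent-to-subgroup map; the group names and the reachable helper are illustrative, not from the patch.

```go
package main

import "fmt"

// children maps a group ID to the IDs of its subgroups.
var children = map[string][]string{
	"eng":      {"dev", "ops"},
	"dev":      {"frontend"},
	"ops":      {},
	"frontend": {},
}

// reachable reports whether target can be reached from any subgroup of
// "from" by walking the membership edges, visiting each group at most once.
func reachable(from, target string) bool {
	visited := map[string]bool{}
	var dfs func(id string) bool
	dfs = func(id string) bool {
		if id == target {
			return true
		}
		if visited[id] {
			return false
		}
		visited[id] = true
		for _, child := range children[id] {
			if dfs(child) {
				return true
			}
		}
		return false
	}
	for _, child := range children[from] {
		if dfs(child) {
			return true
		}
	}
	return false
}

// hasCycleFrom is true exactly when start can reach itself again.
func hasCycleFrom(start string) bool {
	return reachable(start, start)
}

func main() {
	fmt.Println(hasCycleFrom("eng")) // false

	// Introduce a cycle: frontend claims eng as a subgroup.
	children["frontend"] = append(children["frontend"], "eng")
	fmt.Println(hasCycleFrom("eng")) // true
}
```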
([]*identity.Group, error) { +func (i *IdentityStore) MemDBGroupsByBucketEntryKeyHashInTxn(txn *memdb.Txn, hashValue string) ([]*identity.Group, error) { if txn == nil { return nil, fmt.Errorf("nil txn") } @@ -2108,7 +2179,7 @@ func (i *IdentityStore) memDBGroupsByBucketEntryKeyHashInTxn(txn *memdb.Txn, has return nil, fmt.Errorf("empty hash value") } - groupsIter, err := txn.Get("groups", "bucket_key_hash", hashValue) + groupsIter, err := txn.Get(groupsTable, "bucket_key_hash", hashValue) if err != nil { return nil, fmt.Errorf("failed to lookup groups using bucket entry key hash: %v", err) } @@ -2120,3 +2191,196 @@ func (i *IdentityStore) memDBGroupsByBucketEntryKeyHashInTxn(txn *memdb.Txn, has return groups, nil } + +func (i *IdentityStore) MemDBGroupByAliasIDInTxn(txn *memdb.Txn, aliasID string, clone bool) (*identity.Group, error) { + if aliasID == "" { + return nil, fmt.Errorf("missing alias ID") + } + + if txn == nil { + return nil, fmt.Errorf("txn is nil") + } + + alias, err := i.MemDBAliasByIDInTxn(txn, aliasID, false, true) + if err != nil { + return nil, err + } + + if alias == nil { + return nil, nil + } + + return i.MemDBGroupByIDInTxn(txn, alias.CanonicalID, clone) +} + +func (i *IdentityStore) MemDBGroupByAliasID(aliasID string, clone bool) (*identity.Group, error) { + if aliasID == "" { + return nil, fmt.Errorf("missing alias ID") + } + + txn := i.db.Txn(false) + + return i.MemDBGroupByAliasIDInTxn(txn, aliasID, clone) +} + +func (i *IdentityStore) deleteGroupAlias(aliasID string) error { + var err error + var alias *identity.Alias + var group *identity.Group + + if aliasID == "" { + return fmt.Errorf("missing alias ID") + } + + i.groupLock.Lock() + defer i.groupLock.Unlock() + + txn := i.db.Txn(true) + defer txn.Abort() + + alias, err = i.MemDBAliasByIDInTxn(txn, aliasID, false, true) + if err != nil { + return err + } + + if alias == nil { + return nil + } + + group, err = i.MemDBGroupByAliasIDInTxn(txn, alias.ID, true) + if err != nil { + return err + } + + // If there is no group tied to a valid alias, something is wrong + if group == nil { + return fmt.Errorf("alias not associated to a group") + } + + // Delete group alias in memdb + err = i.MemDBDeleteAliasByIDInTxn(txn, group.Alias.ID, true) + if err != nil { + return err + } + + // Delete the alias + group.Alias = nil + + err = i.upsertGroupInTxn(txn, group, true) + if err != nil { + return err + } + + txn.Commit() + + return nil +} + +func (i *IdentityStore) refreshExternalGroupMembershipsByEntityID(entityID string, groupAliases []*logical.Alias) error { + if entityID == "" { + return fmt.Errorf("empty entity ID") + } + + i.groupLock.Lock() + defer i.groupLock.Unlock() + + txn := i.db.Txn(true) + defer txn.Abort() + + oldGroups, err := i.MemDBGroupsByMemberEntityIDInTxn(txn, entityID, true, true) + if err != nil { + return err + } + + var newGroups []*identity.Group + for _, alias := range groupAliases { + aliasByFactors, err := i.MemDBAliasByFactors(alias.MountAccessor, alias.Name, true, true) + if err != nil { + return err + } + if aliasByFactors == nil { + continue + } + mappingGroup, err := i.MemDBGroupByAliasID(aliasByFactors.ID, true) + if err != nil { + return err + } + if mappingGroup == nil { + return fmt.Errorf("group unavailable for a valid alias ID %q", aliasByFactors.ID) + } + newGroups = append(newGroups, mappingGroup) + } + + diff := diffGroups(oldGroups, newGroups) + + // Add the entity ID to all the new groups + for _, group := range diff.New { + if group.Type != groupTypeExternal { + continue + } + 
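The two loops in this function, the one continuing below for diff.New and the following one for diff.Deleted, apply the result of diffGroups to the entity's external-group memberships. As a self-contained illustration with invented entity and group IDs, the same reconciliation over plain ID sets looks like this:

```go
package main

import "fmt"

// reconcile prints which groups an entity must be added to and removed
// from, given its old and new group ID sets. This is the same
// new/unmodified/deleted split that diffGroups computes on group objects;
// plain strings keep the sketch self-contained.
func reconcile(entityID string, oldIDs, newIDs []string) {
	existing := make(map[string]bool, len(oldIDs))
	for _, id := range oldIDs {
		existing[id] = true
	}

	for _, id := range newIDs {
		if existing[id] {
			// Present on both sides: membership is unchanged.
			delete(existing, id)
			continue
		}
		fmt.Printf("add %s to group %s\n", entityID, id)
	}

	// Whatever remains of the old set no longer appears in the new set.
	for id := range existing {
		fmt.Printf("remove %s from group %s\n", entityID, id)
	}
}

func main() {
	reconcile("entity-123",
		[]string{"ldap-admins", "ldap-devs"},
		[]string{"ldap-devs", "ldap-ops"})
	// add entity-123 to group ldap-ops
	// remove entity-123 from group ldap-admins
}
```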
+ i.logger.Debug("adding member entity ID to external group", "member_entity_id", entityID, "group_id", group.ID) + + group.MemberEntityIDs = append(group.MemberEntityIDs, entityID) + + err = i.upsertGroupInTxn(txn, group, true) + if err != nil { + return err + } + } + + // Remove the entity ID from all the deleted groups + for _, group := range diff.Deleted { + if group.Type != groupTypeExternal { + continue + } + + i.logger.Debug("removing member entity ID from external group", "member_entity_id", entityID, "group_id", group.ID) + + group.MemberEntityIDs = strutil.StrListDelete(group.MemberEntityIDs, entityID) + + err = i.upsertGroupInTxn(txn, group, true) + if err != nil { + return err + } + } + + txn.Commit() + + return nil +} + +// diffGroups is used to diff two sets of groups +func diffGroups(old, new []*identity.Group) *groupDiff { + diff := &groupDiff{} + + existing := make(map[string]*identity.Group) + for _, group := range old { + existing[group.ID] = group + } + + for _, group := range new { + // Check if the entry in new is present in the old + _, ok := existing[group.ID] + + // If its not present, then its a new entry + if !ok { + diff.New = append(diff.New, group) + continue + } + + // If its present, it means that its unmodified + diff.Unmodified = append(diff.Unmodified, group) + + // By deleting the unmodified from the old set, we could determine the + // ones that are stale by looking at the remaining ones. + delete(existing, group.ID) + } + + // Any remaining entries must have been deleted + for _, me := range existing { + diff.Deleted = append(diff.Deleted, me) + } + + return diff +} diff --git a/vault/request_handling.go b/vault/request_handling.go index 074d2158ac..87065589b0 100644 --- a/vault/request_handling.go +++ b/vault/request_handling.go @@ -284,6 +284,21 @@ func (c *Core) handleRequest(req *logical.Request) (retResp *logical.Response, r } } + // If the request was to renew a token, and if there are group aliases set + // in the auth object, then the group memberships should be refreshed + if strings.HasPrefix(req.Path, "auth/token/renew") && + resp != nil && + resp.Auth != nil && + resp.Auth.EntityID != "" && + resp.Auth.GroupAliases != nil { + err := c.identityStore.refreshExternalGroupMembershipsByEntityID(resp.Auth.EntityID, resp.Auth.GroupAliases) + if err != nil { + c.logger.Error("core: failed to refresh external group memberships", "error", err) + retErr = multierror.Append(retErr, ErrInternalError) + return nil, auth, retErr + } + } + // Only the token store is allowed to return an auth block, for any // other request this is an internal error. 
We exclude renewal of a token, // since it does not need to be re-registered @@ -322,6 +337,7 @@ func (c *Core) handleRequest(req *logical.Request) (retResp *logical.Response, r if routeErr != nil { retErr = multierror.Append(retErr, routeErr) } + return resp, auth, retErr } @@ -412,7 +428,7 @@ func (c *Core) handleLoginRequest(req *logical.Request) (retResp *logical.Respon var err error // Check if an entity already exists for the given alias - entity, err = c.identityStore.EntityByAliasFactors(auth.Alias.MountAccessor, auth.Alias.Name, false) + entity, err = c.identityStore.entityByAliasFactors(auth.Alias.MountAccessor, auth.Alias.Name, false) if err != nil { return nil, nil, err } @@ -430,6 +446,12 @@ func (c *Core) handleLoginRequest(req *logical.Request) (retResp *logical.Respon } auth.EntityID = entity.ID + if auth.GroupAliases != nil { + err = c.identityStore.refreshExternalGroupMembershipsByEntityID(auth.EntityID, auth.GroupAliases) + if err != nil { + return nil, nil, err + } + } } if strutil.StrListSubset(auth.Policies, []string{"root"}) { diff --git a/vault/router.go b/vault/router.go index 36bca28ab0..8bd839ebe4 100644 --- a/vault/router.go +++ b/vault/router.go @@ -471,10 +471,26 @@ func (r *Router) routeCommon(req *logical.Request, existenceCheck bool) (*logica return nil, ok, exists, err } else { resp, err := re.backend.HandleRequest(req) + // When a token gets renewed, the request hits this path and reaches + // token store. Token store delegates the renewal to the expiration + // manager. Expiration manager in-turn creates a different logical + // request and forwards the request to the auth backend that had + // initially authenticated the login request. The forwarding to auth + // backend will make this code path hit for the second time for the + // same renewal request. The accessors in the Alias structs should be + // of the auth backend and not of the token store. Therefore, avoiding + // the overwriting of accessors by having a check for path prefix + // having "renew". This gets applied for "renew" and "renew-self" + // requests. 
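A small illustration of why a plain prefix check is enough here, assuming (as the comment above implies) that the router has already trimmed the mount prefix by this point, so a token renewal reaches the backend as "renew", "renew-self" or "renew/<token>". The concrete paths below are examples only.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Illustrative only: a renewal of the caller's own token arrives at the
	// token store with the mount prefix already stripped.
	full := "auth/token/renew-self"
	trimmed := strings.TrimPrefix(full, "auth/token/")

	fmt.Println(trimmed)                              // renew-self
	fmt.Println(strings.HasPrefix(trimmed, "renew"))  // true
	fmt.Println(strings.HasPrefix("create", "renew")) // false
}
```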
if resp != nil && resp.Auth != nil && - resp.Auth.Alias != nil { - resp.Auth.Alias.MountAccessor = re.mountEntry.Accessor + !strings.HasPrefix(req.Path, "renew") { + if resp.Auth.Alias != nil { + resp.Auth.Alias.MountAccessor = re.mountEntry.Accessor + } + for _, alias := range resp.Auth.GroupAliases { + alias.MountAccessor = re.mountEntry.Accessor + } } return resp, false, false, err } From c7b5b8b0b4485a2dcf7734d07e67bbc5104a764d Mon Sep 17 00:00:00 2001 From: Calvin Leung Huang Date: Thu, 2 Nov 2017 16:31:16 -0400 Subject: [PATCH 067/329] aws_region->region on awskms config --- command/server/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/command/server/config.go b/command/server/config.go index f51584e782..45afca0176 100644 --- a/command/server/config.go +++ b/command/server/config.go @@ -683,7 +683,7 @@ func parseSeal(result *Config, list *ast.ObjectList, blockName string) error { } case "awskms": valid = []string{ - "aws_region", + "region", "access_key", "secret_key", "kms_key_id", From 0321a867feff830d28d9f6710f0ea4535100fd54 Mon Sep 17 00:00:00 2001 From: Vishal Nayak Date: Thu, 2 Nov 2017 16:38:15 -0400 Subject: [PATCH 068/329] Added lookup endpoint for entity (#3519) * Added lookup endpoint for entity * Address review comments --- vault/identity_lookup.go | 74 ++++++++++++++++++++++++++++++++ vault/identity_lookup_test.go | 53 +++++++++++++++++++++++ vault/identity_store_entities.go | 10 +++-- 3 files changed, 133 insertions(+), 4 deletions(-) diff --git a/vault/identity_lookup.go b/vault/identity_lookup.go index 43f58953b8..bae1cf322a 100644 --- a/vault/identity_lookup.go +++ b/vault/identity_lookup.go @@ -4,12 +4,36 @@ import ( "fmt" "strings" + "github.com/hashicorp/vault/helper/identity" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/framework" ) func lookupPaths(i *IdentityStore) []*framework.Path { return []*framework.Path{ + { + Pattern: "lookup/entity$", + Fields: map[string]*framework.FieldSchema{ + "type": { + Type: framework.TypeString, + Description: "Type of lookup. 
Current supported values are 'id' and 'name'.", + }, + "name": { + Type: framework.TypeString, + Description: "Name of the entity.", + }, + "id": { + Type: framework.TypeString, + Description: "ID of the entity.", + }, + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: i.pathLookupEntityUpdate, + }, + + HelpSynopsis: strings.TrimSpace(lookupHelp["lookup-entity"][0]), + HelpDescription: strings.TrimSpace(lookupHelp["lookup-entity"][1]), + }, { Pattern: "lookup/group$", Fields: map[string]*framework.FieldSchema{ @@ -98,6 +122,47 @@ func lookupPaths(i *IdentityStore) []*framework.Path { } } +func (i *IdentityStore) pathLookupEntityUpdate(req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + lookupType := d.Get("type").(string) + if lookupType == "" { + return logical.ErrorResponse("empty type"), nil + } + + var entity *identity.Entity + var err error + + switch lookupType { + case "id": + entityID := d.Get("id").(string) + if entityID == "" { + return logical.ErrorResponse("empty id"), nil + } + entity, err = i.MemDBEntityByID(entityID, false) + if err != nil { + return nil, err + } + + case "name": + entityName := d.Get("name").(string) + if entityName == "" { + return logical.ErrorResponse("empty name"), nil + } + entity, err = i.MemDBEntityByName(entityName, false) + if err != nil { + return nil, err + } + + default: + return logical.ErrorResponse(fmt.Sprintf("unrecognized type %q", lookupType)), nil + } + + if entity == nil { + return nil, nil + } + + return i.handleEntityReadCommon(entity) +} + func (i *IdentityStore) pathLookupGroupUpdate(req *logical.Request, d *framework.FieldData) (*logical.Response, error) { lookupType := d.Get("type").(string) if lookupType == "" { @@ -196,6 +261,15 @@ func (i *IdentityStore) handleLookupAliasUpdateCommon(req *logical.Request, d *f } var lookupHelp = map[string][2]string{ + "lookup-entity": { + "Query entities based on types.", + `Supported types: + - 'id' + To query the entity by its ID. This requires 'id' parameter to be set. + - 'name' + To query the entity by its name. This requires 'name' parameter to be set. 
+ `, + }, "lookup-group": { "Query groups based on types.", `Supported types: diff --git a/vault/identity_lookup_test.go b/vault/identity_lookup_test.go index 24ce3f4cdf..981cdcbb4b 100644 --- a/vault/identity_lookup_test.go +++ b/vault/identity_lookup_test.go @@ -6,6 +6,59 @@ import ( "github.com/hashicorp/vault/logical" ) +func TestIdentityStore_Lookup_Entity(t *testing.T) { + var err error + var resp *logical.Response + + i, _, _ := testIdentityStoreWithGithubAuth(t) + + entityReq := &logical.Request{ + Path: "entity", + Operation: logical.UpdateOperation, + } + resp, err = i.HandleRequest(entityReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %#v\nresp: %v", err, resp) + } + entityID := resp.Data["id"].(string) + + entity, err := i.MemDBEntityByID(entityID, false) + if err != nil { + t.Fatal(err) + } + + lookupReq := &logical.Request{ + Path: "lookup/entity", + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "type": "id", + "id": entityID, + }, + } + resp, err = i.HandleRequest(lookupReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %#v\nresp: %v", err, resp) + } + + if resp.Data["id"].(string) != entityID { + t.Fatalf("bad: entity: %#v", resp.Data) + } + + lookupReq.Data = map[string]interface{}{ + "type": "name", + "name": entity.Name, + } + + resp, err = i.HandleRequest(lookupReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %#v\nresp: %v", err, resp) + } + + if resp.Data["id"].(string) != entityID { + t.Fatalf("bad: entity: %#v", resp.Data) + } +} + func TestIdentityStore_Lookup_EntityAlias(t *testing.T) { var err error var resp *logical.Response diff --git a/vault/identity_store_entities.go b/vault/identity_store_entities.go index aee241a918..f326edc8fa 100644 --- a/vault/identity_store_entities.go +++ b/vault/identity_store_entities.go @@ -410,6 +410,10 @@ func (i *IdentityStore) pathEntityIDRead(req *logical.Request, d *framework.Fiel return nil, nil } + return i.handleEntityReadCommon(entity) +} + +func (i *IdentityStore) handleEntityReadCommon(entity *identity.Entity) (*logical.Response, error) { respData := map[string]interface{}{} respData["id"] = entity.ID respData["name"] = entity.Name @@ -442,11 +446,9 @@ func (i *IdentityStore) pathEntityIDRead(req *logical.Request, d *framework.Fiel // formats respData["aliases"] = aliasesToReturn - resp := &logical.Response{ + return &logical.Response{ Data: respData, - } - - return resp, nil + }, nil } // pathEntityIDDelete deletes the entity for a given entity ID From 87e98dce23fff465474ff5e52a0f3db54676aaf6 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Thu, 2 Nov 2017 16:40:52 -0500 Subject: [PATCH 069/329] Check input size to avoid a panic (#3521) --- builtin/logical/transit/backend_test.go | 35 +++++++++++++++++++++++++ helper/keysutil/policy.go | 4 +++ 2 files changed, 39 insertions(+) diff --git a/builtin/logical/transit/backend_test.go b/builtin/logical/transit/backend_test.go index a9c27bcef6..315200fdab 100644 --- a/builtin/logical/transit/backend_test.go +++ b/builtin/logical/transit/backend_test.go @@ -1091,3 +1091,38 @@ func testPolicyFuzzingCommon(t *testing.T, be *backend) { // Wait for them all to finish wg.Wait() } + +func TestBadInput(t *testing.T) { + var b *backend + sysView := logical.TestSystemView() + storage := &logical.InmemStorage{} + + b = Backend(&logical.BackendConfig{ + StorageView: storage, + System: sysView, + }) + + req := &logical.Request{ + Storage: storage, + Operation: 
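The malformed ciphertext this test submits later ("vault:v1:abcd") is valid base64 but decodes to only three bytes, shorter than the 12-byte GCM nonce, so slicing a nonce off the front would panic without the new length guard. A standalone sketch of the failing precondition; the gcmNonceSize constant mirrors cipher.NewGCM's standard nonce size.

```go
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// "abcd" decodes to 3 bytes, far shorter than the nonce that a GCM
	// ciphertext must begin with, so decoded[:12] would panic.
	decoded, err := base64.StdEncoding.DecodeString("abcd")
	if err != nil {
		panic(err)
	}
	fmt.Println(len(decoded)) // 3

	const gcmNonceSize = 12 // cipher.NewGCM's standard nonce size
	if len(decoded) < gcmNonceSize {
		fmt.Println("reject as invalid ciphertext instead of panicking")
	}
}
```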
logical.UpdateOperation, + Path: "keys/test", + } + + resp, err := b.HandleRequest(req) + if err != nil { + t.Fatal(err) + } + if resp != nil { + t.Fatal("expected nil response") + } + + req.Path = "decrypt/test" + req.Data = map[string]interface{}{ + "ciphertext": "vault:v1:abcd", + } + + _, err = b.HandleRequest(req) + if err == nil { + t.Fatal("expected error") + } +} diff --git a/helper/keysutil/policy.go b/helper/keysutil/policy.go index 5e14334f6a..85591f8e44 100644 --- a/helper/keysutil/policy.go +++ b/helper/keysutil/policy.go @@ -675,6 +675,10 @@ func (p *Policy) Decrypt(context, nonce []byte, value string) (string, error) { return "", errutil.InternalError{Err: err.Error()} } + if len(decoded) < gcm.NonceSize() { + return "", errutil.UserError{Err: "invalid ciphertext length"} + } + // Extract the nonce and ciphertext var ciphertext []byte if p.ConvergentEncryption && p.ConvergentVersion < 2 { From f3bc99d304c469ae1b11cc75c1f86fec428efdf7 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Thu, 2 Nov 2017 17:41:52 -0400 Subject: [PATCH 070/329] changelog++ --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 308fd5bd8b..cceb7408f6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -49,6 +49,8 @@ BUG FIXES: * physical/file: Fix listing when underscores are the first component of a path [GH-3476] * plugins: Allow response errors to be returned from backend plugins [GH-3412] + * secret/transit: Fix panic if the length of the input ciphertext was less + than the expected nonce length [GH-3521] ## 0.8.3 (September 19th, 2017) From f3aaacc3fc687dc08fb8710cd9baf44ccf9a9ee4 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Fri, 3 Nov 2017 07:19:49 +0000 Subject: [PATCH 071/329] Overhauling the client method and attaching it to the backend --- builtin/logical/nomad/backend.go | 53 +++++++++++++++++++++++++++ builtin/logical/nomad/client.go | 25 ------------- builtin/logical/nomad/path_token.go | 5 +-- builtin/logical/nomad/secret_token.go | 10 ++--- 4 files changed, 57 insertions(+), 36 deletions(-) delete mode 100644 builtin/logical/nomad/client.go diff --git a/builtin/logical/nomad/backend.go b/builtin/logical/nomad/backend.go index 99386d095b..ea507e08fc 100644 --- a/builtin/logical/nomad/backend.go +++ b/builtin/logical/nomad/backend.go @@ -1,6 +1,10 @@ package nomad import ( + "fmt" + "sync" + + "github.com/hashicorp/nomad/api" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/framework" ) @@ -34,4 +38,53 @@ func Backend() *backend { type backend struct { *framework.Backend + + client *api.Client + lock sync.RWMutex +} + +func (b *backend) Client(s logical.Storage) (*api.Client, error) { + + b.lock.RLock() + + // If we already have a client, return it + if b.client != nil { + b.lock.RUnlock() + return b.client, nil + } + + b.lock.RUnlock() + + conf, intErr := readConfigAccess(s) + if intErr != nil { + return nil, intErr + } + if conf == nil { + return nil, fmt.Errorf("no error received but no configuration found") + } + + nomadConf := api.DefaultConfig() + nomadConf.Address = conf.Address + nomadConf.SecretID = conf.Token + + b.lock.Lock() + defer b.lock.Unlock() + + // If the client was creted during the lock switch, return it + if b.client != nil { + return b.client, nil + } + var err error + b.client, err = api.NewClient(nomadConf) + if err != nil { + return nil, err + } + return b.client, nil +} + +func (b *backend) resetClient() { + b.lock.Lock() + defer b.lock.Unlock() + + b.client = nil } diff --git 
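backend.Client above caches the Nomad API client behind a sync.RWMutex: a read-locked fast path, then a write-locked slow path that re-checks before building. A minimal sketch of that double-checked pattern with the API client replaced by a string; the cache type and the build callback are invented for illustration.

```go
package main

import (
	"fmt"
	"sync"
)

// cache shows the same shape as backend.Client/resetClient above, with the
// Nomad API client replaced by a plain string to stay self-contained.
type cache struct {
	lock   sync.RWMutex
	client string
}

func (c *cache) get(build func() string) string {
	c.lock.RLock()
	if c.client != "" {
		defer c.lock.RUnlock()
		return c.client
	}
	c.lock.RUnlock()

	c.lock.Lock()
	defer c.lock.Unlock()

	// Re-check: another goroutine may have built the client while we were
	// waiting to move from the read lock to the write lock.
	if c.client != "" {
		return c.client
	}
	c.client = build()
	return c.client
}

func (c *cache) reset() {
	c.lock.Lock()
	defer c.lock.Unlock()
	c.client = ""
}

func main() {
	c := &cache{}
	fmt.Println(c.get(func() string { return "client-v1" })) // built once
	fmt.Println(c.get(func() string { return "client-v2" })) // cached value
	c.reset()
	fmt.Println(c.get(func() string { return "client-v2" })) // rebuilt
}
```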
a/builtin/logical/nomad/client.go b/builtin/logical/nomad/client.go deleted file mode 100644 index 80879d76d8..0000000000 --- a/builtin/logical/nomad/client.go +++ /dev/null @@ -1,25 +0,0 @@ -package nomad - -import ( - "fmt" - - "github.com/hashicorp/nomad/api" - "github.com/hashicorp/vault/logical" -) - -func client(s logical.Storage) (*api.Client, error, error) { - conf, intErr := readConfigAccess(s) - if intErr != nil { - return nil, nil, intErr - } - if conf == nil { - return nil, nil, fmt.Errorf("no error received but no configuration found") - } - - nomadConf := api.DefaultConfig() - nomadConf.Address = conf.Address - nomadConf.SecretID = conf.Token - - client, err := api.NewClient(nomadConf) - return client, nil, err -} diff --git a/builtin/logical/nomad/path_token.go b/builtin/logical/nomad/path_token.go index b8f5efa350..37e268f439 100644 --- a/builtin/logical/nomad/path_token.go +++ b/builtin/logical/nomad/path_token.go @@ -47,13 +47,10 @@ func (b *backend) pathTokenRead( } // Get the nomad client - c, userErr, intErr := client(req.Storage) + c, intErr := b.Client(req.Storage) if intErr != nil { return nil, intErr } - if userErr != nil { - return logical.ErrorResponse(userErr.Error()), nil - } // Generate a name for the token tokenName := fmt.Sprintf("Vault %s %s %d", name, req.DisplayName, time.Now().UnixNano()) diff --git a/builtin/logical/nomad/secret_token.go b/builtin/logical/nomad/secret_token.go index 00df0fef7b..5ba048a8e5 100644 --- a/builtin/logical/nomad/secret_token.go +++ b/builtin/logical/nomad/secret_token.go @@ -20,7 +20,7 @@ func secretToken(b *backend) *framework.Secret { }, Renew: b.secretTokenRenew, - Revoke: secretTokenRevoke, + Revoke: b.secretTokenRevoke, } } @@ -30,16 +30,12 @@ func (b *backend) secretTokenRenew( return framework.LeaseExtend(0, 0, b.System())(req, d) } -func secretTokenRevoke( +func (b *backend) secretTokenRevoke( req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - c, userErr, intErr := client(req.Storage) + c, intErr := b.Client(req.Storage) if intErr != nil { return nil, intErr } - if userErr != nil { - // Returning logical.ErrorResponse from revocation function is risky - return nil, userErr - } tokenRaw, _ := req.Secret.InternalData["accessor_id"] From 7015139ecef71a13a294a637201cb84344330df7 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Fri, 3 Nov 2017 07:25:47 +0000 Subject: [PATCH 072/329] Not storing the Nomad token as we have the accesor for administrative operations --- builtin/logical/nomad/path_token.go | 1 - 1 file changed, 1 deletion(-) diff --git a/builtin/logical/nomad/path_token.go b/builtin/logical/nomad/path_token.go index 37e268f439..28a397858e 100644 --- a/builtin/logical/nomad/path_token.go +++ b/builtin/logical/nomad/path_token.go @@ -71,7 +71,6 @@ func (b *backend) pathTokenRead( "secret_id": token.SecretID, "accessor_id": token.AccessorID, }, map[string]interface{}{ - "secret_id": token.SecretID, "accessor_id": token.AccessorID, }) s.Secret.TTL = result.Lease From 7ca73556e493f4f1bcf318e386d140e5d444a499 Mon Sep 17 00:00:00 2001 From: Vishal Nayak Date: Fri, 3 Nov 2017 06:19:21 -0400 Subject: [PATCH 073/329] docs: Add config/ca delete operation (#3525) --- website/source/api/secret/ssh/index.html.md | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/website/source/api/secret/ssh/index.html.md b/website/source/api/secret/ssh/index.html.md index 2adc4d6562..c124eac173 100644 --- a/website/source/api/secret/ssh/index.html.md +++ 
b/website/source/api/secret/ssh/index.html.md @@ -608,8 +608,8 @@ This endpoint allows submitting the CA information for the backend via an SSH key pair. _If you have already set a certificate and key, they will be overridden._ -| Method | Path | Produces | -| :------- | :--------------------------- | :--------------------- | +| Method | Path | Produces | +| :------- | :--------------------------- | :------------------------- | | `POST` | `/ssh/config/ca` | `200/204 application/json` | ### Parameters @@ -660,6 +660,23 @@ This will return a `200` response if `generate_signing_key` was true: } ``` +## Delete CA Information + +This endpoint deletes the CA information for the backend via an SSH key pair. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `DELETE` | `/ssh/config/ca` | `204 (empty body)` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + --request DELETE \ + https://vault.rocks/v1/ssh/config/ca +``` + ## Read Public Key (Unauthenticated) This endpoint returns the configured/generated public key. This is an unauthenticated From 2bbb8377e895c72e8b55f394bd8a72164a9b42ed Mon Sep 17 00:00:00 2001 From: Chris Hoffman Date: Fri, 3 Nov 2017 07:19:29 -0400 Subject: [PATCH 074/329] Fix policy lookup when entity is part of multiple groups (#3524) --- vault/identity_store_groups_test.go | 76 +++++++++++++++++++++++++++++ vault/identity_store_util.go | 3 +- 2 files changed, 78 insertions(+), 1 deletion(-) diff --git a/vault/identity_store_groups_test.go b/vault/identity_store_groups_test.go index 21e382ba79..a7a7d16679 100644 --- a/vault/identity_store_groups_test.go +++ b/vault/identity_store_groups_test.go @@ -468,6 +468,82 @@ func TestIdentityStore_GroupsCRUD_ByID(t *testing.T) { } } +func TestIdentityStore_GroupMultiCase(t *testing.T) { + var resp *logical.Response + var err error + is, _, _ := testIdentityStoreWithGithubAuth(t) + groupRegisterReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "group", + } + + // Create 'build' group + buildGroupData := map[string]interface{}{ + "name": "build", + "policies": "buildpolicy", + } + groupRegisterReq.Data = buildGroupData + resp, err = is.HandleRequest(groupRegisterReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v, err: %v", resp, err) + } + buildGroupID := resp.Data["id"].(string) + + // Create 'deploy' group + deployGroupData := map[string]interface{}{ + "name": "deploy", + "policies": "deploypolicy", + } + groupRegisterReq.Data = deployGroupData + resp, err = is.HandleRequest(groupRegisterReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v, err: %v", resp, err) + } + deployGroupID := resp.Data["id"].(string) + + // Create an entity ID + entityRegisterReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "entity", + } + resp, err = is.HandleRequest(entityRegisterReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v, err: %v", resp, err) + } + entityID1 := resp.Data["id"].(string) + + // Add the entity as a member of 'build' group + entityIDReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "group/id/" + buildGroupID, + Data: map[string]interface{}{ + "member_entity_ids": []string{entityID1}, + }, + } + resp, err = is.HandleRequest(entityIDReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v, err: %v", resp, err) + } + + // Add the entity as a member of the 'deploy` group + 
entityIDReq.Path = "group/id/" + deployGroupID + resp, err = is.HandleRequest(entityIDReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v, err: %v", resp, err) + } + + policies, err := is.groupPoliciesByEntityID(entityID1) + if err != nil { + t.Fatal(err) + } + sort.Strings(policies) + expected := []string{"deploypolicy", "buildpolicy"} + sort.Strings(expected) + if !reflect.DeepEqual(expected, policies) { + t.Fatalf("bad: policies; expected: %#v\nactual:%#v", expected, policies) + } +} + /* Test groups hierarchy: eng diff --git a/vault/identity_store_util.go b/vault/identity_store_util.go index b6b5313607..b083a53dc6 100644 --- a/vault/identity_store_util.go +++ b/vault/identity_store_util.go @@ -1960,10 +1960,11 @@ func (i *IdentityStore) groupPoliciesByEntityID(entityID string) ([]string, erro visited := make(map[string]bool) var policies []string for _, group := range groups { - policies, err = i.collectPoliciesReverseDFS(group, visited, nil) + groupPolicies, err := i.collectPoliciesReverseDFS(group, visited, nil) if err != nil { return nil, err } + policies = append(policies, groupPolicies...) } return strutil.RemoveDuplicates(policies, false), nil From f3fd22db390e67aed13c86cd6dc9cd58ca6cf555 Mon Sep 17 00:00:00 2001 From: Chris Hoffman Date: Fri, 3 Nov 2017 09:31:39 -0400 Subject: [PATCH 075/329] fix unseal reset test (#3528) --- http/sys_seal_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/http/sys_seal_test.go b/http/sys_seal_test.go index 82d40a2e41..902466ee4d 100644 --- a/http/sys_seal_test.go +++ b/http/sys_seal_test.go @@ -235,6 +235,7 @@ func TestSysUnseal_Reset(t *testing.T) { "t": json.Number("3"), "n": json.Number("5"), "progress": json.Number("0"), + "type": "shamir", } testResponseStatus(t, resp, 200) testResponseBody(t, resp, &actual) From a3a781527e2b7ab9223749c98ae006e84a256be2 Mon Sep 17 00:00:00 2001 From: Chris Hoffman Date: Fri, 3 Nov 2017 10:29:42 -0400 Subject: [PATCH 076/329] fixing test after field rename (#3530) --- helper/storagepacker/storagepacker_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/helper/storagepacker/storagepacker_test.go b/helper/storagepacker/storagepacker_test.go index 9f8f287763..992658777f 100644 --- a/helper/storagepacker/storagepacker_test.go +++ b/helper/storagepacker/storagepacker_test.go @@ -116,16 +116,16 @@ func TestStoragePacker_SerializeDeserializeComplexItem(t *testing.T) { alias1 := &identity.Alias{ ID: "alias_id", - EntityID: "entity_id", + CanonicalID: "canonical_id", MountType: "mount_type", MountAccessor: "mount_accessor", Metadata: map[string]string{ "aliasmkey": "aliasmvalue", }, - Name: "alias_name", - CreationTime: timeNow, - LastUpdateTime: timeNow, - MergedFromEntityIDs: []string{"merged_from_entity_id"}, + Name: "alias_name", + CreationTime: timeNow, + LastUpdateTime: timeNow, + MergedFromCanonicalIDs: []string{"merged_from_canonical_id"}, } entity := &identity.Entity{ From ced60dbc0c4ab09b2a95b6b58d9aa1abb570388d Mon Sep 17 00:00:00 2001 From: Vishal Nayak Date: Fri, 3 Nov 2017 10:45:53 -0400 Subject: [PATCH 077/329] Encrypt/Decrypt/Sign/Verify using RSA in Transit backend (#3489) * encrypt/decrypt/sign/verify RSA * update path-help and doc * Fix the bug which was breaking convergent encryption * support both 2048 and 4096 * update doc to contain both 2048 and 4096 * Add test for encrypt, decrypt and rotate on RSA keys * Support exporting RSA keys * Add sign and verify test steps * Remove 'RSA' from PEM header * use the default salt 
length * Add 'RSA' to PEM header since openssl is expecting that * export rsa keys as signing-key as well * Comment the reasoning behind the PEM headers * remove comment * update comment * Parameterize hashing for RSA signing and verification * Added test steps to check hash algo choice for RSA sign/verify * fix test by using 'prehashed' --- builtin/logical/transit/backend_test.go | 185 ++++++++++++ builtin/logical/transit/path_encrypt_test.go | 2 +- builtin/logical/transit/path_export.go | 19 ++ builtin/logical/transit/path_keys.go | 37 ++- builtin/logical/transit/path_sign_verify.go | 22 +- .../logical/transit/path_sign_verify_test.go | 3 +- helper/keysutil/lock_manager.go | 8 +- helper/keysutil/policy.go | 268 +++++++++++------- .../source/api/secret/transit/index.html.md | 2 + 9 files changed, 434 insertions(+), 112 deletions(-) diff --git a/builtin/logical/transit/backend_test.go b/builtin/logical/transit/backend_test.go index 315200fdab..71f94744db 100644 --- a/builtin/logical/transit/backend_test.go +++ b/builtin/logical/transit/backend_test.go @@ -38,6 +38,191 @@ func createBackendWithStorage(t *testing.T) (*backend, logical.Storage) { return b, config.StorageView } +func TestTransit_RSA(t *testing.T) { + testTransit_RSA(t, "rsa-2048") + testTransit_RSA(t, "rsa-4096") +} + +func testTransit_RSA(t *testing.T, keyType string) { + var resp *logical.Response + var err error + b, storage := createBackendWithStorage(t) + + keyReq := &logical.Request{ + Path: "keys/rsa", + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "type": keyType, + }, + Storage: storage, + } + + resp, err = b.HandleRequest(keyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } + + plaintext := "dGhlIHF1aWNrIGJyb3duIGZveA==" // "the quick brown fox" + + encryptReq := &logical.Request{ + Path: "encrypt/rsa", + Operation: logical.UpdateOperation, + Storage: storage, + Data: map[string]interface{}{ + "plaintext": plaintext, + }, + } + + resp, err = b.HandleRequest(encryptReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } + + ciphertext1 := resp.Data["ciphertext"].(string) + + decryptReq := &logical.Request{ + Path: "decrypt/rsa", + Operation: logical.UpdateOperation, + Storage: storage, + Data: map[string]interface{}{ + "ciphertext": ciphertext1, + }, + } + + resp, err = b.HandleRequest(decryptReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } + + decryptedPlaintext := resp.Data["plaintext"] + + if plaintext != decryptedPlaintext { + t.Fatalf("bad: plaintext; expected: %q\nactual: %q", plaintext, decryptedPlaintext) + } + + // Rotate the key + rotateReq := &logical.Request{ + Path: "keys/rsa/rotate", + Operation: logical.UpdateOperation, + Storage: storage, + } + resp, err = b.HandleRequest(rotateReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } + + // Encrypt again + resp, err = b.HandleRequest(encryptReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } + ciphertext2 := resp.Data["ciphertext"].(string) + + if ciphertext1 == ciphertext2 { + t.Fatalf("expected different ciphertexts") + } + + // See if the older ciphertext can still be decrypted + resp, err = b.HandleRequest(decryptReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } + if 
resp.Data["plaintext"].(string) != plaintext { + t.Fatal("failed to decrypt old ciphertext after rotating the key") + } + + // Decrypt the new ciphertext + decryptReq.Data = map[string]interface{}{ + "ciphertext": ciphertext2, + } + resp, err = b.HandleRequest(decryptReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } + if resp.Data["plaintext"].(string) != plaintext { + t.Fatal("failed to decrypt ciphertext after rotating the key") + } + + signReq := &logical.Request{ + Path: "sign/rsa", + Operation: logical.UpdateOperation, + Storage: storage, + Data: map[string]interface{}{ + "input": plaintext, + }, + } + resp, err = b.HandleRequest(signReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } + signature := resp.Data["signature"].(string) + + verifyReq := &logical.Request{ + Path: "verify/rsa", + Operation: logical.UpdateOperation, + Storage: storage, + Data: map[string]interface{}{ + "input": plaintext, + "signature": signature, + }, + } + + resp, err = b.HandleRequest(verifyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } + if !resp.Data["valid"].(bool) { + t.Fatalf("failed to verify the RSA signature") + } + + signReq.Data = map[string]interface{}{ + "input": plaintext, + "algorithm": "invalid", + } + resp, err = b.HandleRequest(signReq) + if err != nil { + t.Fatal(err) + } + if resp == nil || !resp.IsError() { + t.Fatal("expected an error response") + } + + signReq.Data = map[string]interface{}{ + "input": plaintext, + "algorithm": "sha2-512", + } + resp, err = b.HandleRequest(signReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } + signature = resp.Data["signature"].(string) + + verifyReq.Data = map[string]interface{}{ + "input": plaintext, + "signature": signature, + } + resp, err = b.HandleRequest(verifyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } + if resp.Data["valid"].(bool) { + t.Fatalf("expected validation to fail") + } + + verifyReq.Data = map[string]interface{}{ + "input": plaintext, + "signature": signature, + "algorithm": "sha2-512", + } + resp, err = b.HandleRequest(verifyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } + if !resp.Data["valid"].(bool) { + t.Fatalf("failed to verify the RSA signature") + } +} + func TestBackend_basic(t *testing.T) { decryptData := make(map[string]interface{}) logicaltest.Test(t, logicaltest.TestCase{ diff --git a/builtin/logical/transit/path_encrypt_test.go b/builtin/logical/transit/path_encrypt_test.go index 6ab20db271..d586600453 100644 --- a/builtin/logical/transit/path_encrypt_test.go +++ b/builtin/logical/transit/path_encrypt_test.go @@ -26,7 +26,7 @@ func TestTransit_BatchEncryptionCase1(t *testing.T) { t.Fatalf("err:%v resp:%#v", err, resp) } - plaintext := "dGhlIHF1aWNrIGJyb3duIGZveA==" + plaintext := "dGhlIHF1aWNrIGJyb3duIGZveA==" // "the quick brown fox" encData := map[string]interface{}{ "plaintext": plaintext, diff --git a/builtin/logical/transit/path_export.go b/builtin/logical/transit/path_export.go index a18db91b0f..a218c22d82 100644 --- a/builtin/logical/transit/path_export.go +++ b/builtin/logical/transit/path_export.go @@ -3,6 +3,7 @@ package transit import ( "crypto/ecdsa" "crypto/elliptic" + "crypto/rsa" "crypto/x509" "encoding/base64" "encoding/pem" @@ -152,6 
+153,9 @@ func getExportKey(policy *keysutil.Policy, key *keysutil.KeyEntry, exportType st switch policy.Type { case keysutil.KeyType_AES256_GCM96: return strings.TrimSpace(base64.StdEncoding.EncodeToString(key.Key)), nil + + case keysutil.KeyType_RSA2048, keysutil.KeyType_RSA4096: + return encodeRSAPrivateKey(key.RSAKey), nil } case exportTypeSigningKey: @@ -165,12 +169,27 @@ func getExportKey(policy *keysutil.Policy, key *keysutil.KeyEntry, exportType st case keysutil.KeyType_ED25519: return strings.TrimSpace(base64.StdEncoding.EncodeToString(key.Key)), nil + + case keysutil.KeyType_RSA2048, keysutil.KeyType_RSA4096: + return encodeRSAPrivateKey(key.RSAKey), nil } } return "", fmt.Errorf("unknown key type %v", policy.Type) } +func encodeRSAPrivateKey(key *rsa.PrivateKey) string { + // When encoding PKCS1, the PEM header should be `RSA PRIVATE KEY`. When Go + // has PKCS8 encoding support, we may want to change this. + derBytes := x509.MarshalPKCS1PrivateKey(key) + pemBlock := &pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: derBytes, + } + pemBytes := pem.EncodeToMemory(pemBlock) + return string(pemBytes) +} + func keyEntryToECPrivateKey(k *keysutil.KeyEntry, curve elliptic.Curve) (string, error) { if k == nil { return "", errors.New("nil KeyEntry provided") diff --git a/builtin/logical/transit/path_keys.go b/builtin/logical/transit/path_keys.go index ad9a9188c2..42ce0b9b58 100644 --- a/builtin/logical/transit/path_keys.go +++ b/builtin/logical/transit/path_keys.go @@ -2,7 +2,9 @@ package transit import ( "crypto/elliptic" + "crypto/x509" "encoding/base64" + "encoding/pem" "fmt" "strconv" "time" @@ -40,9 +42,11 @@ func (b *backend) pathKeys() *framework.Path { "type": &framework.FieldSchema{ Type: framework.TypeString, Default: "aes256-gcm96", - Description: `The type of key to create. Currently, -"aes256-gcm96" (symmetric) and "ecdsa-p256" (asymmetric), and -'ed25519' (asymmetric) are supported. Defaults to "aes256-gcm96".`, + Description: ` +The type of key to create. Currently, "aes256-gcm96" (symmetric), "ecdsa-p256" +(asymmetric), 'ed25519' (asymmetric), 'rsa-2048' (asymmetric), 'rsa-4096' +(asymmetric) are supported. Defaults to "aes256-gcm96". 
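Both export and key-read return RSA material PEM-encoded: the private key as a PKCS#1 "RSA PRIVATE KEY" block (encodeRSAPrivateKey above) and the public key as a PKIX "PUBLIC KEY" block (in the read handler that follows). A standalone sketch using only the standard library; the 2048-bit size is chosen for brevity.

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"fmt"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	// Private key: PKCS#1 DER wrapped in an "RSA PRIVATE KEY" PEM block,
	// the same shape returned for exported keys.
	privPEM := pem.EncodeToMemory(&pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(key),
	})

	// Public key: PKIX DER wrapped in a "PUBLIC KEY" PEM block, matching
	// what the key-read endpoint returns for rsa-2048/rsa-4096 keys.
	pubDER, err := x509.MarshalPKIXPublicKey(key.Public())
	if err != nil {
		panic(err)
	}
	pubPEM := pem.EncodeToMemory(&pem.Block{
		Type:  "PUBLIC KEY",
		Bytes: pubDER,
	})

	fmt.Printf("%s%s", privPEM, pubPEM)
}
```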
+`, }, "derived": &framework.FieldSchema{ @@ -131,6 +135,10 @@ func (b *backend) pathPolicyWrite( polReq.KeyType = keysutil.KeyType_ECDSA_P256 case "ed25519": polReq.KeyType = keysutil.KeyType_ED25519 + case "rsa-2048": + polReq.KeyType = keysutil.KeyType_RSA2048 + case "rsa-4096": + polReq.KeyType = keysutil.KeyType_RSA4096 default: return logical.ErrorResponse(fmt.Sprintf("unknown key type %v", keyType)), logical.ErrInvalidRequest } @@ -225,7 +233,7 @@ func (b *backend) pathPolicyRead( } resp.Data["keys"] = retKeys - case keysutil.KeyType_ECDSA_P256, keysutil.KeyType_ED25519: + case keysutil.KeyType_ECDSA_P256, keysutil.KeyType_ED25519, keysutil.KeyType_RSA2048, keysutil.KeyType_RSA4096: retKeys := map[string]map[string]interface{}{} for k, v := range p.Keys { key := asymKey{ @@ -253,6 +261,27 @@ func (b *backend) pathPolicyRead( } } key.Name = "ed25519" + case keysutil.KeyType_RSA2048, keysutil.KeyType_RSA4096: + key.Name = "rsa-2048" + if p.Type == keysutil.KeyType_RSA4096 { + key.Name = "rsa-4096" + } + + // Encode the RSA public key in PEM format to return over the + // API + derBytes, err := x509.MarshalPKIXPublicKey(v.RSAKey.Public()) + if err != nil { + return nil, fmt.Errorf("error marshaling RSA public key: %v", err) + } + pemBlock := &pem.Block{ + Type: "PUBLIC KEY", + Bytes: derBytes, + } + pemBytes := pem.EncodeToMemory(pemBlock) + if pemBytes == nil || len(pemBytes) == 0 { + return nil, fmt.Errorf("failed to PEM-encode RSA public key") + } + key.PublicKey = string(pemBytes) } retKeys[strconv.Itoa(k)] = structs.New(key).Map() diff --git a/builtin/logical/transit/path_sign_verify.go b/builtin/logical/transit/path_sign_verify.go index 074f7ff222..6bdc96d3c6 100644 --- a/builtin/logical/transit/path_sign_verify.go +++ b/builtin/logical/transit/path_sign_verify.go @@ -37,7 +37,6 @@ derivation is enabled; currently only available with ed25519 keys.`, Default: "sha2-256", Description: `Hash algorithm to use (POST body parameter). Valid values are: -* none * sha2-224 * sha2-256 * sha2-384 @@ -58,6 +57,11 @@ including ed25519.`, Must be 0 (for latest) or a value greater than or equal to the min_encryption_version configured on the key.`, }, + + "prehashed": &framework.FieldSchema{ + Type: framework.TypeBool, + Description: `Set to 'true' when the input is already hashed. If the key type is 'rsa-2048' or 'rsa-4096', then the algorithm used to hash the input should be indicated by the 'algorithm' parameter.`, + }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -109,7 +113,6 @@ derivation is enabled; currently only available with ed25519 keys.`, Default: "sha2-256", Description: `Hash algorithm to use (POST body parameter). Valid values are: -* none * sha2-224 * sha2-256 * sha2-384 @@ -117,6 +120,11 @@ derivation is enabled; currently only available with ed25519 keys.`, Defaults to "sha2-256". Not valid for all key types.`, }, + + "prehashed": &framework.FieldSchema{ + Type: framework.TypeBool, + Description: `Set to 'true' when the input is already hashed. 
If the key type is 'rsa-2048' or 'rsa-4096', then the algorithm used to hash the input should be indicated by the 'algorithm' parameter.`, + }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -137,6 +145,7 @@ func (b *backend) pathSignWrite( if algorithm == "" { algorithm = d.Get("algorithm").(string) } + prehashed := d.Get("prehashed").(bool) input, err := base64.StdEncoding.DecodeString(inputB64) if err != nil { @@ -168,7 +177,7 @@ func (b *backend) pathSignWrite( } } - if p.Type.HashSignatureInput() && algorithm != "none" { + if p.Type.HashSignatureInput() && !prehashed { var hf hash.Hash switch algorithm { case "sha2-224": @@ -186,7 +195,7 @@ func (b *backend) pathSignWrite( input = hf.Sum(nil) } - sig, err := p.Sign(ver, context, input) + sig, err := p.Sign(ver, context, input, algorithm) if err != nil { return nil, err } @@ -230,6 +239,7 @@ func (b *backend) pathVerifyWrite( if algorithm == "" { algorithm = d.Get("algorithm").(string) } + prehashed := d.Get("prehashed").(bool) input, err := base64.StdEncoding.DecodeString(inputB64) if err != nil { @@ -261,7 +271,7 @@ func (b *backend) pathVerifyWrite( } } - if p.Type.HashSignatureInput() && algorithm != "none" { + if p.Type.HashSignatureInput() && !prehashed { var hf hash.Hash switch algorithm { case "sha2-224": @@ -279,7 +289,7 @@ func (b *backend) pathVerifyWrite( input = hf.Sum(nil) } - valid, err := p.VerifySignature(context, input, sig) + valid, err := p.VerifySignature(context, input, sig, algorithm) if err != nil { switch err.(type) { case errutil.UserError: diff --git a/builtin/logical/transit/path_sign_verify_test.go b/builtin/logical/transit/path_sign_verify_test.go index 1ab994f1da..f23f0a8edb 100644 --- a/builtin/logical/transit/path_sign_verify_test.go +++ b/builtin/logical/transit/path_sign_verify_test.go @@ -164,9 +164,10 @@ func TestTransit_SignVerify_P256(t *testing.T) { sig = signRequest(req, false, "") verifyRequest(req, false, "", sig) - req.Data["algorithm"] = "none" + req.Data["prehashed"] = true sig = signRequest(req, false, "") verifyRequest(req, false, "", sig) + delete(req.Data, "prehashed") // Test 512 and save sig for later to ensure we can't validate once min // decryption version is set diff --git a/helper/keysutil/lock_manager.go b/helper/keysutil/lock_manager.go index 7588199734..fc28ea0eee 100644 --- a/helper/keysutil/lock_manager.go +++ b/helper/keysutil/lock_manager.go @@ -256,7 +256,13 @@ func (lm *LockManager) getPolicyCommon(req PolicyRequest, lockType bool) (*Polic case KeyType_ED25519: if req.Convergent { lm.UnlockPolicy(lock, lockType) - return nil, nil, false, fmt.Errorf("convergent encryption not not supported for keys of type %v", req.KeyType) + return nil, nil, false, fmt.Errorf("convergent encryption not supported for keys of type %v", req.KeyType) + } + + case KeyType_RSA2048, KeyType_RSA4096: + if req.Derived || req.Convergent { + lm.UnlockPolicy(lock, lockType) + return nil, nil, false, fmt.Errorf("key derivation and convergent encryption not supported for keys of type %v", req.KeyType) } default: diff --git a/helper/keysutil/policy.go b/helper/keysutil/policy.go index 85591f8e44..7ce53e266c 100644 --- a/helper/keysutil/policy.go +++ b/helper/keysutil/policy.go @@ -9,6 +9,7 @@ import ( "crypto/elliptic" "crypto/hmac" "crypto/rand" + "crypto/rsa" "crypto/sha256" "crypto/x509" "encoding/asn1" @@ -44,6 +45,8 @@ const ( KeyType_AES256_GCM96 = iota KeyType_ECDSA_P256 KeyType_ED25519 + KeyType_RSA2048 + KeyType_RSA4096 ) const ErrTooOld = "ciphertext or signature version is 
disallowed by policy (too old)" @@ -61,7 +64,7 @@ type KeyType int func (kt KeyType) EncryptionSupported() bool { switch kt { - case KeyType_AES256_GCM96: + case KeyType_AES256_GCM96, KeyType_RSA2048, KeyType_RSA4096: return true } return false @@ -69,7 +72,7 @@ func (kt KeyType) EncryptionSupported() bool { func (kt KeyType) DecryptionSupported() bool { switch kt { - case KeyType_AES256_GCM96: + case KeyType_AES256_GCM96, KeyType_RSA2048, KeyType_RSA4096: return true } return false @@ -77,7 +80,7 @@ func (kt KeyType) DecryptionSupported() bool { func (kt KeyType) SigningSupported() bool { switch kt { - case KeyType_ECDSA_P256, KeyType_ED25519: + case KeyType_ECDSA_P256, KeyType_ED25519, KeyType_RSA2048, KeyType_RSA4096: return true } return false @@ -85,7 +88,7 @@ func (kt KeyType) SigningSupported() bool { func (kt KeyType) HashSignatureInput() bool { switch kt { - case KeyType_ECDSA_P256: + case KeyType_ECDSA_P256, KeyType_RSA2048, KeyType_RSA4096: return true } return false @@ -107,6 +110,10 @@ func (kt KeyType) String() string { return "ecdsa-p256" case KeyType_ED25519: return "ed25519" + case KeyType_RSA2048: + return "rsa-2048" + case KeyType_RSA4096: + return "rsa-4096" } return "[unknown]" @@ -127,6 +134,8 @@ type KeyEntry struct { EC_Y *big.Int `json:"ec_y"` EC_D *big.Int `json:"ec_d"` + RSAKey *rsa.PrivateKey `json:"rsa_key"` + // The public key in an appropriate format for the type of key FormattedPublicKey string `json:"public_key"` @@ -519,13 +528,6 @@ func (p *Policy) Encrypt(ver int, context, nonce []byte, value string) (string, return "", errutil.UserError{Err: fmt.Sprintf("message encryption not supported for key type %v", p.Type)} } - // Guard against a potentially invalid key type - switch p.Type { - case KeyType_AES256_GCM96: - default: - return "", errutil.InternalError{Err: fmt.Sprintf("unsupported key type %v", p.Type)} - } - // Decode the plaintext value plaintext, err := base64.StdEncoding.DecodeString(value) if err != nil { @@ -543,62 +545,69 @@ func (p *Policy) Encrypt(ver int, context, nonce []byte, value string) (string, return "", errutil.UserError{Err: "requested version for encryption is less than the minimum encryption key version"} } - // Derive the key that should be used - key, err := p.DeriveKey(context, ver) - if err != nil { - return "", err - } + var ciphertext []byte - // Guard against a potentially invalid key type switch p.Type { case KeyType_AES256_GCM96: + // Derive the key that should be used + key, err := p.DeriveKey(context, ver) + if err != nil { + return "", err + } + + // Setup the cipher + aesCipher, err := aes.NewCipher(key) + if err != nil { + return "", errutil.InternalError{Err: err.Error()} + } + + // Setup the GCM AEAD + gcm, err := cipher.NewGCM(aesCipher) + if err != nil { + return "", errutil.InternalError{Err: err.Error()} + } + + if p.ConvergentEncryption { + switch p.ConvergentVersion { + case 1: + if len(nonce) != gcm.NonceSize() { + return "", errutil.UserError{Err: fmt.Sprintf("base64-decoded nonce must be %d bytes long when using convergent encryption with this key", gcm.NonceSize())} + } + default: + nonceHmac := hmac.New(sha256.New, context) + nonceHmac.Write(plaintext) + nonceSum := nonceHmac.Sum(nil) + nonce = nonceSum[:gcm.NonceSize()] + } + } else { + // Compute random nonce + nonce, err = uuid.GenerateRandomBytes(gcm.NonceSize()) + if err != nil { + return "", errutil.InternalError{Err: err.Error()} + } + } + + // Encrypt and tag with GCM + ciphertext = gcm.Seal(nil, nonce, plaintext, nil) + + // Place the 
encrypted data after the nonce + if !p.ConvergentEncryption || p.ConvergentVersion > 1 { + ciphertext = append(nonce, ciphertext...) + } + + case KeyType_RSA2048, KeyType_RSA4096: + key := p.Keys[ver].RSAKey + ciphertext, err = rsa.EncryptOAEP(sha256.New(), rand.Reader, &key.PublicKey, plaintext, nil) + if err != nil { + return "", errutil.InternalError{Err: fmt.Sprintf("failed to RSA encrypt the plaintext: %v", err)} + } + default: return "", errutil.InternalError{Err: fmt.Sprintf("unsupported key type %v", p.Type)} } - // Setup the cipher - aesCipher, err := aes.NewCipher(key) - if err != nil { - return "", errutil.InternalError{Err: err.Error()} - } - - // Setup the GCM AEAD - gcm, err := cipher.NewGCM(aesCipher) - if err != nil { - return "", errutil.InternalError{Err: err.Error()} - } - - if p.ConvergentEncryption { - switch p.ConvergentVersion { - case 1: - if len(nonce) != gcm.NonceSize() { - return "", errutil.UserError{Err: fmt.Sprintf("base64-decoded nonce must be %d bytes long when using convergent encryption with this key", gcm.NonceSize())} - } - default: - nonceHmac := hmac.New(sha256.New, context) - nonceHmac.Write(plaintext) - nonceSum := nonceHmac.Sum(nil) - nonce = nonceSum[:gcm.NonceSize()] - } - } else { - // Compute random nonce - nonce, err = uuid.GenerateRandomBytes(gcm.NonceSize()) - if err != nil { - return "", errutil.InternalError{Err: err.Error()} - } - } - - // Encrypt and tag with GCM - out := gcm.Seal(nil, nonce, plaintext, nil) - - // Place the encrypted data after the nonce - full := out - if !p.ConvergentEncryption || p.ConvergentVersion > 1 { - full = append(nonce, out...) - } - // Convert to base64 - encoded := base64.StdEncoding.EncodeToString(full) + encoded := base64.StdEncoding.EncodeToString(ciphertext) // Prepend some information encoded = "vault:v" + strconv.Itoa(ver) + ":" + encoded @@ -644,54 +653,61 @@ func (p *Policy) Decrypt(context, nonce []byte, value string) (string, error) { return "", errutil.UserError{Err: ErrTooOld} } - // Derive the key that should be used - key, err := p.DeriveKey(context, ver) - if err != nil { - return "", err - } - - // Guard against a potentially invalid key type - switch p.Type { - case KeyType_AES256_GCM96: - default: - return "", errutil.InternalError{Err: fmt.Sprintf("unsupported key type %v", p.Type)} - } - // Decode the base64 decoded, err := base64.StdEncoding.DecodeString(splitVerCiphertext[1]) if err != nil { return "", errutil.UserError{Err: "invalid ciphertext: could not decode base64"} } - // Setup the cipher - aesCipher, err := aes.NewCipher(key) - if err != nil { - return "", errutil.InternalError{Err: err.Error()} - } + var plain []byte - // Setup the GCM AEAD - gcm, err := cipher.NewGCM(aesCipher) - if err != nil { - return "", errutil.InternalError{Err: err.Error()} - } + switch p.Type { + case KeyType_AES256_GCM96: + key, err := p.DeriveKey(context, ver) + if err != nil { + return "", err + } - if len(decoded) < gcm.NonceSize() { - return "", errutil.UserError{Err: "invalid ciphertext length"} - } + // Setup the cipher + aesCipher, err := aes.NewCipher(key) + if err != nil { + return "", errutil.InternalError{Err: err.Error()} + } - // Extract the nonce and ciphertext - var ciphertext []byte - if p.ConvergentEncryption && p.ConvergentVersion < 2 { - ciphertext = decoded - } else { - nonce = decoded[:gcm.NonceSize()] - ciphertext = decoded[gcm.NonceSize():] - } + // Setup the GCM AEAD + gcm, err := cipher.NewGCM(aesCipher) + if err != nil { + return "", errutil.InternalError{Err: err.Error()} + } 
- // Verify and Decrypt - plain, err := gcm.Open(nil, nonce, ciphertext, nil) - if err != nil { - return "", errutil.UserError{Err: "invalid ciphertext: unable to decrypt"} + if len(decoded) < gcm.NonceSize() { + return "", errutil.UserError{Err: "invalid ciphertext length"} + } + + // Extract the nonce and ciphertext + var ciphertext []byte + if p.ConvergentEncryption && p.ConvergentVersion < 2 { + ciphertext = decoded + } else { + nonce = decoded[:gcm.NonceSize()] + ciphertext = decoded[gcm.NonceSize():] + } + + // Verify and Decrypt + plain, err = gcm.Open(nil, nonce, ciphertext, nil) + if err != nil { + return "", errutil.UserError{Err: "invalid ciphertext: unable to decrypt"} + } + + case KeyType_RSA2048, KeyType_RSA4096: + key := p.Keys[ver].RSAKey + plain, err = rsa.DecryptOAEP(sha256.New(), rand.Reader, key, decoded, nil) + if err != nil { + return "", errutil.InternalError{Err: fmt.Sprintf("failed to RSA decrypt the ciphertext: %v", err)} + } + + default: + return "", errutil.InternalError{Err: fmt.Sprintf("unsupported key type %v", p.Type)} } return base64.StdEncoding.EncodeToString(plain), nil @@ -712,7 +728,7 @@ func (p *Policy) HMACKey(version int) ([]byte, error) { return p.Keys[version].HMACKey, nil } -func (p *Policy) Sign(ver int, context, input []byte) (*SigningResult, error) { +func (p *Policy) Sign(ver int, context, input []byte, algorithm string) (*SigningResult, error) { if !p.Type.SigningSupported() { return nil, fmt.Errorf("message signing not supported for key type %v", p.Type) } @@ -777,6 +793,28 @@ func (p *Policy) Sign(ver int, context, input []byte) (*SigningResult, error) { return nil, err } + case KeyType_RSA2048, KeyType_RSA4096: + key := p.Keys[ver].RSAKey + + var algo crypto.Hash + switch algorithm { + case "sha2-224": + algo = crypto.SHA224 + case "sha2-256": + algo = crypto.SHA256 + case "sha2-384": + algo = crypto.SHA384 + case "sha2-512": + algo = crypto.SHA512 + default: + return nil, errutil.InternalError{Err: fmt.Sprintf("unsupported algorithm %s", algorithm)} + } + + sig, err = rsa.SignPSS(rand.Reader, key, algo, input, nil) + if err != nil { + return nil, err + } + default: return nil, fmt.Errorf("unsupported key type %v", p.Type) } @@ -792,7 +830,7 @@ func (p *Policy) Sign(ver int, context, input []byte) (*SigningResult, error) { return res, nil } -func (p *Policy) VerifySignature(context, input []byte, sig string) (bool, error) { +func (p *Policy) VerifySignature(context, input []byte, sig, algorithm string) (bool, error) { if !p.Type.SigningSupported() { return false, errutil.UserError{Err: fmt.Sprintf("message verification not supported for key type %v", p.Type)} } @@ -861,6 +899,27 @@ func (p *Policy) VerifySignature(context, input []byte, sig string) (bool, error return ed25519.Verify(key.Public().(ed25519.PublicKey), input, sigBytes), nil + case KeyType_RSA2048, KeyType_RSA4096: + key := p.Keys[ver].RSAKey + + var algo crypto.Hash + switch algorithm { + case "sha2-224": + algo = crypto.SHA224 + case "sha2-256": + algo = crypto.SHA256 + case "sha2-384": + algo = crypto.SHA384 + case "sha2-512": + algo = crypto.SHA512 + default: + return false, errutil.InternalError{Err: fmt.Sprintf("unsupported algorithm %s", algorithm)} + } + + err = rsa.VerifyPSS(&key.PublicKey, algo, input, sigBytes, nil) + + return err == nil, nil + default: return false, errutil.InternalError{Err: fmt.Sprintf("unsupported key type %v", p.Type)} } @@ -927,6 +986,17 @@ func (p *Policy) Rotate(storage logical.Storage) error { } entry.Key = pri entry.FormattedPublicKey = 
base64.StdEncoding.EncodeToString(pub) + + case KeyType_RSA2048, KeyType_RSA4096: + bitSize := 2048 + if p.Type == KeyType_RSA4096 { + bitSize = 4096 + } + + entry.RSAKey, err = rsa.GenerateKey(rand.Reader, bitSize) + if err != nil { + return err + } } p.Keys[p.LatestVersion] = entry diff --git a/website/source/api/secret/transit/index.html.md b/website/source/api/secret/transit/index.html.md index fa4de364a1..eaf32817a5 100644 --- a/website/source/api/secret/transit/index.html.md +++ b/website/source/api/secret/transit/index.html.md @@ -52,6 +52,8 @@ values set here cannot be changed after key creation. (symmetric, supports derivation) - `ecdsa-p256` – ECDSA using the P-256 elliptic curve (asymmetric) - `ed25519` – ED25519 (asymmetric, supports derivation) + - `rsa-2048` - RSA with bit size of 2048 (asymmetric) + - `rsa-4096` - RSA with bit size of 4096 (asymmetric) ### Sample Payload From a1a9707a2cfafe1e9d012e5ba2922f7dcc4e9431 Mon Sep 17 00:00:00 2001 From: vishalnayak Date: Fri, 3 Nov 2017 10:48:39 -0400 Subject: [PATCH 078/329] changelog++ --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index cceb7408f6..49ecdf40fd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,10 @@ DEPRECATIONS/CHANGES: the TTL/max TTL values will now be an integer number of seconds rather than a string. This better matches the API elsewhere in Vault. +FEATURES: + * ** RSA Support for Transit Backend**: Transit backend can now generate RSA + keys which can be used for encryption and signing. [GH-3489] + IMPROVEMENTS: * api: Add ability to set custom headers on each call [GH-3394] From 4d3b3bed0804e67c404344050b11f42d35dc5027 Mon Sep 17 00:00:00 2001 From: Vishal Nayak Date: Fri, 3 Nov 2017 11:17:59 -0400 Subject: [PATCH 079/329] docs: s/persona/alias (#3529) --- .../source/api/secret/identity/index.html.md | 78 +++++++++---------- website/source/api/system/mfa-duo.html.md | 8 +- website/source/api/system/mfa-okta.html.md | 8 +- website/source/api/system/mfa-pingid.html.md | 8 +- .../docs/enterprise/identity/index.html.md | 18 ++--- .../source/docs/enterprise/mfa/index.html.md | 6 +- .../docs/secrets/identity/index.html.md | 8 +- 7 files changed, 67 insertions(+), 67 deletions(-) diff --git a/website/source/api/secret/identity/index.html.md b/website/source/api/secret/identity/index.html.md index df87993706..a2e7269871 100644 --- a/website/source/api/secret/identity/index.html.md +++ b/website/source/api/secret/identity/index.html.md @@ -54,7 +54,7 @@ $ curl \ { "data": { "id": "8d6a45e5-572f-8f13-d226-cd0d1ec57297", - "personas": null + "aliases": null } } ``` @@ -93,7 +93,7 @@ $ curl \ "team": "vault" }, "name": "entity-c323de27-2ad2-5ded-dbf3-0c7ef98bc613", - "personas": [], + "aliases": [], "policies": [ "eng-dev", "infra-dev" @@ -147,14 +147,14 @@ $ curl \ { "data": { "id": "8d6a45e5-572f-8f13-d226-cd0d1ec57297", - "personas": null + "aliases": null } } ``` ## Delete Entity by ID -This endpoint deletes an entity and all its associated personas. +This endpoint deletes an entity and all its associated aliases. | Method | Path | Produces | | :--------- | :-------------------------- | :----------------------| @@ -209,29 +209,29 @@ $ curl \ } ``` -## Register Persona +## Register Alias -This endpoint creates a new persona and attaches it to the entity with the +This endpoint creates a new alias and attaches it to the entity with the given identifier. 
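As a rough illustration of this endpoint from Go rather than curl (not part of the patch itself), a minimal sketch using the Vault API client might look like the following; the entity ID and mount accessor are the placeholder values from the sample payload below, and in practice would come from a prior entity registration and the auth mount listing.

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/hashicorp/vault/api"
)

func main() {
	// Assumes VAULT_ADDR and VAULT_TOKEN are exported in the environment.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	client.SetToken(os.Getenv("VAULT_TOKEN"))

	// Placeholder values mirroring the sample payload below.
	secret, err := client.Logical().Write("identity/alias", map[string]interface{}{
		"name":           "testuser",
		"entity_id":      "404e57bc-952e-8048-7c18-7d5fbaa6586e",
		"mount_accessor": "auth_userpass_e50b1a44",
	})
	if err != nil {
		log.Fatal(err)
	}

	// The write returns the identifier of the newly registered alias.
	fmt.Println("alias id:", secret.Data["id"])
}
```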
| Method | Path | Produces | | :------- | :------------------ | :----------------------| -| `POST` | `/identity/persona` | `200 application/json` | +| `POST` | `/identity/alias` | `200 application/json` | ### Parameters -- `name` (string: Required) - Name of the persona. Name should be the +- `name` (string: Required) - Name of the alias. Name should be the identifier of the client in the authentication source. For example, if the - persona belongs to userpass backend, the name should be a valid username - within userpass backend. If persona belongs to GitHub, it should be the + alias belongs to userpass backend, the name should be a valid username + within userpass backend. If alias belongs to GitHub, it should be the GitHub username. -- `entity_id` (string: required) - Entity ID to which this persona belongs to. +- `entity_id` (string: required) - Entity ID to which this alias belongs to. - `mount_accessor` (string: required) - Accessor of the mount to which the - persona should belong to. + alias should belong to. -- `metadata` `(list of strings: [])` – Metadata to be associated with the persona. Format should be a list of `key=value` pairs. +- `metadata` `(list of strings: [])` – Metadata to be associated with the alias. Format should be a list of `key=value` pairs. ### Sample Payload @@ -251,7 +251,7 @@ $ curl \ --header "X-Vault-Token: ..." \ --request POST \ --data @payload.json \ - https://vault.rocks/v1/identity/persona + https://vault.rocks/v1/identity/alias ``` ### Sample Response @@ -265,24 +265,24 @@ $ curl \ } ``` -## Read Persona by ID +## Read Alias by ID -This endpoint queries the persona by its identifier. +This endpoint queries the alias by its identifier. | Method | Path | Produces | | :------- | :--------------------------- | :--------------------- | -| `GET` | `/identity/persona/id/:id` | `200 application/json` | +| `GET` | `/identity/alias/id/:id` | `200 application/json` | ### Parameters -- `id` `(string: )` – Specifies the identifier of the persona. +- `id` `(string: )` – Specifies the identifier of the alias. ### Sample Request ``` $ curl \ --header "X-Vault-Token: ..." \ - https://vault.rocks/v1/identity/persona/id/34982d3d-e3ce-5d8b-6e5f-b9bb34246c31 + https://vault.rocks/v1/identity/alias/id/34982d3d-e3ce-5d8b-6e5f-b9bb34246c31 ``` ### Sample Response @@ -306,31 +306,31 @@ $ curl \ } ``` -## Update Persona by ID +## Update Alias by ID -This endpoint is used to update an existing persona. +This endpoint is used to update an existing alias. | Method | Path | Produces | | :------- | :--------------------------- | :--------------------- | -| `POST` | `/identity/persona/id/:id` | `200 application/json` | +| `POST` | `/identity/alias/id/:id` | `200 application/json` | ### Parameters - `id` `(string: )` – Specifies the identifier of the entity. -- `name` (string: Required) - Name of the persona. Name should be the +- `name` (string: Required) - Name of the alias. Name should be the identifier of the client in the authentication source. For example, if the - persona belongs to userpass backend, the name should be a valid username - within userpass backend. If persona belongs to GitHub, it should be the + alias belongs to userpass backend, the name should be a valid username + within userpass backend. If alias belongs to GitHub, it should be the GitHub username. -- `entity_id` (string: required) - Entity ID to which this persona belongs to. +- `entity_id` (string: required) - Entity ID to which this alias belongs to. 
- `mount_accessor` (string: required) - Accessor of the mount to which the - persona should belong to. + alias should belong to. - `metadata` `(list of strings: [])` – Metadata to be associated with the - persona. Format should be a list of `key=value` pairs. + alias. Format should be a list of `key=value` pairs. ### Sample Payload @@ -350,7 +350,7 @@ $ curl \ --header "X-Vault-Token: ..." \ --request POST \ --data @payload.json \ - https://vault.rocks/v1/identity/persona/id/34982d3d-e3ce-5d8b-6e5f-b9bb34246c31 + https://vault.rocks/v1/identity/alias/id/34982d3d-e3ce-5d8b-6e5f-b9bb34246c31 ``` ### Sample Response @@ -364,17 +364,17 @@ $ curl \ } ``` -### Delete Persona by ID +### Delete Alias by ID -This endpoint deletes a persona from its corresponding entity. +This endpoint deletes a alias from its corresponding entity. | Method | Path | Produces | | :--------- | :-------------------------- | :----------------------| -| `DELETE` | `/identity/persona/id/:id` | `204 (empty body)` | +| `DELETE` | `/identity/alias/id/:id` | `204 (empty body)` | ## Parameters -- `id` `(string: )` – Specifies the identifier of the persona. +- `id` `(string: )` – Specifies the identifier of the alias. ### Sample Request @@ -382,17 +382,17 @@ This endpoint deletes a persona from its corresponding entity. $ curl \ --header "X-Vault-Token: ..." \ --request DELETE \ - https://vault.rocks/v1/identity/persona/id/34982d3d-e3ce-5d8b-6e5f-b9bb34246c31 + https://vault.rocks/v1/identity/alias/id/34982d3d-e3ce-5d8b-6e5f-b9bb34246c31 ``` -### List Personas by ID +### List Aliases by ID -This endpoint returns a list of available personas by their identifiers. +This endpoint returns a list of available aliases by their identifiers. | Method | Path | Produces | | :------- | :--------------------------- | :--------------------- | -| `LIST` | `/identity/persona/id` | `200 application/json` | -| `GET` | `/identity/persona/id?list=true` | `200 application/json` | +| `LIST` | `/identity/alias/id` | `200 application/json` | +| `GET` | `/identity/alias/id?list=true` | `200 application/json` | ### Sample Request @@ -400,7 +400,7 @@ This endpoint returns a list of available personas by their identifiers. $ curl \ --header "X-Vault-Token: ..." \ --request LIST \ - https://vault.rocks/v1/identity/persona/id + https://vault.rocks/v1/identity/alias/id ``` ### Sample Response diff --git a/website/source/api/system/mfa-duo.html.md b/website/source/api/system/mfa-duo.html.md index db081fbca0..e0057042dd 100644 --- a/website/source/api/system/mfa-duo.html.md +++ b/website/source/api/system/mfa-duo.html.md @@ -18,12 +18,12 @@ This endpoint defines a MFA method of type Duo. - `name` `(string: )` – Name of the MFA method. -- `mount_accessor` `(string: )` - The mount to tie this method to for use in automatic mappings. The mapping will use the Name field of Personas associated with this mount as the username in the mapping. +- `mount_accessor` `(string: )` - The mount to tie this method to for use in automatic mappings. The mapping will use the Name field of Aliases associated with this mount as the username in the mapping. -- `username_format` `(string)` - A format string for mapping Identity names to MFA method names. Values to substitute should be placed in `{{}}`. For example, `"{{persona.name}}@example.com"`. If blank, the Persona's Name field will be used as-is. 
Currently-supported mappings: - - persona.name: The name returned by the mount configured via the `mount_accessor` parameter +- `username_format` `(string)` - A format string for mapping Identity names to MFA method names. Values to substitute should be placed in `{{}}`. For example, `"{{alias.name}}@example.com"`. If blank, the Alias's Name field will be used as-is. Currently-supported mappings: + - alias.name: The name returned by the mount configured via the `mount_accessor` parameter - entity.name: The name configured for the Entity - - persona.metadata.``: The value of the Persona's metadata parameter + - alias.metadata.``: The value of the Alias's metadata parameter - entity.metadata.``: The value of the Entity's metadata paramater - `secret_key` `(string)` - Secret key for Duo. diff --git a/website/source/api/system/mfa-okta.html.md b/website/source/api/system/mfa-okta.html.md index 1b82370eb6..79664df97a 100644 --- a/website/source/api/system/mfa-okta.html.md +++ b/website/source/api/system/mfa-okta.html.md @@ -18,12 +18,12 @@ This endpoint defines a MFA method of type Okta. - `name` `(string: )` – Name of the MFA method. -- `mount_accessor` `(string: )` - The mount to tie this method to for use in automatic mappings. The mapping will use the Name field of Personas associated with this mount as the username in the mapping. +- `mount_accessor` `(string: )` - The mount to tie this method to for use in automatic mappings. The mapping will use the Name field of Aliases associated with this mount as the username in the mapping. -- `username_format` `(string)` - A format string for mapping Identity names to MFA method names. Values to substitute should be placed in `{{}}`. For example, `"{{persona.name}}@example.com"`. If blank, the Persona's Name field will be used as-is. Currently-supported mappings: - - persona.name: The name returned by the mount configured via the `mount_accessor` parameter +- `username_format` `(string)` - A format string for mapping Identity names to MFA method names. Values to substitute should be placed in `{{}}`. For example, `"{{alias.name}}@example.com"`. If blank, the Alias's Name field will be used as-is. Currently-supported mappings: + - alias.name: The name returned by the mount configured via the `mount_accessor` parameter - entity.name: The name configured for the Entity - - persona.metadata.``: The value of the Persona's metadata parameter + - alias.metadata.``: The value of the Alias's metadata parameter - entity.metadata.``: The value of the Entity's metadata paramater - `org_name` `(string)` - Name of the organization to be used in the Okta API. diff --git a/website/source/api/system/mfa-pingid.html.md b/website/source/api/system/mfa-pingid.html.md index a519f87ee1..b73e4a4964 100644 --- a/website/source/api/system/mfa-pingid.html.md +++ b/website/source/api/system/mfa-pingid.html.md @@ -18,12 +18,12 @@ This endpoint defines a MFA method of type PingID. - `name` `(string: )` – Name of the MFA method. -- `mount_accessor` `(string: )` - The mount to tie this method to for use in automatic mappings. The mapping will use the Name field of Personas associated with this mount as the username in the mapping. +- `mount_accessor` `(string: )` - The mount to tie this method to for use in automatic mappings. The mapping will use the Name field of Aliases associated with this mount as the username in the mapping. -- `username_format` `(string)` - A format string for mapping Identity names to MFA method names. Values to substitute should be placed in `{{}}`. 
For example, `"{{persona.name}}@example.com"`. If blank, the Persona's Name field will be used as-is. Currently-supported mappings: - - persona.name: The name returned by the mount configured via the `mount_accessor` parameter +- `username_format` `(string)` - A format string for mapping Identity names to MFA method names. Values to substitute should be placed in `{{}}`. For example, `"{{alias.name}}@example.com"`. If blank, the Alias's Name field will be used as-is. Currently-supported mappings: + - alias.name: The name returned by the mount configured via the `mount_accessor` parameter - entity.name: The name configured for the Entity - - persona.metadata.``: The value of the Persona's metadata parameter + - alias.metadata.``: The value of the Alias's metadata parameter - entity.metadata.``: The value of the Entity's metadata paramater - `settings_file_base64` `(string)` - A base64-encoded third-party settings file retrieved from PingID's configuration page. diff --git a/website/source/docs/enterprise/identity/index.html.md b/website/source/docs/enterprise/identity/index.html.md index c51719f5a5..a71e2d38f4 100644 --- a/website/source/docs/enterprise/identity/index.html.md +++ b/website/source/docs/enterprise/identity/index.html.md @@ -14,12 +14,12 @@ the tokens used in Vault. ## Concepts -### Entities and Personas +### Entities and Aliases Each user will have multiple accounts with various identity providers. Users can now be mapped as `Entities` and their corresponding accounts with -authentication providers can be mapped as `Personas`. In essence, each entity -is made up of zero or more personas. +authentication providers can be mapped as `Aliases`. In essence, each entity +is made up of zero or more aliases. ### Entity Management @@ -50,15 +50,15 @@ _additional_ capabilities and not a replacement for the policies on the token, and to know the full set of capabilities of the token with an associated entity identifier, the policies on the token should be taken into account. -### Mount Bound Personas +### Mount Bound Aliases Vault supports multiple authentication backends and also allows enabling same -authentication backend on different mounts. The persona name of the user with +authentication backend on different mounts. The alias name of the user with each identity provider will be unique within the provider. But Vault also needs -to uniquely distinguish between conflicting persona names across different -mounts of these identity providers. Hence the persona name, in combination with +to uniquely distinguish between conflicting alias names across different +mounts of these identity providers. Hence the alias name, in combination with the authentication backend mount's accessor serve as the unique identifier of a -persona. +alias. ### Implicit Entities @@ -66,7 +66,7 @@ Operators can create entities for all the users of an auth mount beforehand and assign policies to them, so that when users login, the desired capabilities to the tokens via entities are already assigned. But if that's not done, upon a successful user login from any of the authentication backends, -Vault will create a new entity and assign a persona against the login that was +Vault will create a new entity and assign a alias against the login that was successful. 
Note that, tokens created using the token authentication backend will not have diff --git a/website/source/docs/enterprise/mfa/index.html.md b/website/source/docs/enterprise/mfa/index.html.md index 20dfa2a8e9..438aae9b79 100644 --- a/website/source/docs/enterprise/mfa/index.html.md +++ b/website/source/docs/enterprise/mfa/index.html.md @@ -25,17 +25,17 @@ MFA in Vault can be of the following types. - `Okta` - If Okta push is configured and enabled on a path, then the enrolled device of the user will get a push notification to approve or deny the access to the API. The Okta username will be derived from the caller identity's - persona. + alias. - `Duo` - If Duo push is configured and enabled on a path, then the enrolled device of the user will get a push notification to approve or deny the access to the API. The Duo username will be derived from the caller identity's - persona. + alias. - `PingID` - If PingID push is configured and enabled on a path, then the enrolled device of the user will get a push notification to approve or deny the access to the API. The PingID username will be derived from the caller - identity's persona. + identity's alias. ## Configuring MFA Methods diff --git a/website/source/docs/secrets/identity/index.html.md b/website/source/docs/secrets/identity/index.html.md index 540b6a1741..ff1dd17dd7 100644 --- a/website/source/docs/secrets/identity/index.html.md +++ b/website/source/docs/secrets/identity/index.html.md @@ -12,18 +12,18 @@ Name: `identity` The Identity secret backend is the identity management solution for Vault. It internally maintains the clients who are recognized by Vault. Each client is -internally termed as an `Entity`. An entity can have multiple `Personas`. For +internally termed as an `Entity`. An entity can have multiple `Aliases`. For example, a single user who has accounts in both Github and LDAP, can be mapped -to a single entity in Vault that has 2 personas, one of type Github and one of +to a single entity in Vault that has 2 aliases, one of type Github and one of type LDAP. When a client authenticates via any of the credential backend (except the Token backend), Vault creates a new entity and attaches a new -persona to it, if an entity doesn't already exist. The entity identifier will +alias to it, if an entity doesn't already exist. The entity identifier will be tied to the authenticated token. When such tokens are put to use, their entity identifiers are audit logged, marking a trail of actions performed by specific users. Identity store allows operators to **manage** the entities in Vault. Entities -can be created and personas can be tied to entities, via the ACL'd API. There +can be created and aliases can be tied to entities, via the ACL'd API. There can be policies set on the entities which adds capabilities to the tokens that are tied to entity identiers. 
The capabilities granted to tokens via the entities are **an addition** to the existing capabilities of the token and From d5ad857a862dfd4c1d69eeea4f9c0b00f9ec96bb Mon Sep 17 00:00:00 2001 From: Vishal Nayak Date: Fri, 3 Nov 2017 11:20:10 -0400 Subject: [PATCH 080/329] Capabilities responds considering policies on entities and groups (#3522) * Capabilities endpoint will now return considering policies on entities and groups * refactor the policy derivation into a separate function * Docs: Update docs to reflect the change in capabilities endpoint --- vault/capabilities.go | 13 +++ vault/capabilities_test.go | 109 ++++++++++++++++++ vault/core.go | 95 ++++++++------- .../api/system/capabilities-accessor.html.md | 7 +- .../api/system/capabilities-self.html.md | 6 +- .../source/api/system/capabilities.html.md | 6 +- 6 files changed, 191 insertions(+), 45 deletions(-) diff --git a/vault/capabilities.go b/vault/capabilities.go index bbde54c8f4..795a150f91 100644 --- a/vault/capabilities.go +++ b/vault/capabilities.go @@ -37,6 +37,19 @@ func (c *Core) Capabilities(token, path string) ([]string, error) { policies = append(policies, policy) } + _, derivedPolicies, err := c.fetchEntityAndDerivedPolicies(te.EntityID) + if err != nil { + return nil, err + } + + for _, item := range derivedPolicies { + policy, err := c.policyStore.GetPolicy(item, PolicyTypeToken) + if err != nil { + return nil, err + } + policies = append(policies, policy) + } + if len(policies) == 0 { return []string{DenyCapability}, nil } diff --git a/vault/capabilities_test.go b/vault/capabilities_test.go index 19bd404262..dbe0103fa2 100644 --- a/vault/capabilities_test.go +++ b/vault/capabilities_test.go @@ -2,9 +2,118 @@ package vault import ( "reflect" + "sort" "testing" + + "github.com/hashicorp/vault/logical" ) +func TestCapabilities_DerivedPolicies(t *testing.T) { + var resp *logical.Response + var err error + + i, _, c := testIdentityStoreWithGithubAuth(t) + + policy1 := ` +name = "policy1" +path "secret/sample" { + capabilities = ["update", "create", "sudo"] +} +` + policy2 := ` +name = "policy2" +path "secret/sample" { + capabilities = ["read", "delete"] +} +` + + policy3 := ` +name = "policy3" +path "secret/sample" { + capabilities = ["list", "list"] +} +` + // Create the above policies + policy, _ := ParseACLPolicy(policy1) + err = c.policyStore.SetPolicy(policy) + if err != nil { + t.Fatalf("err: %v", err) + } + + policy, _ = ParseACLPolicy(policy2) + err = c.policyStore.SetPolicy(policy) + if err != nil { + t.Fatalf("err: %v", err) + } + + policy, _ = ParseACLPolicy(policy3) + err = c.policyStore.SetPolicy(policy) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Create an entity and assign policy1 to it + entityReq := &logical.Request{ + Path: "entity", + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "policies": "policy1", + }, + } + resp, err = i.HandleRequest(entityReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %#v\n", resp, err) + } + entityID := resp.Data["id"].(string) + + // Create a token for the entity and assign policy2 on the token + ent := &TokenEntry{ + ID: "capabilitiestoken", + Path: "secret/sample", + Policies: []string{"policy2"}, + EntityID: entityID, + } + if err := c.tokenStore.create(ent); err != nil { + t.Fatalf("err: %v", err) + } + + actual, err := c.Capabilities("capabilitiestoken", "secret/sample") + if err != nil { + t.Fatalf("err: %v", err) + } + expected := []string{"create", "read", "sudo", "delete", "update"} + 
sort.Strings(actual) + sort.Strings(expected) + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: got\n%#v\nexpected\n%#v\n", actual, expected) + } + + // Create a group and add the above created entity to it + groupReq := &logical.Request{ + Path: "group", + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "member_entity_ids": []string{entityID}, + "policies": "policy3", + }, + } + resp, err = i.HandleRequest(groupReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %#v\n", resp, err) + } + + actual, err = c.Capabilities("capabilitiestoken", "secret/sample") + if err != nil { + t.Fatalf("err: %v", err) + } + expected = []string{"create", "read", "sudo", "delete", "update", "list"} + sort.Strings(actual) + sort.Strings(expected) + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: got\n%#v\nexpected\n%#v\n", actual, expected) + } +} + func TestCapabilities(t *testing.T) { c, _, token := TestCoreUnsealed(t) diff --git a/vault/core.go b/vault/core.go index 3a155f16e2..6ab857dd0c 100644 --- a/vault/core.go +++ b/vault/core.go @@ -661,6 +661,57 @@ func (c *Core) LookupToken(token string) (*TokenEntry, error) { return c.tokenStore.Lookup(token) } +// fetchEntityAndDerivedPolicies returns the entity object for the given entity +// ID. If the entity is merged into a different entity object, the entity into +// which the given entity ID is merged into will be returned. This function +// also returns the cumulative list of policies that the entity is entitled to. +// This list includes the policies from the entity itself and from all the +// groups in which the given entity ID is a member of. +func (c *Core) fetchEntityAndDerivedPolicies(entityID string) (*identity.Entity, []string, error) { + if entityID == "" { + return nil, nil, nil + } + + //c.logger.Debug("core: entity set on the token", "entity_id", te.EntityID) + + // Fetch the entity + entity, err := c.identityStore.MemDBEntityByID(entityID, false) + if err != nil { + c.logger.Error("core: failed to lookup entity using its ID", "error", err) + return nil, nil, err + } + + if entity == nil { + // If there was no corresponding entity object found, it is + // possible that the entity got merged into another entity. Try + // finding entity based on the merged entity index. + entity, err = c.identityStore.MemDBEntityByMergedEntityID(entityID, false) + if err != nil { + c.logger.Error("core: failed to lookup entity in merged entity ID index", "error", err) + return nil, nil, err + } + } + + var policies []string + if entity != nil { + //c.logger.Debug("core: entity successfully fetched; adding entity policies to token's policies to create ACL") + + // Attach the policies on the entity + policies = append(policies, entity.Policies...) + + groupPolicies, err := c.identityStore.groupPoliciesByEntityID(entity.ID) + if err != nil { + c.logger.Error("core: failed to fetch group policies", "error", err) + return nil, nil, err + } + + // Attach the policies from all the groups + policies = append(policies, groupPolicies...) 
+ } + + return entity, policies, err +} + func (c *Core) fetchACLTokenEntryAndEntity(clientToken string) (*ACL, *TokenEntry, *identity.Entity, error) { defer metrics.MeasureSince([]string{"core", "fetch_acl_and_token"}, time.Now()) @@ -688,47 +739,13 @@ func (c *Core) fetchACLTokenEntryAndEntity(clientToken string) (*ACL, *TokenEntr tokenPolicies := te.Policies - var entity *identity.Entity - - // Append the policies of the entity to those on the tokens and create ACL - // off of the combined list. - if te.EntityID != "" { - //c.logger.Debug("core: entity set on the token", "entity_id", te.EntityID) - // Fetch entity for the entity ID in the token entry - entity, err = c.identityStore.MemDBEntityByID(te.EntityID, false) - if err != nil { - c.logger.Error("core: failed to lookup entity using its ID", "error", err) - return nil, nil, nil, ErrInternalError - } - - if entity == nil { - // If there was no corresponding entity object found, it is - // possible that the entity got merged into another entity. Try - // finding entity based on the merged entity index. - entity, err = c.identityStore.MemDBEntityByMergedEntityID(te.EntityID, false) - if err != nil { - c.logger.Error("core: failed to lookup entity in merged entity ID index", "error", err) - return nil, nil, nil, ErrInternalError - } - } - - if entity != nil { - //c.logger.Debug("core: entity successfully fetched; adding entity policies to token's policies to create ACL") - // Attach the policies on the entity to the policies tied to the token - tokenPolicies = append(tokenPolicies, entity.Policies...) - - groupPolicies, err := c.identityStore.groupPoliciesByEntityID(entity.ID) - if err != nil { - c.logger.Error("core: failed to fetch group policies", "error", err) - return nil, nil, nil, ErrInternalError - } - - // Attach the policies from all the groups to which this entity ID - // belongs to - tokenPolicies = append(tokenPolicies, groupPolicies...) - } + entity, derivedPolicies, err := c.fetchEntityAndDerivedPolicies(te.EntityID) + if err != nil { + return nil, nil, nil, ErrInternalError } + tokenPolicies = append(tokenPolicies, derivedPolicies...) + // Construct the corresponding ACL object acl, err := c.policyStore.ACL(tokenPolicies...) if err != nil { diff --git a/website/source/api/system/capabilities-accessor.html.md b/website/source/api/system/capabilities-accessor.html.md index 06a2bf3504..011850ce72 100644 --- a/website/source/api/system/capabilities-accessor.html.md +++ b/website/source/api/system/capabilities-accessor.html.md @@ -9,8 +9,11 @@ description: |- # `/sys/capabilities-accessor` -The `/sys/capabilities-accessor` endpoint is used to fetch the capabilities of a -token associated with an accessor. +The `/sys/capabilities-accessor` endpoint is used to fetch the capabilities of +a token associated with an accessor. The capabilities returned will be derived +from the policies that are on the token, and from the policies to which token +is entitled to through the entity and entity's group memberships. + ## Query Token Accessor Capabilities diff --git a/website/source/api/system/capabilities-self.html.md b/website/source/api/system/capabilities-self.html.md index 4adfb96cd0..6957ea9efd 100644 --- a/website/source/api/system/capabilities-self.html.md +++ b/website/source/api/system/capabilities-self.html.md @@ -9,8 +9,10 @@ description: |- # `/sys/capabilities-self` -The `/sys/capabilities-self` endpoint is used to fetch the capabilities of a the -supplied token. 
+The `/sys/capabilities-self` endpoint is used to fetch the capabilities of a +the supplied token. The capabilities returned will be derived from the +policies that are on the token, and from the policies to which token is +entitled to through the entity and entity's group memberships. ## Query Self Capabilities diff --git a/website/source/api/system/capabilities.html.md b/website/source/api/system/capabilities.html.md index a8ef2c4e5d..c2449b03c0 100644 --- a/website/source/api/system/capabilities.html.md +++ b/website/source/api/system/capabilities.html.md @@ -9,8 +9,10 @@ description: |- # `/sys/capabilities` -The `/sys/capabilities` endpoint is used to fetch the capabilities of a token on -a given path. +The `/sys/capabilities` endpoint is used to fetch the capabilities of a token +on a given path. The capabilities returned will be derived from the policies +that are on the token, and from the policies to which token is entitled to +through the entity and entity's group memberships. ## Query Token Capabilities From d2e23f183c76a1447a3fd380b99df8b7f0f62fc7 Mon Sep 17 00:00:00 2001 From: Chris Hoffman Date: Fri, 3 Nov 2017 11:26:22 -0400 Subject: [PATCH 081/329] Fix group/policy iterators with multiple groups (#3527) * fixing some group iterators * fix slice rewrite --- vault/identity_store_util.go | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/vault/identity_store_util.go b/vault/identity_store_util.go index b083a53dc6..4fb99e8be8 100644 --- a/vault/identity_store_util.go +++ b/vault/identity_store_util.go @@ -1983,10 +1983,11 @@ func (i *IdentityStore) transitiveGroupsByEntityID(entityID string) ([]*identity visited := make(map[string]bool) var tGroups []*identity.Group for _, group := range groups { - tGroups, err = i.collectGroupsReverseDFS(group, visited, nil) + gGroups, err := i.collectGroupsReverseDFS(group, visited, nil) if err != nil { return nil, err } + tGroups = append(tGroups, gGroups...) } // Remove duplicates @@ -1995,7 +1996,7 @@ func (i *IdentityStore) transitiveGroupsByEntityID(entityID string) ([]*identity groupMap[group.ID] = group } - tGroups = nil + tGroups = make([]*identity.Group, 0, len(groupMap)) for _, group := range groupMap { tGroups = append(tGroups, group) } @@ -2022,10 +2023,11 @@ func (i *IdentityStore) collectGroupsReverseDFS(group *identity.Group, visited m if err != nil { return nil, err } - groups, err = i.collectGroupsReverseDFS(parentGroup, visited, groups) + pGroups, err := i.collectGroupsReverseDFS(parentGroup, visited, groups) if err != nil { return nil, fmt.Errorf("failed to collect group at parent group ID %q", parentGroup.ID) } + groups = append(groups, pGroups...) } return groups, nil @@ -2050,13 +2052,14 @@ func (i *IdentityStore) collectPoliciesReverseDFS(group *identity.Group, visited if err != nil { return nil, err } - policies, err = i.collectPoliciesReverseDFS(parentGroup, visited, policies) + parentPolicies, err := i.collectPoliciesReverseDFS(parentGroup, visited, policies) if err != nil { return nil, fmt.Errorf("failed to collect policies at parent group ID %q", parentGroup.ID) } + policies = append(policies, parentPolicies...) 
} - return policies, nil + return strutil.RemoveDuplicates(policies, false), nil } func (i *IdentityStore) detectCycleDFS(visited map[string]bool, startingGroupID, groupID string) (bool, error) { @@ -2225,10 +2228,6 @@ func (i *IdentityStore) MemDBGroupByAliasID(aliasID string, clone bool) (*identi } func (i *IdentityStore) deleteGroupAlias(aliasID string) error { - var err error - var alias *identity.Alias - var group *identity.Group - if aliasID == "" { return fmt.Errorf("missing alias ID") } @@ -2239,7 +2238,7 @@ func (i *IdentityStore) deleteGroupAlias(aliasID string) error { txn := i.db.Txn(true) defer txn.Abort() - alias, err = i.MemDBAliasByIDInTxn(txn, aliasID, false, true) + alias, err := i.MemDBAliasByIDInTxn(txn, aliasID, false, true) if err != nil { return err } @@ -2248,7 +2247,7 @@ func (i *IdentityStore) deleteGroupAlias(aliasID string) error { return nil } - group, err = i.MemDBGroupByAliasIDInTxn(txn, alias.ID, true) + group, err := i.MemDBGroupByAliasIDInTxn(txn, alias.ID, true) if err != nil { return err } From 8004f052da4c68f7c3b926e60c51fc8064e83617 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Fri, 3 Nov 2017 11:43:31 -0400 Subject: [PATCH 082/329] Add some more SealWrap declarations (#3531) --- builtin/credential/aws/backend.go | 3 +++ builtin/credential/ldap/backend.go | 4 ++++ builtin/credential/okta/backend.go | 3 +++ builtin/credential/radius/backend.go | 4 ++++ builtin/logical/aws/backend.go | 3 +++ builtin/logical/cassandra/backend.go | 6 ++++++ builtin/logical/consul/backend.go | 6 ++++++ builtin/logical/database/backend.go | 6 ++++++ builtin/logical/mongodb/backend.go | 6 ++++++ builtin/logical/mssql/backend.go | 6 ++++++ builtin/logical/mysql/backend.go | 6 ++++++ builtin/logical/postgresql/backend.go | 6 ++++++ builtin/logical/rabbitmq/backend.go | 6 ++++++ 13 files changed, 65 insertions(+) diff --git a/builtin/credential/aws/backend.go b/builtin/credential/aws/backend.go index 30feba9ca0..cb1c97017e 100644 --- a/builtin/credential/aws/backend.go +++ b/builtin/credential/aws/backend.go @@ -99,6 +99,9 @@ func Backend(conf *logical.BackendConfig) (*backend, error) { LocalStorage: []string{ "whitelist/identity/", }, + SealWrapStorage: []string{ + "config/client", + }, }, Paths: []*framework.Path{ pathLogin(b), diff --git a/builtin/credential/ldap/backend.go b/builtin/credential/ldap/backend.go index 08af4c77c4..9099905737 100644 --- a/builtin/credential/ldap/backend.go +++ b/builtin/credential/ldap/backend.go @@ -31,6 +31,10 @@ func Backend() *backend { Unauthenticated: []string{ "login/*", }, + + SealWrapStorage: []string{ + "config", + }, }, Paths: append([]*framework.Path{ diff --git a/builtin/credential/okta/backend.go b/builtin/credential/okta/backend.go index eab2c7ce58..e91d663587 100644 --- a/builtin/credential/okta/backend.go +++ b/builtin/credential/okta/backend.go @@ -25,6 +25,9 @@ func Backend() *backend { Unauthenticated: []string{ "login/*", }, + SealWrapStorage: []string{ + "config", + }, }, Paths: append([]*framework.Path{ diff --git a/builtin/credential/radius/backend.go b/builtin/credential/radius/backend.go index 49dcb7f6f0..c986ff9dc1 100644 --- a/builtin/credential/radius/backend.go +++ b/builtin/credential/radius/backend.go @@ -26,6 +26,10 @@ func Backend() *backend { "login", "login/*", }, + + SealWrapStorage: []string{ + "config", + }, }, Paths: append([]*framework.Path{ diff --git a/builtin/logical/aws/backend.go b/builtin/logical/aws/backend.go index b6341e0620..c76a95e851 100644 --- a/builtin/logical/aws/backend.go +++ 
b/builtin/logical/aws/backend.go @@ -25,6 +25,9 @@ func Backend() *backend { LocalStorage: []string{ framework.WALPrefix, }, + SealWrapStorage: []string{ + "config/root", + }, }, Paths: []*framework.Path{ diff --git a/builtin/logical/cassandra/backend.go b/builtin/logical/cassandra/backend.go index dd54ba5b42..906ad72c57 100644 --- a/builtin/logical/cassandra/backend.go +++ b/builtin/logical/cassandra/backend.go @@ -25,6 +25,12 @@ func Backend() *backend { b.Backend = &framework.Backend{ Help: strings.TrimSpace(backendHelp), + PathsSpecial: &logical.Paths{ + SealWrapStorage: []string{ + "config/connection", + }, + }, + Paths: []*framework.Path{ pathConfigConnection(&b), pathRoles(&b), diff --git a/builtin/logical/consul/backend.go b/builtin/logical/consul/backend.go index 9fd09ac811..74551320c4 100644 --- a/builtin/logical/consul/backend.go +++ b/builtin/logical/consul/backend.go @@ -16,6 +16,12 @@ func Factory(conf *logical.BackendConfig) (logical.Backend, error) { func Backend() *backend { var b backend b.Backend = &framework.Backend{ + PathsSpecial: &logical.Paths{ + SealWrapStorage: []string{ + "config/access", + }, + }, + Paths: []*framework.Path{ pathConfigAccess(), pathListRoles(&b), diff --git a/builtin/logical/database/backend.go b/builtin/logical/database/backend.go index ffc1a40aa6..a8a54a7cde 100644 --- a/builtin/logical/database/backend.go +++ b/builtin/logical/database/backend.go @@ -28,6 +28,12 @@ func Backend(conf *logical.BackendConfig) *databaseBackend { b.Backend = &framework.Backend{ Help: strings.TrimSpace(backendHelp), + PathsSpecial: &logical.Paths{ + SealWrapStorage: []string{ + "config/*", + }, + }, + Paths: []*framework.Path{ pathListPluginConnection(&b), pathConfigurePluginConnection(&b), diff --git a/builtin/logical/mongodb/backend.go b/builtin/logical/mongodb/backend.go index d850e8aa8b..860f5785f8 100644 --- a/builtin/logical/mongodb/backend.go +++ b/builtin/logical/mongodb/backend.go @@ -24,6 +24,12 @@ func Backend() *framework.Backend { b.Backend = &framework.Backend{ Help: strings.TrimSpace(backendHelp), + PathsSpecial: &logical.Paths{ + SealWrapStorage: []string{ + "config/connection", + }, + }, + Paths: []*framework.Path{ pathConfigConnection(&b), pathConfigLease(&b), diff --git a/builtin/logical/mssql/backend.go b/builtin/logical/mssql/backend.go index ccd981badd..5cdeb3a329 100644 --- a/builtin/logical/mssql/backend.go +++ b/builtin/logical/mssql/backend.go @@ -24,6 +24,12 @@ func Backend() *backend { b.Backend = &framework.Backend{ Help: strings.TrimSpace(backendHelp), + PathsSpecial: &logical.Paths{ + SealWrapStorage: []string{ + "config/connection", + }, + }, + Paths: []*framework.Path{ pathConfigConnection(&b), pathConfigLease(&b), diff --git a/builtin/logical/mysql/backend.go b/builtin/logical/mysql/backend.go index a89cc49de3..18be6ecd00 100644 --- a/builtin/logical/mysql/backend.go +++ b/builtin/logical/mysql/backend.go @@ -24,6 +24,12 @@ func Backend() *backend { b.Backend = &framework.Backend{ Help: strings.TrimSpace(backendHelp), + PathsSpecial: &logical.Paths{ + SealWrapStorage: []string{ + "config/connection", + }, + }, + Paths: []*framework.Path{ pathConfigConnection(&b), pathConfigLease(&b), diff --git a/builtin/logical/postgresql/backend.go b/builtin/logical/postgresql/backend.go index 4a689f899b..2d509d3511 100644 --- a/builtin/logical/postgresql/backend.go +++ b/builtin/logical/postgresql/backend.go @@ -25,6 +25,12 @@ func Backend(conf *logical.BackendConfig) *backend { b.Backend = &framework.Backend{ Help: 
strings.TrimSpace(backendHelp), + PathsSpecial: &logical.Paths{ + SealWrapStorage: []string{ + "config/connection", + }, + }, + Paths: []*framework.Path{ pathConfigConnection(&b), pathConfigLease(&b), diff --git a/builtin/logical/rabbitmq/backend.go b/builtin/logical/rabbitmq/backend.go index 1e3f1ec061..e66e5796cf 100644 --- a/builtin/logical/rabbitmq/backend.go +++ b/builtin/logical/rabbitmq/backend.go @@ -26,6 +26,12 @@ func Backend() *backend { b.Backend = &framework.Backend{ Help: strings.TrimSpace(backendHelp), + PathsSpecial: &logical.Paths{ + SealWrapStorage: []string{ + "config/connection", + }, + }, + Paths: []*framework.Path{ pathConfigConnection(&b), pathConfigLease(&b), From 3d51b92648a898784209b5088898bab66133a919 Mon Sep 17 00:00:00 2001 From: Ben Higgins Date: Fri, 3 Nov 2017 10:38:16 -0700 Subject: [PATCH 083/329] vault: recover from standby losing etcd lease (#3031) (#3511) This change makes these errors transient instead of permanent: [ERROR] core: failed to acquire lock: error=etcdserver: requested lease not found After this change, there can still be one of these errors when a standby vault that lost its lease tries to become leader, but on the next lock acquisition attempt a new session will be created. With this new session, the standby will be able to become the leader. --- physical/etcd/etcd3.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/physical/etcd/etcd3.go b/physical/etcd/etcd3.go index 7d9861f227..9561eaae11 100644 --- a/physical/etcd/etcd3.go +++ b/physical/etcd/etcd3.go @@ -267,6 +267,21 @@ func (c *EtcdLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { return nil, EtcdLockHeldError } + select { + case _, ok := <-c.etcdSession.Done(): + if !ok { + // The session's done channel is closed, so the session is over, + // and we need a new one + session, err := concurrency.NewSession(c.etcd, concurrency.WithTTL(etcd3LockTimeoutInSeconds)) + if err != nil { + return nil, err + } + c.etcdSession = session + c.etcdMu = concurrency.NewMutex(session, c.prefix) + } + default: + } + ctx, cancel := context.WithCancel(context.Background()) go func() { <-stopCh From 6e4cc6af0114abe1614c71837d2319a4acfe4084 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Fri, 3 Nov 2017 13:38:55 -0400 Subject: [PATCH 084/329] changelog++ --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 49ecdf40fd..e931829099 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -50,6 +50,8 @@ BUG FIXES: responses when requests were forwarded to the active node [GH-3485] * physical/etcd3: Fix some listing issues due to how etcd3 does prefix matching [GH-3406] + * physical/etcd3: Fix case where standbys can lose their etcd client lease + [GH-3031] * physical/file: Fix listing when underscores are the first component of a path [GH-3476] * plugins: Allow response errors to be returned from backend plugins [GH-3412] From 68320d0c38d4c3f1cb0c2aab77da907ae5ed0b16 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Fri, 3 Nov 2017 13:40:19 -0400 Subject: [PATCH 085/329] changelog++ --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e931829099..551b14fba0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ DEPRECATIONS/CHANGES: a string. This better matches the API elsewhere in Vault. FEATURES: + * ** RSA Support for Transit Backend**: Transit backend can now generate RSA keys which can be used for encryption and signing. 
[GH-3489] From ca76bc4f441e3379ec3fb9d9a138580072d37120 Mon Sep 17 00:00:00 2001 From: Calvin Leung Huang Date: Fri, 3 Nov 2017 17:12:03 -0400 Subject: [PATCH 086/329] Return role info for each role on pathRoleList (#3532) * Return role info for each role on pathRoleList * Change roles -> key_info, only return key_type * Do not initialize result map in parseRole, refactor ListResponseWithInfo * Add role list test --- builtin/logical/ssh/backend_test.go | 44 +++++++ builtin/logical/ssh/path_roles.go | 173 ++++++++++++++++++---------- logical/response.go | 20 ++++ 3 files changed, 174 insertions(+), 63 deletions(-) diff --git a/builtin/logical/ssh/backend_test.go b/builtin/logical/ssh/backend_test.go index 139d24acad..654f7bd285 100644 --- a/builtin/logical/ssh/backend_test.go +++ b/builtin/logical/ssh/backend_test.go @@ -271,6 +271,31 @@ func TestSSHBackend_Lookup(t *testing.T) { }) } +func TestSSHBackend_RoleList(t *testing.T) { + testOTPRoleData := map[string]interface{}{ + "key_type": testOTPKeyType, + "default_user": testUserName, + "cidr_list": testCIDRList, + } + resp1 := map[string]interface{}{} + resp2 := map[string]interface{}{ + "keys": []string{testOTPRoleName}, + "key_info": map[string]interface{}{ + testOTPRoleName: map[string]interface{}{ + "key_type": testOTPKeyType, + }, + }, + } + logicaltest.Test(t, logicaltest.TestCase{ + Factory: testingFactory, + Steps: []logicaltest.TestStep{ + testRoleList(t, resp1), + testRoleWrite(t, testOTPRoleName, testOTPRoleData), + testRoleList(t, resp2), + }, + }) +} + func TestSSHBackend_DynamicKeyCreate(t *testing.T) { testDynamicRoleData := map[string]interface{}{ "key_type": testDynamicKeyType, @@ -962,6 +987,25 @@ func testRoleWrite(t *testing.T, name string, data map[string]interface{}) logic } } +func testRoleList(t *testing.T, expected map[string]interface{}) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ListOperation, + Path: "roles", + Check: func(resp *logical.Response) error { + if resp == nil { + return fmt.Errorf("nil response") + } + if resp.Data == nil { + return fmt.Errorf("nil data") + } + if !reflect.DeepEqual(resp.Data, expected) { + return fmt.Errorf("Invalid response:\nactual:%#v\nexpected is %#v", resp.Data, expected) + } + return nil + }, + } +} + func testRoleRead(t *testing.T, roleName string, expected map[string]interface{}) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.ReadOperation, diff --git a/builtin/logical/ssh/path_roles.go b/builtin/logical/ssh/path_roles.go index de2f1d5ffd..ec2d534496 100644 --- a/builtin/logical/ssh/path_roles.go +++ b/builtin/logical/ssh/path_roles.go @@ -488,13 +488,115 @@ func (b *backend) getRole(s logical.Storage, n string) (*sshRole, error) { return &result, nil } +// parseRole converts a sshRole object into its map[string]interface representation, +// with appropriate values for each KeyType. If the KeyType is invalid, it will retun +// an error. 
+func (b *backend) parseRole(role *sshRole) (map[string]interface{}, error) { + var result map[string]interface{} + + switch role.KeyType { + case KeyTypeOTP: + result = map[string]interface{}{ + "default_user": role.DefaultUser, + "cidr_list": role.CIDRList, + "exclude_cidr_list": role.ExcludeCIDRList, + "key_type": role.KeyType, + "port": role.Port, + "allowed_users": role.AllowedUsers, + } + case KeyTypeCA: + ttl, err := parseutil.ParseDurationSecond(role.TTL) + if err != nil { + return nil, err + } + maxTTL, err := parseutil.ParseDurationSecond(role.MaxTTL) + if err != nil { + return nil, err + } + + result = map[string]interface{}{ + "allowed_users": role.AllowedUsers, + "allowed_domains": role.AllowedDomains, + "default_user": role.DefaultUser, + "ttl": int64(ttl.Seconds()), + "max_ttl": int64(maxTTL.Seconds()), + "allowed_critical_options": role.AllowedCriticalOptions, + "allowed_extensions": role.AllowedExtensions, + "allow_user_certificates": role.AllowUserCertificates, + "allow_host_certificates": role.AllowHostCertificates, + "allow_bare_domains": role.AllowBareDomains, + "allow_subdomains": role.AllowSubdomains, + "allow_user_key_ids": role.AllowUserKeyIDs, + "key_id_format": role.KeyIDFormat, + "key_type": role.KeyType, + "default_critical_options": role.DefaultCriticalOptions, + "default_extensions": role.DefaultExtensions, + } + case KeyTypeDynamic: + result = map[string]interface{}{ + "key": role.KeyName, + "admin_user": role.AdminUser, + "default_user": role.DefaultUser, + "cidr_list": role.CIDRList, + "exclude_cidr_list": role.ExcludeCIDRList, + "port": role.Port, + "key_type": role.KeyType, + "key_bits": role.KeyBits, + "allowed_users": role.AllowedUsers, + "key_option_specs": role.KeyOptionSpecs, + // Returning install script will make the output look messy. + // But this is one way for clients to see the script that is + // being used to install the key. If there is some problem, + // the script can be modified and configured by clients. 
+ "install_script": role.InstallScript, + } + default: + return nil, fmt.Errorf("invalid key type: %v", role.KeyType) + } + + return result, nil +} + func (b *backend) pathRoleList(req *logical.Request, d *framework.FieldData) (*logical.Response, error) { entries, err := req.Storage.List("roles/") if err != nil { return nil, err } - return logical.ListResponse(entries), nil + keyInfo := map[string]interface{}{} + for _, entry := range entries { + role, err := b.getRole(req.Storage, entry) + if err != nil { + // On error, log warning and continue + if b.Logger().IsWarn() { + b.Logger().Warn("ssh: error getting role info", "role", entry, "error", err) + } + continue + } + if role == nil { + // On empty role, log warning and continue + if b.Logger().IsWarn() { + b.Logger().Warn("ssh: no role info found", "role", entry) + } + continue + } + + roleInfo, err := b.parseRole(role) + if err != nil { + if b.Logger().IsWarn() { + b.Logger().Warn("ssh: error parsing role info", "role", entry, "error", err) + } + continue + } + + if keyType, ok := roleInfo["key_type"]; ok { + keyInfo[entry] = map[string]interface{}{ + "key_type": keyType, + } + } + } + + return logical.ListResponseWithInfo(entries, keyInfo), nil } func (b *backend) pathRoleRead(req *logical.Request, d *framework.FieldData) (*logical.Response, error) { @@ -506,69 +608,14 @@ func (b *backend) pathRoleRead(req *logical.Request, d *framework.FieldData) (*l return nil, nil } - // Return information should be based on the key type of the role - if role.KeyType == KeyTypeOTP { - return &logical.Response{ - Data: map[string]interface{}{ - "default_user": role.DefaultUser, - "cidr_list": role.CIDRList, - "exclude_cidr_list": role.ExcludeCIDRList, - "key_type": role.KeyType, - "port": role.Port, - "allowed_users": role.AllowedUsers, - }, - }, nil - } else if role.KeyType == KeyTypeCA { - ttl, err := parseutil.ParseDurationSecond(role.TTL) - if err != nil { - return nil, err - } - maxTTL, err := parseutil.ParseDurationSecond(role.MaxTTL) - if err != nil { - return nil, err - } - - return &logical.Response{ - Data: map[string]interface{}{ - "allowed_users": role.AllowedUsers, - "allowed_domains": role.AllowedDomains, - "default_user": role.DefaultUser, - "ttl": int64(ttl.Seconds()), - "max_ttl": int64(maxTTL.Seconds()), - "allowed_critical_options": role.AllowedCriticalOptions, - "allowed_extensions": role.AllowedExtensions, - "allow_user_certificates": role.AllowUserCertificates, - "allow_host_certificates": role.AllowHostCertificates, - "allow_bare_domains": role.AllowBareDomains, - "allow_subdomains": role.AllowSubdomains, - "allow_user_key_ids": role.AllowUserKeyIDs, - "key_id_format": role.KeyIDFormat, - "key_type": role.KeyType, - "default_critical_options": role.DefaultCriticalOptions, - "default_extensions": role.DefaultExtensions, - }, - }, nil - } else { - return &logical.Response{ - Data: map[string]interface{}{ - "key": role.KeyName, - "admin_user": role.AdminUser, - "default_user": role.DefaultUser, - "cidr_list": role.CIDRList, - "exclude_cidr_list": role.ExcludeCIDRList, - "port": role.Port, - "key_type": role.KeyType, - "key_bits": role.KeyBits, - "allowed_users": role.AllowedUsers, - "key_option_specs": role.KeyOptionSpecs, - // Returning install script will make the output look messy. - // But this is one way for clients to see the script that is - // being used to install the key. If there is some problem, - // the script can be modified and configured by clients. 
- "install_script": role.InstallScript, - }, - }, nil + roleInfo, err := b.parseRole(role) + if err != nil { + return nil, err } + + return &logical.Response{ + Data: roleInfo, + }, nil } func (b *backend) pathRoleDelete(req *logical.Request, d *framework.FieldData) (*logical.Response, error) { diff --git a/logical/response.go b/logical/response.go index 6ee452b686..ab92fd541b 100644 --- a/logical/response.go +++ b/logical/response.go @@ -110,3 +110,23 @@ func ListResponse(keys []string) *Response { } return resp } + +// ListResponseWithInfo is used to format a response to a list operation and +// return the keys as well as a map with corresponding key info. +func ListResponseWithInfo(keys []string, keyInfo map[string]interface{}) *Response { + resp := ListResponse(keys) + + keyInfoData := make(map[string]interface{}) + for _, key := range keys { + val, ok := keyInfo[key] + if ok { + keyInfoData[key] = val + } + } + + if len(keyInfoData) > 0 { + resp.Data["key_info"] = keyInfoData + } + + return resp +} From 1474ec63bc2dcaed84bd83402e0d4313cbf0e674 Mon Sep 17 00:00:00 2001 From: Calvin Leung Huang Date: Fri, 3 Nov 2017 17:24:20 -0400 Subject: [PATCH 087/329] changelog++ --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 551b14fba0..90cf82cdba 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,9 @@ DEPRECATIONS/CHANGES: * SSH CA role read changes: When reading back a role from the `ssh` backend, the TTL/max TTL values will now be an integer number of seconds rather than a string. This better matches the API elsewhere in Vault. + * SSH role list changes: When listing roles from the `ssh` backend via the API, + the response data will additionally return a `key_info` map that will contain + a map of each key with a corresponding object containing the `key_type`. 
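(As an illustration, not part of the patch above: a minimal sketch of how the new `logical.ListResponseWithInfo` helper shapes a list response. The `dev`/`prod` role names and key types are illustrative only.)

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/logical"
)

func main() {
	// Keys as they would come back from Storage.List, plus per-key metadata.
	keys := []string{"dev", "prod"}
	keyInfo := map[string]interface{}{
		"dev":  map[string]interface{}{"key_type": "ca"},
		"prod": map[string]interface{}{"key_type": "dynamic"},
	}

	resp := logical.ListResponseWithInfo(keys, keyInfo)

	// resp.Data now carries both entries, which surface as "keys" and
	// "key_info" in the HTTP list response.
	fmt.Println(resp.Data["keys"])
	fmt.Println(resp.Data["key_info"])
}
```

Existing callers of `ListResponse` are unaffected: `key_info` is only added to the response data when at least one listed key has corresponding metadata.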
FEATURES: From bcc6b3e2b9501f5b46302106f42a3682f2ec0756 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Fri, 3 Nov 2017 17:30:34 -0400 Subject: [PATCH 088/329] Update gocql dep --- vendor/github.com/gocql/gocql/AUTHORS | 1 + vendor/github.com/gocql/gocql/conn.go | 4 +- vendor/github.com/gocql/gocql/host_source.go | 84 ++++++++++++++----- .../github.com/gocql/gocql/host_source_gen.go | 45 ++++++++++ vendor/github.com/gocql/gocql/session.go | 22 +++-- vendor/vendor.json | 6 +- 6 files changed, 123 insertions(+), 39 deletions(-) create mode 100644 vendor/github.com/gocql/gocql/host_source_gen.go diff --git a/vendor/github.com/gocql/gocql/AUTHORS b/vendor/github.com/gocql/gocql/AUTHORS index 30d66d0550..9628c68039 100644 --- a/vendor/github.com/gocql/gocql/AUTHORS +++ b/vendor/github.com/gocql/gocql/AUTHORS @@ -97,3 +97,4 @@ Nayef Ghattas Michał Matczuk Ben Krebsbach Vivian Mathews +Sascha Steinbiss diff --git a/vendor/github.com/gocql/gocql/conn.go b/vendor/github.com/gocql/gocql/conn.go index 0d927cd66c..74b179d541 100644 --- a/vendor/github.com/gocql/gocql/conn.go +++ b/vendor/github.com/gocql/gocql/conn.go @@ -1162,8 +1162,10 @@ func (c *Conn) localHostInfo() (*HostInfo, error) { return nil, err } + port := c.conn.RemoteAddr().(*net.TCPAddr).Port + // TODO(zariel): avoid doing this here - host, err := c.session.hostInfoFromMap(row) + host, err := c.session.hostInfoFromMap(row, port) if err != nil { return nil, err } diff --git a/vendor/github.com/gocql/gocql/host_source.go b/vendor/github.com/gocql/gocql/host_source.go index 8422a0b50f..988324c2e9 100644 --- a/vendor/github.com/gocql/gocql/host_source.go +++ b/vendor/github.com/gocql/gocql/host_source.go @@ -336,27 +336,65 @@ func (h *HostInfo) setPort(port int) *HostInfo { } func (h *HostInfo) update(from *HostInfo) { + if h == from { + return + } + h.mu.Lock() defer h.mu.Unlock() - h.peer = from.peer - h.broadcastAddress = from.broadcastAddress - h.listenAddress = from.listenAddress - h.rpcAddress = from.rpcAddress - h.preferredIP = from.preferredIP - h.connectAddress = from.connectAddress - h.port = from.port - h.dataCenter = from.dataCenter - h.rack = from.rack - h.hostId = from.hostId - h.workload = from.workload - h.graph = from.graph - h.dseVersion = from.dseVersion - h.partitioner = from.partitioner - h.clusterName = from.clusterName - h.version = from.version - h.state = from.state - h.tokens = from.tokens + from.mu.RLock() + defer from.mu.RUnlock() + + // autogenerated do not update + if h.peer == nil { + h.peer = from.peer + } + if h.broadcastAddress == nil { + h.broadcastAddress = from.broadcastAddress + } + if h.listenAddress == nil { + h.listenAddress = from.listenAddress + } + if h.rpcAddress == nil { + h.rpcAddress = from.rpcAddress + } + if h.preferredIP == nil { + h.preferredIP = from.preferredIP + } + if h.connectAddress == nil { + h.connectAddress = from.connectAddress + } + if h.port == 0 { + h.port = from.port + } + if h.dataCenter == "" { + h.dataCenter = from.dataCenter + } + if h.rack == "" { + h.rack = from.rack + } + if h.hostId == "" { + h.hostId = from.hostId + } + if h.workload == "" { + h.workload = from.workload + } + if h.dseVersion == "" { + h.dseVersion = from.dseVersion + } + if h.partitioner == "" { + h.partitioner = from.partitioner + } + if h.clusterName == "" { + h.clusterName = from.clusterName + } + if h.version == (cassVersion{}) { + h.version = from.version + } + if h.tokens == nil { + h.tokens = from.tokens + } } func (h *HostInfo) IsUp() bool { @@ -402,13 +440,13 @@ func 
checkSystemSchema(control *controlConn) (bool, error) { // Given a map that represents a row from either system.local or system.peers // return as much information as we can in *HostInfo -func (s *Session) hostInfoFromMap(row map[string]interface{}) (*HostInfo, error) { +func (s *Session) hostInfoFromMap(row map[string]interface{}, port int) (*HostInfo, error) { const assertErrorMsg = "Assertion failed for %s" var ok bool // Default to our connected port if the cluster doesn't have port information host := HostInfo{ - port: s.cfg.Port, + port: port, } for key, value := range row { @@ -527,7 +565,7 @@ func (r *ringDescriber) getClusterPeerInfo() ([]*HostInfo, error) { for _, row := range rows { // extract all available info about the peer - host, err := r.session.hostInfoFromMap(row) + host, err := r.session.hostInfoFromMap(row, r.session.cfg.Port) if err != nil { return nil, err } else if !isValidPeer(host) { @@ -589,12 +627,12 @@ func (r *ringDescriber) getHostInfo(ip net.IP, port int) (*HostInfo, error) { } for _, row := range rows { - h, err := r.session.hostInfoFromMap(row) + h, err := r.session.hostInfoFromMap(row, port) if err != nil { return nil, err } - if host.ConnectAddress().Equal(ip) { + if h.ConnectAddress().Equal(ip) { host = h break } diff --git a/vendor/github.com/gocql/gocql/host_source_gen.go b/vendor/github.com/gocql/gocql/host_source_gen.go new file mode 100644 index 0000000000..c82193cbd4 --- /dev/null +++ b/vendor/github.com/gocql/gocql/host_source_gen.go @@ -0,0 +1,45 @@ +// +build genhostinfo + +package main + +import ( + "fmt" + "reflect" + "sync" + + "github.com/gocql/gocql" +) + +func gen(clause, field string) { + fmt.Printf("if h.%s == %s {\n", field, clause) + fmt.Printf("\th.%s = from.%s\n", field, field) + fmt.Println("}") +} + +func main() { + t := reflect.ValueOf(&gocql.HostInfo{}).Elem().Type() + mu := reflect.TypeOf(sync.RWMutex{}) + + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type == mu { + continue + } + + switch f.Type.Kind() { + case reflect.Slice: + gen("nil", f.Name) + case reflect.String: + gen(`""`, f.Name) + case reflect.Int: + gen("0", f.Name) + case reflect.Struct: + gen("("+f.Type.Name()+"{})", f.Name) + case reflect.Bool, reflect.Int32: + continue + default: + panic(fmt.Sprintf("unknown field: %s", f)) + } + } + +} diff --git a/vendor/github.com/gocql/gocql/session.go b/vendor/github.com/gocql/gocql/session.go index 0ff6742910..1fa95e7634 100644 --- a/vendor/github.com/gocql/gocql/session.go +++ b/vendor/github.com/gocql/gocql/session.go @@ -160,17 +160,6 @@ func (s *Session) init() error { if err != nil { return err } - - allHosts := hosts - hosts = hosts[:0] - hostMap := make(map[string]*HostInfo, len(allHosts)) - for _, host := range allHosts { - if !s.cfg.filterHost(host) { - hosts = append(hosts, host) - hostMap[host.ConnectAddress().String()] = host - } - } - s.ring.endpoints = hosts if !s.cfg.disableControlConn { @@ -199,12 +188,21 @@ func (s *Session) init() error { return err } s.policy.SetPartitioner(partitioner) + filteredHosts := make([]*HostInfo, 0, len(newHosts)) for _, host := range newHosts { - hostMap[host.ConnectAddress().String()] = host + if !s.cfg.filterHost(host) { + filteredHosts = append(filteredHosts, host) + } } + hosts = filteredHosts } } + hostMap := make(map[string]*HostInfo, len(hosts)) + for _, host := range hosts { + hostMap[host.ConnectAddress().String()] = host + } + for _, host := range hostMap { host = s.ring.addOrUpdate(host) s.addNewNode(host) diff --git a/vendor/vendor.json 
b/vendor/vendor.json index fac77d1374..f4c68a9a5a 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -853,10 +853,10 @@ "revisionTime": "2017-10-17T18:16:16Z" }, { - "checksumSHA1": "Gk4WdAKNJ1F28JTy4aXUAIHrLZE=", + "checksumSHA1": "QJoVB4TVwjbvG4zvJEHxDxXWE90=", "path": "github.com/gocql/gocql", - "revision": "7fabcab850b7eade1d0e0ac5ac0b6b66d6801a75", - "revisionTime": "2017-10-26T22:00:49Z" + "revision": "aa93acc8f067e9233982b9a7de370e4d64b9c33e", + "revisionTime": "2017-11-03T11:33:55Z" }, { "checksumSHA1": "7RlYIbPYgPkxDDCSEuE6bvYEEeU=", From 22e156712c080d6acbf9d767ab240cd1dcddc175 Mon Sep 17 00:00:00 2001 From: Calvin Leung Huang Date: Fri, 3 Nov 2017 18:00:46 -0400 Subject: [PATCH 089/329] Update SSH list roles docs (#3536) --- website/source/api/secret/ssh/index.html.md | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/website/source/api/secret/ssh/index.html.md b/website/source/api/secret/ssh/index.html.md index c124eac173..5b28c04541 100644 --- a/website/source/api/secret/ssh/index.html.md +++ b/website/source/api/secret/ssh/index.html.md @@ -310,7 +310,15 @@ $ curl \ { "auth": null, "data": { - "keys": ["dev", "prod"] + "keys": ["dev", "prod"], + "key_info": { + "dev": { + "key_type": "ca" + }, + "prod": { + "key_type": "dynamic" + } + } }, "lease_duration": 2764800, "lease_id": "", From 6560e3c24adf9bdaf56371b0cbfecacd6d19e256 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Mon, 6 Nov 2017 14:28:30 +0000 Subject: [PATCH 090/329] Attaching secretToken to backend --- builtin/logical/nomad/backend.go | 1 + 1 file changed, 1 insertion(+) diff --git a/builtin/logical/nomad/backend.go b/builtin/logical/nomad/backend.go index ea507e08fc..94e3f4e7de 100644 --- a/builtin/logical/nomad/backend.go +++ b/builtin/logical/nomad/backend.go @@ -31,6 +31,7 @@ func Backend() *backend { secretToken(&b), }, BackendType: logical.TypeLogical, + Clean: b.resetClient, } return &b From d1e3eff61808f74f97b899dd6c161b83d4fab6e9 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Mon, 6 Nov 2017 15:09:56 +0000 Subject: [PATCH 091/329] Refactored Lease into the Backend configuration --- builtin/logical/nomad/backend.go | 19 +++++ .../{path_config.go => path_config_access.go} | 0 builtin/logical/nomad/path_config_lease.go | 83 +++++++++++++++++++ builtin/logical/nomad/path_roles.go | 27 +----- builtin/logical/nomad/path_token.go | 1 - builtin/logical/nomad/secret_token.go | 9 +- 6 files changed, 113 insertions(+), 26 deletions(-) rename builtin/logical/nomad/{path_config.go => path_config_access.go} (100%) create mode 100644 builtin/logical/nomad/path_config_lease.go diff --git a/builtin/logical/nomad/backend.go b/builtin/logical/nomad/backend.go index 94e3f4e7de..e346f827e9 100644 --- a/builtin/logical/nomad/backend.go +++ b/builtin/logical/nomad/backend.go @@ -22,6 +22,7 @@ func Backend() *backend { b.Backend = &framework.Backend{ Paths: []*framework.Path{ pathConfigAccess(), + pathConfigLease(&b), pathListRoles(&b), pathRoles(), pathToken(&b), @@ -89,3 +90,21 @@ func (b *backend) resetClient() { b.client = nil } + +// Lease returns the lease information +func (b *backend) Lease(s logical.Storage) (*configLease, error) { + entry, err := s.Get("config/lease") + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var result configLease + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + + return &result, nil +} diff --git a/builtin/logical/nomad/path_config.go b/builtin/logical/nomad/path_config_access.go 
similarity index 100% rename from builtin/logical/nomad/path_config.go rename to builtin/logical/nomad/path_config_access.go diff --git a/builtin/logical/nomad/path_config_lease.go b/builtin/logical/nomad/path_config_lease.go new file mode 100644 index 0000000000..8625aa6fee --- /dev/null +++ b/builtin/logical/nomad/path_config_lease.go @@ -0,0 +1,83 @@ +package nomad + +import ( + "time" + + "github.com/fatih/structs" + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/logical/framework" +) + +func pathConfigLease(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "config/lease", + Fields: map[string]*framework.FieldSchema{ + "ttl": &framework.FieldSchema{ + Type: framework.TypeDurationSecond, + Default: 0, + Description: "Duration before which the issued token needs renewal", + }, + "max_ttl": &framework.FieldSchema{ + Type: framework.TypeDurationSecond, + Default: 0, + Description: `Duration after which the issued token should not be allowed to be renewed`, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathLeaseRead, + logical.UpdateOperation: b.pathLeaseUpdate, + }, + + HelpSynopsis: pathConfigLeaseHelpSyn, + HelpDescription: pathConfigLeaseHelpDesc, + } +} + +// Sets the lease configuration parameters +func (b *backend) pathLeaseUpdate(req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + entry, err := logical.StorageEntryJSON("config/lease", &configLease{ + TTL: time.Second * time.Duration(d.Get("ttl").(int)), + MaxTTL: time.Second * time.Duration(d.Get("max_ttl").(int)), + }) + if err != nil { + return nil, err + } + if err := req.Storage.Put(entry); err != nil { + return nil, err + } + + return nil, nil +} + +// Returns the lease configuration parameters +func (b *backend) pathLeaseRead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + lease, err := b.Lease(req.Storage) + if err != nil { + return nil, err + } + if lease == nil { + return nil, nil + } + + lease.TTL = lease.TTL / time.Second + lease.MaxTTL = lease.MaxTTL / time.Second + + return &logical.Response{ + Data: structs.New(lease).Map(), + }, nil +} + +// Lease configuration information for the secrets issued by this backend +type configLease struct { + TTL time.Duration `json:"ttl" structs:"ttl" mapstructure:"ttl"` + MaxTTL time.Duration `json:"max_ttl" structs:"max_ttl" mapstructure:"max_ttl"` +} + +var pathConfigLeaseHelpSyn = "Configure the lease parameters for generated tokens" + +var pathConfigLeaseHelpDesc = ` +Sets the ttl and max_ttl values for the secrets to be issued by this backend. +Both ttl and max_ttl takes in an integer number of seconds as input as well as +inputs like "1h". +` diff --git a/builtin/logical/nomad/path_roles.go b/builtin/logical/nomad/path_roles.go index 3beec71754..f1c183490d 100644 --- a/builtin/logical/nomad/path_roles.go +++ b/builtin/logical/nomad/path_roles.go @@ -1,9 +1,6 @@ package nomad import ( - "fmt" - "time" - "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/framework" ) @@ -45,11 +42,6 @@ or 'management'. If a 'management' token, the "policy" parameter is not required. 
Defaults to 'client'.`, }, - - "lease": &framework.FieldSchema{ - Type: framework.TypeString, - Description: "Lease time of the role.", - }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -90,7 +82,6 @@ func pathRolesRead( // Generate the response resp := &logical.Response{ Data: map[string]interface{}{ - "lease": result.Lease.String(), "token_type": result.TokenType, "global": result.Global, }, @@ -129,19 +120,8 @@ func pathRolesWrite( } } - var lease time.Duration - leaseParam := d.Get("lease").(string) - if leaseParam != "" { - lease, err = time.ParseDuration(leaseParam) - if err != nil { - return logical.ErrorResponse(fmt.Sprintf( - "error parsing given lease of %s: %s", leaseParam, err)), nil - } - } - entry, err := logical.StorageEntryJSON("role/"+name, roleConfig{ Policy: policy, - Lease: lease, TokenType: tokenType, Global: global, }) @@ -166,8 +146,7 @@ func pathRolesDelete( } type roleConfig struct { - Policy []string `json:"policy"` - Lease time.Duration `json:"lease"` - TokenType string `json:"token_type"` - Global bool `json:"global"` + Policy []string `json:"policy"` + TokenType string `json:"token_type"` + Global bool `json:"global"` } diff --git a/builtin/logical/nomad/path_token.go b/builtin/logical/nomad/path_token.go index 28a397858e..ac69e0d0c2 100644 --- a/builtin/logical/nomad/path_token.go +++ b/builtin/logical/nomad/path_token.go @@ -73,7 +73,6 @@ func (b *backend) pathTokenRead( }, map[string]interface{}{ "accessor_id": token.AccessorID, }) - s.Secret.TTL = result.Lease return s, nil } diff --git a/builtin/logical/nomad/secret_token.go b/builtin/logical/nomad/secret_token.go index 5ba048a8e5..d3455cd681 100644 --- a/builtin/logical/nomad/secret_token.go +++ b/builtin/logical/nomad/secret_token.go @@ -26,8 +26,15 @@ func secretToken(b *backend) *framework.Secret { func (b *backend) secretTokenRenew( req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + lease, err := b.Lease(req.Storage) + if err != nil { + return nil, err + } + if lease == nil { + lease = &configLease{} + } - return framework.LeaseExtend(0, 0, b.System())(req, d) + return framework.LeaseExtend(lease.TTL, lease.MaxTTL, b.System())(req, d) } func (b *backend) secretTokenRevoke( From f9c30bff200850aebcb93e08a140152905b41d94 Mon Sep 17 00:00:00 2001 From: Nicolas Corrarello Date: Mon, 6 Nov 2017 15:13:50 +0000 Subject: [PATCH 092/329] Updated documentation --- website/source/api/secret/nomad/index.html.md | 33 +++++++++++++++++++ .../source/docs/secrets/nomad/index.html.md | 9 +++++ 2 files changed, 42 insertions(+) diff --git a/website/source/api/secret/nomad/index.html.md b/website/source/api/secret/nomad/index.html.md index d3379b2852..1b46c4ecea 100644 --- a/website/source/api/secret/nomad/index.html.md +++ b/website/source/api/secret/nomad/index.html.md @@ -52,6 +52,39 @@ $ curl \ https://vault.rocks/v1/nomad/config/access ``` +## Configure Lease + +This endpoint configures the lease settings for generated tokens. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `POST` | `/nomad/config/lease` | `204 (empty body)` | + +### Parameters + +- `ttl` `(int: 0)` – Specifies the lease ttl provided in seconds. + +- `max_ttl` `(int: 0)` – Specifies the maximum ttl provided in seconds. + +### Sample Payload + +```json +{ + "ttl": 1800, + "max_ttl": 3600 +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." 
\ + --request POST \ + --data @payload.json \ + https://vault.rocks/v1/nomad/config/lease +``` + ## Create/Update Role This endpoint creates or updates the Nomad role definition in Vault. If the role does not exist, it will be created. If the role already exists, it will receive diff --git a/website/source/docs/secrets/nomad/index.html.md b/website/source/docs/secrets/nomad/index.html.md index 754426762a..aa987e7791 100644 --- a/website/source/docs/secrets/nomad/index.html.md +++ b/website/source/docs/secrets/nomad/index.html.md @@ -28,6 +28,15 @@ Unlike the `generic` backend, the `nomad` backend is not mounted by default. $ vault mount nomad Successfully mounted 'nomad' at 'nomad'! ``` + +Optionally, we can configure the lease settings for credentials generated +by Vault. This is done by writing to the `config/lease` key: + +``` +$ vault write nomad/config/lease ttl=3600 max_ttl=86400 +Success! Data written to: nomad/config/lease +``` + For a quick start, you can use the SecretID token provided by the [Nomad ACL bootstrap process](https://www.nomadproject.io/guides/acl.html#generate-the-initial-token), although this is discouraged for production deployments. From 33cf98026e54741af1a57ee54868f2e80ade01f5 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Mon, 6 Nov 2017 12:05:07 -0500 Subject: [PATCH 093/329] Add PKCS8 marshaling to PKI (#3518) --- builtin/logical/pki/cert_util.go | 66 +++++++++++ builtin/logical/pki/fields.go | 11 ++ builtin/logical/pki/path_intermediate.go | 7 ++ builtin/logical/pki/path_issue_sign.go | 7 ++ builtin/logical/pki/path_root.go | 7 ++ helper/certutil/pkcs8.go | 119 ++++++++++++++++++++ helper/certutil/pkcs8_test.go | 110 ++++++++++++++++++ website/source/api/secret/pki/index.html.md | 17 ++- 8 files changed, 343 insertions(+), 1 deletion(-) create mode 100644 helper/certutil/pkcs8.go create mode 100644 helper/certutil/pkcs8_test.go diff --git a/builtin/logical/pki/cert_util.go b/builtin/logical/pki/cert_util.go index b4bb38173b..9356cdde8d 100644 --- a/builtin/logical/pki/cert_util.go +++ b/builtin/logical/pki/cert_util.go @@ -2,6 +2,7 @@ package pki import ( "bytes" + "crypto" "crypto/ecdsa" "crypto/rand" "crypto/rsa" @@ -9,6 +10,7 @@ import ( "crypto/x509" "crypto/x509/pkix" "encoding/asn1" + "encoding/base64" "encoding/pem" "fmt" "net" @@ -16,6 +18,7 @@ import ( "strings" "time" + "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/helper/certutil" "github.com/hashicorp/vault/helper/errutil" "github.com/hashicorp/vault/helper/parseutil" @@ -1183,3 +1186,66 @@ NameCheck: return fmt.Errorf("name %q disallowed by CA's permitted DNS domains", badName) } + +func convertRespToPKCS8(resp *logical.Response) error { + privRaw, ok := resp.Data["private_key"] + if !ok { + return nil + } + priv, ok := privRaw.(string) + if !ok { + return fmt.Errorf("error converting response to pkcs8: could not parse original value as string") + } + + privKeyTypeRaw, ok := resp.Data["private_key_type"] + if !ok { + return fmt.Errorf("error converting response to pkcs8: %q not found in response", "private_key_type") + } + privKeyType, ok := privKeyTypeRaw.(certutil.PrivateKeyType) + if !ok { + return fmt.Errorf("error converting response to pkcs8: could not parse original type value as string") + } + + var keyData []byte + var pemUsed bool + var err error + var signer crypto.Signer + + block, _ := pem.Decode([]byte(priv)) + if block == nil { + keyData, err = base64.StdEncoding.DecodeString(priv) + if err != nil { + return errwrap.Wrapf("error converting response to pkcs8: error 
decoding original value: {{err}}", err) + } + } else { + keyData = block.Bytes + pemUsed = true + } + + switch privKeyType { + case certutil.RSAPrivateKey: + signer, err = x509.ParsePKCS1PrivateKey(keyData) + case certutil.ECPrivateKey: + signer, err = x509.ParseECPrivateKey(keyData) + default: + return fmt.Errorf("unknown private key type %q", privKeyType) + } + if err != nil { + return errwrap.Wrapf("error converting response to pkcs8: error parsing previous key: {{err}}", err) + } + + keyData, err = certutil.MarshalPKCS8PrivateKey(signer) + if err != nil { + return errwrap.Wrapf("error converting response to pkcs8: error marshaling pkcs8 key: {{err}}", err) + } + + if pemUsed { + block.Type = "PRIVATE KEY" + block.Bytes = keyData + resp.Data["private_key"] = string(pem.EncodeToMemory(block)) + } else { + resp.Data["private_key"] = base64.StdEncoding.EncodeToString(keyData) + } + + return nil +} diff --git a/builtin/logical/pki/fields.go b/builtin/logical/pki/fields.go index 52adf10eb7..9aa1d4b556 100644 --- a/builtin/logical/pki/fields.go +++ b/builtin/logical/pki/fields.go @@ -22,6 +22,17 @@ key and issuing cert will be appended to the certificate pem. Defaults to "pem".`, } + fields["private_key_format"] = &framework.FieldSchema{ + Type: framework.TypeString, + Default: "der", + Description: `Format for the returned private key. +Generally the default will be controlled by the "format" +parameter as either base64-encoded DER or PEM-encoded DER. +However, this can be set to "pkcs8" to have the returned +private key contain base64-encoded pkcs8 or PEM-encoded +pkcs8 instead. Defaults to "der".`, + } + fields["ip_sans"] = &framework.FieldSchema{ Type: framework.TypeString, Description: `The requested IP SANs, if any, in a diff --git a/builtin/logical/pki/path_intermediate.go b/builtin/logical/pki/path_intermediate.go index 2073621d3c..f989393796 100644 --- a/builtin/logical/pki/path_intermediate.go +++ b/builtin/logical/pki/path_intermediate.go @@ -106,6 +106,13 @@ func (b *backend) pathGenerateIntermediate( } } + if data.Get("private_key_format").(string) == "pkcs8" { + err = convertRespToPKCS8(resp) + if err != nil { + return nil, err + } + } + cb := &certutil.CertBundle{} cb.PrivateKey = csrb.PrivateKey cb.PrivateKeyType = csrb.PrivateKeyType diff --git a/builtin/logical/pki/path_issue_sign.go b/builtin/logical/pki/path_issue_sign.go index a297c6ab9a..d6e9b7368c 100644 --- a/builtin/logical/pki/path_issue_sign.go +++ b/builtin/logical/pki/path_issue_sign.go @@ -268,6 +268,13 @@ func (b *backend) pathIssueSignCert( resp.Secret.TTL = parsedBundle.Certificate.NotAfter.Sub(time.Now()) } + if data.Get("private_key_format").(string) == "pkcs8" { + err = convertRespToPKCS8(resp) + if err != nil { + return nil, err + } + } + if !role.NoStore { err = req.Storage.Put(&logical.StorageEntry{ Key: "certs/" + normalizeSerial(cb.SerialNumber), diff --git a/builtin/logical/pki/path_root.go b/builtin/logical/pki/path_root.go index 3f18eecae2..0c4c1e7434 100644 --- a/builtin/logical/pki/path_root.go +++ b/builtin/logical/pki/path_root.go @@ -188,6 +188,13 @@ func (b *backend) pathCAGenerateRoot( } } + if data.Get("private_key_format").(string) == "pkcs8" { + err = convertRespToPKCS8(resp) + if err != nil { + return nil, err + } + } + // Store it as the CA bundle entry, err = logical.StorageEntryJSON("config/ca_bundle", cb) if err != nil { diff --git a/helper/certutil/pkcs8.go b/helper/certutil/pkcs8.go new file mode 100644 index 0000000000..22585de0cc --- /dev/null +++ b/helper/certutil/pkcs8.go @@ 
-0,0 +1,119 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package certutil + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + "fmt" +) + +var ( + oidNamedCurveP224 = asn1.ObjectIdentifier{1, 3, 132, 0, 33} + oidNamedCurveP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7} + oidNamedCurveP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34} + oidNamedCurveP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35} + + oidPublicKeyRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1} + oidPublicKeyDSA = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 1} + oidPublicKeyECDSA = asn1.ObjectIdentifier{1, 2, 840, 10045, 2, 1} +) + +// pkcs8 reflects an ASN.1, PKCS#8 PrivateKey. See +// ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-8/pkcs-8v1_2.asn +// and RFC 5208. +type pkcs8 struct { + Version int + Algo pkix.AlgorithmIdentifier + PrivateKey []byte + // optional attributes omitted. +} + +type ecPrivateKey struct { + Version int + PrivateKey []byte + NamedCurveOID asn1.ObjectIdentifier `asn1:"optional,explicit,tag:0"` + PublicKey asn1.BitString `asn1:"optional,explicit,tag:1"` +} + +// MarshalPKCS8PrivateKey converts a private key to PKCS#8 encoded form. +// The following key types are supported: *rsa.PrivateKey, *ecdsa.PublicKey. +// Unsupported key types result in an error. +// +// See RFC 5208. +func MarshalPKCS8PrivateKey(key interface{}) ([]byte, error) { + var privKey pkcs8 + + switch k := key.(type) { + case *rsa.PrivateKey: + privKey.Algo = pkix.AlgorithmIdentifier{ + Algorithm: oidPublicKeyRSA, + Parameters: asn1.NullRawValue, + } + privKey.PrivateKey = x509.MarshalPKCS1PrivateKey(k) + + case *ecdsa.PrivateKey: + oid, ok := oidFromNamedCurve(k.Curve) + if !ok { + return nil, errors.New("x509: unknown curve while marshalling to PKCS#8") + } + + oidBytes, err := asn1.Marshal(oid) + if err != nil { + return nil, errors.New("x509: failed to marshal curve OID: " + err.Error()) + } + + privKey.Algo = pkix.AlgorithmIdentifier{ + Algorithm: oidPublicKeyECDSA, + Parameters: asn1.RawValue{ + FullBytes: oidBytes, + }, + } + + if privKey.PrivateKey, err = marshalECPrivateKeyWithOID(k, nil); err != nil { + return nil, errors.New("x509: failed to marshal EC private key while building PKCS#8: " + err.Error()) + } + + default: + return nil, fmt.Errorf("x509: unknown key type while marshalling PKCS#8: %T", key) + } + + return asn1.Marshal(privKey) +} + +func oidFromNamedCurve(curve elliptic.Curve) (asn1.ObjectIdentifier, bool) { + switch curve { + case elliptic.P224(): + return oidNamedCurveP224, true + case elliptic.P256(): + return oidNamedCurveP256, true + case elliptic.P384(): + return oidNamedCurveP384, true + case elliptic.P521(): + return oidNamedCurveP521, true + } + + return nil, false +} + +// marshalECPrivateKey marshals an EC private key into ASN.1, DER format and +// sets the curve ID to the given OID, or omits it if OID is nil. 
+func marshalECPrivateKeyWithOID(key *ecdsa.PrivateKey, oid asn1.ObjectIdentifier) ([]byte, error) { + privateKeyBytes := key.D.Bytes() + paddedPrivateKey := make([]byte, (key.Curve.Params().N.BitLen()+7)/8) + copy(paddedPrivateKey[len(paddedPrivateKey)-len(privateKeyBytes):], privateKeyBytes) + + return asn1.Marshal(ecPrivateKey{ + Version: 1, + PrivateKey: paddedPrivateKey, + NamedCurveOID: oid, + PublicKey: asn1.BitString{Bytes: elliptic.Marshal(key.Curve, key.X, key.Y)}, + }) +} diff --git a/helper/certutil/pkcs8_test.go b/helper/certutil/pkcs8_test.go new file mode 100644 index 0000000000..df350ead41 --- /dev/null +++ b/helper/certutil/pkcs8_test.go @@ -0,0 +1,110 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package certutil + +import ( + "bytes" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "crypto/x509" + "encoding/hex" + "reflect" + "testing" +) + +// Generated using: +// openssl genrsa 1024 | openssl pkcs8 -topk8 -nocrypt +var pkcs8RSAPrivateKeyHex = `30820278020100300d06092a864886f70d0101010500048202623082025e02010002818100cfb1b5bf9685ffa97b4f99df4ff122b70e59ac9b992f3bc2b3dde17d53c1a34928719b02e8fd17839499bfbd515bd6ef99c7a1c47a239718fe36bfd824c0d96060084b5f67f0273443007a24dfaf5634f7772c9346e10eb294c2306671a5a5e719ae24b4de467291bc571014b0e02dec04534d66a9bb171d644b66b091780e8d020301000102818100b595778383c4afdbab95d2bfed12b3f93bb0a73a7ad952f44d7185fd9ec6c34de8f03a48770f2009c8580bcd275e9632714e9a5e3f32f29dc55474b2329ff0ebc08b3ffcb35bc96e6516b483df80a4a59cceb71918cbabf91564e64a39d7e35dce21cb3031824fdbc845dba6458852ec16af5dddf51a8397a8797ae0337b1439024100ea0eb1b914158c70db39031dd8904d6f18f408c85fbbc592d7d20dee7986969efbda081fdf8bc40e1b1336d6b638110c836bfdc3f314560d2e49cd4fbde1e20b024100e32a4e793b574c9c4a94c8803db5152141e72d03de64e54ef2c8ed104988ca780cd11397bc359630d01b97ebd87067c5451ba777cf045ca23f5912f1031308c702406dfcdbbd5a57c9f85abc4edf9e9e29153507b07ce0a7ef6f52e60dcfebe1b8341babd8b789a837485da6c8d55b29bbb142ace3c24a1f5b54b454d01b51e2ad03024100bd6a2b60dee01e1b3bfcef6a2f09ed027c273cdbbaf6ba55a80f6dcc64e4509ee560f84b4f3e076bd03b11e42fe71a3fdd2dffe7e0902c8584f8cad877cdc945024100aa512fa4ada69881f1d8bb8ad6614f192b83200aef5edf4811313d5ef30a86cbd0a90f7b025c71ea06ec6b34db6306c86b1040670fd8654ad7291d066d06d031` + +// Generated using: +// openssl ecparam -genkey -name secp224r1 | openssl pkcs8 -topk8 -nocrypt +var pkcs8P224PrivateKeyHex = `3078020100301006072a8648ce3d020106052b810400210461305f020101041cca3d72b3e88fed2684576dad9b80a9180363a5424986900e3abcab3fa13c033a0004f8f2a6372872a4e61263ed893afb919576a4cacfecd6c081a2cbc76873cf4ba8530703c6042b3a00e2205087e87d2435d2e339e25702fae1` + +// Generated using: +// openssl ecparam -genkey -name secp256r1 | openssl pkcs8 -topk8 -nocrypt +var pkcs8P256PrivateKeyHex = `308187020100301306072a8648ce3d020106082a8648ce3d030107046d306b0201010420dad6b2f49ca774c36d8ae9517e935226f667c929498f0343d2424d0b9b591b43a14403420004b9c9b90095476afe7b860d8bd43568cab7bcb2eed7b8bf2fa0ce1762dd20b04193f859d2d782b1e4cbfd48492f1f533113a6804903f292258513837f07fda735` + +// Generated using: +// openssl ecparam -genkey -name secp384r1 | openssl pkcs8 -topk8 -nocrypt +var pkcs8P384PrivateKeyHex = 
`3081b6020100301006072a8648ce3d020106052b8104002204819e30819b02010104309bf832f6aaaeacb78ce47ffb15e6fd0fd48683ae79df6eca39bfb8e33829ac94aa29d08911568684c2264a08a4ceb679a164036200049070ad4ed993c7770d700e9f6dc2baa83f63dd165b5507f98e8ff29b5d2e78ccbe05c8ddc955dbf0f7497e8222cfa49314fe4e269459f8e880147f70d785e530f2939e4bf9f838325bb1a80ad4cf59272ae0e5efe9a9dc33d874492596304bd3` + +// Generated using: +// openssl ecparam -genkey -name secp521r1 | openssl pkcs8 -topk8 -nocrypt +// +// Note that OpenSSL will truncate the private key if it can (i.e. it emits it +// like an integer, even though it's an OCTET STRING field). Thus if you +// regenerate this you may, randomly, find that it's a byte shorter than +// expected and the Go test will fail to recreate it exactly. +var pkcs8P521PrivateKeyHex = `3081ee020100301006072a8648ce3d020106052b810400230481d63081d3020101044200cfe0b87113a205cf291bb9a8cd1a74ac6c7b2ebb8199aaa9a5010d8b8012276fa3c22ac913369fa61beec2a3b8b4516bc049bde4fb3b745ac11b56ab23ac52e361a1818903818600040138f75acdd03fbafa4f047a8e4b272ba9d555c667962b76f6f232911a5786a0964e5edea6bd21a6f8725720958de049c6e3e6661c1c91b227cebee916c0319ed6ca003db0a3206d372229baf9dd25d868bf81140a518114803ce40c1855074d68c4e9dab9e65efba7064c703b400f1767f217dac82715ac1f6d88c74baf47a7971de4ea` + +func TestPKCS8(t *testing.T) { + tests := []struct { + name string + keyHex string + keyType reflect.Type + curve elliptic.Curve + }{ + { + name: "RSA private key", + keyHex: pkcs8RSAPrivateKeyHex, + keyType: reflect.TypeOf(&rsa.PrivateKey{}), + }, + { + name: "P-224 private key", + keyHex: pkcs8P224PrivateKeyHex, + keyType: reflect.TypeOf(&ecdsa.PrivateKey{}), + curve: elliptic.P224(), + }, + { + name: "P-256 private key", + keyHex: pkcs8P256PrivateKeyHex, + keyType: reflect.TypeOf(&ecdsa.PrivateKey{}), + curve: elliptic.P256(), + }, + { + name: "P-384 private key", + keyHex: pkcs8P384PrivateKeyHex, + keyType: reflect.TypeOf(&ecdsa.PrivateKey{}), + curve: elliptic.P384(), + }, + { + name: "P-521 private key", + keyHex: pkcs8P521PrivateKeyHex, + keyType: reflect.TypeOf(&ecdsa.PrivateKey{}), + curve: elliptic.P521(), + }, + } + + for _, test := range tests { + derBytes, err := hex.DecodeString(test.keyHex) + if err != nil { + t.Errorf("%s: failed to decode hex: %s", test.name, err) + continue + } + privKey, err := x509.ParsePKCS8PrivateKey(derBytes) + if err != nil { + t.Errorf("%s: failed to decode PKCS#8: %s", test.name, err) + continue + } + if reflect.TypeOf(privKey) != test.keyType { + t.Errorf("%s: decoded PKCS#8 returned unexpected key type: %T", test.name, privKey) + continue + } + if ecKey, isEC := privKey.(*ecdsa.PrivateKey); isEC && ecKey.Curve != test.curve { + t.Errorf("%s: decoded PKCS#8 returned unexpected curve %#v", test.name, ecKey.Curve) + continue + } + reserialised, err := MarshalPKCS8PrivateKey(privKey) + if err != nil { + t.Errorf("%s: failed to marshal into PKCS#8: %s", test.name, err) + continue + } + if !bytes.Equal(derBytes, reserialised) { + t.Errorf("%s: marshalled PKCS#8 didn't match original: got %x, want %x", test.name, reserialised, derBytes) + continue + } + } +} diff --git a/website/source/api/secret/pki/index.html.md b/website/source/api/secret/pki/index.html.md index 58fbf83c15..87f37c7878 100644 --- a/website/source/api/secret/pki/index.html.md +++ b/website/source/api/secret/pki/index.html.md @@ -296,7 +296,7 @@ $ curl \ "data": { "issuing_certificates": ["", ""], "crl_distribution_points": ["", ""], - "ocsp_servers": ["", ""], + "ocsp_servers": ["", ""] }, "auth": null } @@ -437,6 +437,11 @@ 
can be set in a CSR are supported. base64 encoded. If `pem_bundle`, the `csr` field will contain the private key (if exported) and CSR, concatenated. +- `private_key_format` `(string: "")` – Specifies the format for marshaling the + private key. Defaults to `der` which will return either base64-encoded DER or + PEM-encoded DER, depending on the value of `format`. The other option is + `pkcs8` which will return the key marshalled as PEM-encoded PKCS8. + - `key_type` `(string: "rsa")` – Specifies the desired key type; must be `rsa` or `ec`. @@ -556,6 +561,11 @@ need to request a new certificate.** private key and certificate, concatenated; if the issuing CA is not a Vault-derived self-signed root, this will be included as well. +- `private_key_format` `(string: "")` – Specifies the format for marshaling the + private key. Defaults to `der` which will return either base64-encoded DER or + PEM-encoded DER, depending on the value of `format`. The other option is + `pkcs8` which will return the key marshalled as PEM-encoded PKCS8. + - `exclude_cn_from_sans` `(bool: false)` – If true, the given `common_name` will not be included in DNS or Email Subject Alternate Names (as appropriate). Useful if the CN is not a hostname or email address, but is instead some @@ -932,6 +942,11 @@ Vault would overwrite the existing cert/key with new values. exported) and certificate, concatenated; if the issuing CA is not a Vault-derived self-signed root, this will be included as well. +- `private_key_format` `(string: "")` – Specifies the format for marshaling the + private key. Defaults to `der` which will return either base64-encoded DER or + PEM-encoded DER, depending on the value of `format`. The other option is + `pkcs8` which will return the key marshalled as PEM-encoded PKCS8. + - `key_type` `(string: "rsa")` – Specifies the desired key type; must be `rsa` or `ec`. From eca0d100dc0dcc920d7a2d07aed465cb3ff118a3 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Mon, 6 Nov 2017 12:05:44 -0500 Subject: [PATCH 094/329] changelog++ --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 90cf82cdba..52313b9daa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -34,6 +34,7 @@ IMPROVEMENTS: themselves were `600` and are all encrypted, but this doesn't hurt. 
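(As an illustration, not part of the patch above: a small, self-contained sketch of the PKCS#8 round trip enabled by the `certutil.MarshalPKCS8PrivateKey` helper added above — generate an EC key locally, marshal it, and parse it back with the standard library. The key material here is throwaway and purely illustrative.)

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"log"

	"github.com/hashicorp/vault/helper/certutil"
)

func main() {
	// Generate a throwaway P-256 key.
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	// Marshal it as PKCS#8 DER using the new helper.
	der, err := certutil.MarshalPKCS8PrivateKey(key)
	if err != nil {
		log.Fatal(err)
	}

	// PEM-encode it the same way the PKI backend does when
	// private_key_format=pkcs8 and a PEM format are requested.
	pemKey := pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: der})
	fmt.Printf("%s", pemKey)

	// Clients can decode the result with the standard library.
	parsed, err := x509.ParsePKCS8PrivateKey(der)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("parsed key type: %T\n", parsed)
}
```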
* secret/cassandra: Work around Cassandra ignoring consistency levels for a user listing query [GH-3469] + * secret/pki: Private keys can now be marshalled as PKCS#8 [GH-3518] * secret/pki: Allow entering URLs for `pki` as both comma-separated strings and JSON arrays [GH-3409] * secret/ssh: Role TTL/max TTL can now be specified as either a string or an From 8ac7868aa9e4239168f0252e3df5fa161042aa15 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Mon, 6 Nov 2017 12:06:19 -0500 Subject: [PATCH 095/329] Minor client refactoring (#3539) --- api/client.go | 57 ++++++++++++++++++++++++++++++--------------------- meta/meta.go | 6 ------ 2 files changed, 34 insertions(+), 29 deletions(-) diff --git a/api/client.go b/api/client.go index ca34f51f35..f1f6754d35 100644 --- a/api/client.go +++ b/api/client.go @@ -13,6 +13,7 @@ import ( "sync" "time" + "github.com/hashicorp/errwrap" "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/go-rootcerts" "github.com/hashicorp/vault/helper/parseutil" @@ -63,6 +64,10 @@ type Config struct { // Timeout is for setting custom timeout parameter in the HttpClient Timeout time.Duration + + // If there is an error when creating the configuration, this will be the + // error + Error error } // TLSConfig contains the parameters needed to configure TLS on the HTTP client @@ -110,17 +115,15 @@ func DefaultConfig() *Config { MinVersion: tls.VersionTLS12, } if err := http2.ConfigureTransport(transport); err != nil { + config.Error = err return nil } if err := config.ReadEnvironment(); err != nil { + config.Error = err return nil } - if v := os.Getenv(EnvVaultAddress); v != "" { - config.Address = v - } - // Ensure redirects are not automatically followed // Note that this is sane for the API client as it has its own // redirect handling logic (and thus also for command/meta), @@ -142,36 +145,41 @@ func (c *Config) ConfigureTLS(t *TLSConfig) error { if c.HttpClient == nil { c.HttpClient = DefaultConfig().HttpClient } + clientTLSConfig := c.HttpClient.Transport.(*http.Transport).TLSClientConfig var clientCert tls.Certificate foundClientCert := false - if t.CACert != "" || t.CAPath != "" || t.ClientCert != "" || t.ClientKey != "" || t.Insecure { - if t.ClientCert != "" && t.ClientKey != "" { - var err error - clientCert, err = tls.LoadX509KeyPair(t.ClientCert, t.ClientKey) - if err != nil { - return err - } - foundClientCert = true - } else if t.ClientCert != "" || t.ClientKey != "" { - return fmt.Errorf("Both client cert and client key must be provided") + + switch { + case t.ClientCert != "" && t.ClientKey != "": + var err error + clientCert, err = tls.LoadX509KeyPair(t.ClientCert, t.ClientKey) + if err != nil { + return err + } + foundClientCert = true + case t.ClientCert != "" || t.ClientKey != "": + return fmt.Errorf("Both client cert and client key must be provided") + } + + if t.CACert != "" || t.CAPath != "" { + rootConfig := &rootcerts.Config{ + CAFile: t.CACert, + CAPath: t.CAPath, + } + if err := rootcerts.ConfigureTLS(clientTLSConfig, rootConfig); err != nil { + return err } } - clientTLSConfig := c.HttpClient.Transport.(*http.Transport).TLSClientConfig - rootConfig := &rootcerts.Config{ - CAFile: t.CACert, - CAPath: t.CAPath, + if t.Insecure { + clientTLSConfig.InsecureSkipVerify = true } - if err := rootcerts.ConfigureTLS(clientTLSConfig, rootConfig); err != nil { - return err - } - - clientTLSConfig.InsecureSkipVerify = t.Insecure if foundClientCert { clientTLSConfig.Certificates = []tls.Certificate{clientCert} } + if t.TLSServerName != "" { 
clientTLSConfig.ServerName = t.TLSServerName } @@ -290,6 +298,9 @@ func NewClient(c *Config) (*Client, error) { if def == nil { return nil, fmt.Errorf("could not create/read default configuration") } + if def.Error != nil { + return nil, errwrap.Wrapf("error encountered setting up default configuration: {{err}}", def.Error) + } if c == nil { c = def diff --git a/meta/meta.go b/meta/meta.go index d75f1054c5..dcb9f7f3d5 100644 --- a/meta/meta.go +++ b/meta/meta.go @@ -6,7 +6,6 @@ import ( "io" "os" - "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/command/token" "github.com/hashicorp/vault/helper/flag-slice" @@ -80,11 +79,6 @@ func (m *Meta) DefaultWrappingLookupFunc(operation, path string) string { func (m *Meta) Client() (*api.Client, error) { config := api.DefaultConfig() - err := config.ReadEnvironment() - if err != nil { - return nil, errwrap.Wrapf("error reading environment: {{err}}", err) - } - if m.flagAddress != "" { config.Address = m.flagAddress } From ee438809d6b05dda0e11239115786df7bcfa62a9 Mon Sep 17 00:00:00 2001 From: Jason Antman Date: Mon, 6 Nov 2017 12:11:02 -0500 Subject: [PATCH 096/329] Add third party tools list to website (#3488) --- .../source/docs/relatedtools/index.html.md | 28 +++++++++++++++++++ website/source/layouts/docs.erb | 4 +++ 2 files changed, 32 insertions(+) create mode 100644 website/source/docs/relatedtools/index.html.md diff --git a/website/source/docs/relatedtools/index.html.md b/website/source/docs/relatedtools/index.html.md new file mode 100644 index 0000000000..178917cef0 --- /dev/null +++ b/website/source/docs/relatedtools/index.html.md @@ -0,0 +1,28 @@ +--- +layout: "docs" +page_title: "Related Tools" +sidebar_current: "docs-related-related" +description: |- + Short list of third-party tools that work with or are related to Vault. +--- + +# Related Tools + +## Hashicorp Tools + +* The [Terraform Vault provider](https://www.terraform.io/docs/providers/vault/index.html) can read from, write to, and configure Vault from [HashiCorp Terraform](https://www.terraform.io/) +* [consul-template](https://github.com/hashicorp/consul-template) is a template renderer, notifier, and supervisor for HashiCorp Consul and Vault data +* [envconsul](https://github.com/hashicorp/envconsul) allows you to read and set environmental variables for processes from Consul and Vault data +* The [vault-ssh-helper](https://github.com/hashicorp/vault-ssh-helper) can be used to enable one-time passwords for SSH authentication via Vault + +## Third-Party Tools + +The following list of tools is maintained by the community of Vault users; HashiCorp has not tested or approved them and makes no claims as to their suitability or security. + +* [HashiCorp Vault Jenkins plugin](https://plugins.jenkins.io/hashicorp-vault-plugin) - a Jenkins plugin for injecting Vault secrets into the build environment +* [Spring Vault](http://projects.spring.io/spring-vault/) - a Java Spring project for working with Vault secrets +* [vault-exec](https://github.com/kmanning/vault_exec) - a shell wrapper to execute arbitrary scripts using temporary AWS credentials managed by Vault +* [pouch](https://github.com/tuenti/pouch) - A set of tools to manage provisioning of secrets on hosts based on the AppRole authentication method of Vault +* [vault-aws-creds](https://github.com/jantman/vault-aws-creds) - Python helper to export Vault-provided temporary AWS creds into the environment + +Want to add your own project, or one that you use? 
Additions are welcome via [pull requests](https://github.com/hashicorp/vault/blob/master/website/source/docs/relatedtools/index.html.md). diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb index 9950198191..ccd368b0cc 100644 --- a/website/source/layouts/docs.erb +++ b/website/source/layouts/docs.erb @@ -362,6 +362,10 @@
+ > + Related Tools + + > Vault Enterprise + > + seal + + > storage From be83081134f332423cd3a8860d39ae11b1f3e4ed Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Tue, 14 Nov 2017 11:56:05 -0500 Subject: [PATCH 164/329] changelog++ --- CHANGELOG.md | 30 ++++++++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f9173e8686..d58ba6cebb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,9 +22,13 @@ DEPRECATIONS/CHANGES: * SSH role list changes: When listing roles from the `ssh` backend via the API, the response data will additionally return a `key_info` map that will contain a map of each key with a corresponding object containing the `key_type`. + * More granularity in audit logs: Audit request and response entires are still + in RFC3339 format but now have a granularity of nanoseconds. FEATURES: + * **RSA Support for Transit Backend**: Transit backend can now generate RSA + keys which can be used for encryption and signing. [GH-3489] * **Identity System**: Now in open source and with significant enhancements, Identity is an integrated system for understanding users across tokens and enabling easier management of users directly and via groups. @@ -42,8 +46,25 @@ FEATURES: * **Sentinel Integration (Enterprise)**: Take advantage of HashiCorp Sentinel to create extremely flexible access control policies -- even on unauthenticated endpoints. - * **RSA Support for Transit Backend**: Transit backend can now generate RSA - keys which can be used for encryption and signing. [GH-3489] + * **Barrier Rekey Support for Auto-Unseal (Enterprise)**: When using auto-unsealing + functionality, the `rekey` operation is now supported; it uses recovery keys + to authorize the master key rekey. + * **Operation Token for Disaster Recovery Actions (Enterprise)**: When using + Disaster Recovery replication, a token can be created that can be used to + authorize actions such as promotion and updating primary information, rather + than using recovery keys. + * **Trigger Auto-Unseal with Recovery Keys (Enterprise)**: When using + auto-unsealing, a request to unseal Vault can be triggered by a threshold of + recovery keys, rather than requiring the Vault process to be restarted. + * **UI Redesign (Enterprise)**: All new experience for the Vault Enterprise + UI. The look and feel has been completely redesigned to give users a better + experience and make managing secrets fast and easy. + * **UI: SSH Secret Backend (Enterprise)**: Configure an SSH secret backend, + create and browse roles. And use them to sign keys or generate one time + passwords. + * **UI: AWS Secret Backend (Enterprise)**: You can now configure the AWS + backend via the Vault Enterprise UI. In addition you can create roles, + browse the roles and Generate IAM Credentials from them in the UI. 
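(As an illustration, not part of the changelog above: a hedged sketch of exercising the new RSA support in the `transit` backend through the Go API client. It assumes a transit backend mounted at `transit/`, `VAULT_ADDR`/`VAULT_TOKEN` set in the environment, and the `rsa-2048` key type name; adjust these assumptions to your deployment.)

```go
package main

import (
	"encoding/base64"
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	// DefaultConfig reads VAULT_ADDR and related settings from the environment.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Create an RSA key in transit (assumed mount path "transit/" and
	// key type name "rsa-2048").
	if _, err := client.Logical().Write("transit/keys/app-signing", map[string]interface{}{
		"type": "rsa-2048",
	}); err != nil {
		log.Fatal(err)
	}

	// Sign some data with it; transit expects base64-encoded input.
	secret, err := client.Logical().Write("transit/sign/app-signing", map[string]interface{}{
		"input": base64.StdEncoding.EncodeToString([]byte("hello world")),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(secret.Data["signature"])
}
```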
IMPROVEMENTS: @@ -64,6 +85,9 @@ IMPROVEMENTS: * secret/transit: Sign and verify operations now support a `none` hash algorithm to allow signing/verifying pre-hashed data [GH-3448] * secret/database: Add the ability to glob allowed roles in the Database Backend [GH-3387] + * ui (enterprise): Support for RSA keys in the transit backend + * ui (enterprise): Support for DR Operation Token generation, promoting, and + updating primary on DR Secondary clusters BUG FIXES: @@ -88,6 +112,8 @@ BUG FIXES: * plugins: Allow response errors to be returned from backend plugins [GH-3412] * secret/transit: Fix panic if the length of the input ciphertext was less than the expected nonce length [GH-3521] + * ui (enterprise): Reinstate support for generic secret backends - this was + erroneously removed in a previous release ## 0.8.3 (September 19th, 2017) From bdac1854478538052ba5b7ec9a9ec688d35a3335 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Tue, 14 Nov 2017 11:57:40 -0500 Subject: [PATCH 165/329] Cut version 0.9.0 From e56f1beda922d191049597b7aabe1f6e08618557 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Tue, 14 Nov 2017 12:26:58 -0500 Subject: [PATCH 166/329] Fix some broken links --- website/source/docs/configuration/index.html.md | 4 ++-- website/source/docs/configuration/seal/index.html.md | 4 ++-- website/source/docs/enterprise/hsm/behavior.html.md | 2 +- website/source/docs/enterprise/hsm/security.html.md | 2 +- website/source/docs/enterprise/sealwrap/index.html.md | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/website/source/docs/configuration/index.html.md b/website/source/docs/configuration/index.html.md index 7b0bc8120b..7660e1a461 100644 --- a/website/source/docs/configuration/index.html.md +++ b/website/source/docs/configuration/index.html.md @@ -59,7 +59,7 @@ to specify where the configuration is. Vault is listening for API requests. - `seal` ([Seal][seal]: nil) – Configures the seal type to use for - [seal wrapping][sealwrapping] as an additional layer of data protection. + [seal wrapping][sealwrap] as an additional layer of data protection. - `cache_size` `(string: "32000")` – Specifies the size of the read cache used by the physical storage subsystem. The value is in number of entries, so the @@ -136,6 +136,6 @@ The following parameters are used on backends that support [high availability][h [storage-backend]: /docs/configuration/storage/index.html [listener]: /docs/configuration/listener/index.html [seal]: /docs/configuration/seal/index.html -[sealwrapping]: /docs/enterprise/sealwrapping/index.html +[sealwrap]: /docs/enterprise/sealwrap/index.html [telemetry]: /docs/configuration/telemetry.html [high-availability]: /docs/concepts/ha.html diff --git a/website/source/docs/configuration/seal/index.html.md b/website/source/docs/configuration/seal/index.html.md index e48f4d83a4..ba8ab542db 100644 --- a/website/source/docs/configuration/seal/index.html.md +++ b/website/source/docs/configuration/seal/index.html.md @@ -14,7 +14,7 @@ master key. This stanza is optional, and in the case of the master key, Vault will use the Shamir algorithm to cryptographically split the master key if this is not configured. -As of Vault 0.9.0, the seal can also be used for [seal wrapping][sealwrapping] to +As of Vault 0.9.0, the seal can also be used for [seal wrapping][sealwrap] to add an extra layer of protection and satisfy compliance and regulatory requirements. 
## Configuration @@ -39,4 +39,4 @@ seal "pkcs11" { For configuration options which also read an environment variable, the environment variable will take precedence over values in the configuration file. -[sealwrapping]: /docs/enterprise/sealwrapping/index.html \ No newline at end of file +[sealwrap]: /docs/enterprise/sealwrap/index.html diff --git a/website/source/docs/enterprise/hsm/behavior.html.md b/website/source/docs/enterprise/hsm/behavior.html.md index 76042b343a..3cce09bdff 100644 --- a/website/source/docs/enterprise/hsm/behavior.html.md +++ b/website/source/docs/enterprise/hsm/behavior.html.md @@ -73,7 +73,7 @@ and their API equivalents in the Additionally, Vault will refuse to initialize if the option has not been set to generate a key but no key is found. See -[Configuration](/docs/vault-enterprise/hsm/configuration.html) for more details. +[Configuration](/docs/configuration/seal/pkcs11.html) for more details. ### Rekeying diff --git a/website/source/docs/enterprise/hsm/security.html.md b/website/source/docs/enterprise/hsm/security.html.md index 18424613a3..c326e02cc8 100644 --- a/website/source/docs/enterprise/hsm/security.html.md +++ b/website/source/docs/enterprise/hsm/security.html.md @@ -23,7 +23,7 @@ stored in Vault's configuration file, read access to the file should be tightly controlled to appropriate users. (Vault's configuration file should always have tight write controls.) Rather than storing these values into Vault's configuration file, they can also be supplied via the environment; see the -[Configuration](/docs/vault-enterprise/hsm/configuration.html) page for more details. +[Configuration](/docs/configuration/seal/pkcs11.html) page for more details. The attack surface of stolen PKCS#11 credentials depends highly on the individual HSM, but generally speaking, it should be assumed that if an diff --git a/website/source/docs/enterprise/sealwrap/index.html.md b/website/source/docs/enterprise/sealwrap/index.html.md index 098b354569..9840a22d49 100644 --- a/website/source/docs/enterprise/sealwrap/index.html.md +++ b/website/source/docs/enterprise/sealwrap/index.html.md @@ -10,7 +10,7 @@ description: |- # Seal Wrap Vault Enterprise features a mechanism to wrap values with an extra layer of -encryption for supporting [seals](/docs/configuration/seal.html). This adds an +encryption for supporting [seals](/docs/configuration/seal/index.html). This adds an extra layer of protection and is useful in some compliance and regulatory environments, including FIPS 140-2 environments. From 8a9d2e7df9fbe3f943db2e54017104e2885a5bb0 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Tue, 14 Nov 2017 12:34:28 -0500 Subject: [PATCH 167/329] Minor website wording updates --- website/source/docs/enterprise/sealwrap/index.html.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/source/docs/enterprise/sealwrap/index.html.md b/website/source/docs/enterprise/sealwrap/index.html.md index 9840a22d49..8aa325462f 100644 --- a/website/source/docs/enterprise/sealwrap/index.html.md +++ b/website/source/docs/enterprise/sealwrap/index.html.md @@ -27,14 +27,14 @@ KeyStorage and KeyTransit requirements. This is on by default for many parts of Vault and opt-in for each individual mount; see the Activating Seal Wrapping section below for details. 
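(As an illustration, not part of the patch above: a sketch of how a mount might opt in to seal wrapping from the Go API client. The `SealWrap`/`seal_wrap` mount option used here is an assumption based on the "Activating Seal Wrapping" section referenced above and is not shown in this diff; verify the exact option name and availability against the Enterprise documentation for your version.)

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Assumption: sys/mounts accepts a seal_wrap flag at mount time, so the
	// new mount's storage entries are wrapped by the configured seal.
	err = client.Sys().Mount("kv-wrapped", &api.MountInput{
		Type:     "kv", // or "generic" on older versions
		SealWrap: true, // assumed field; not part of the diff above
	})
	if err != nil {
		log.Fatal(err)
	}
}
```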
-[Download the current certification letter](/docs/enterprise/sealwrap/Vault_Compliance_Letter_signed.pdf) +[Download the current compliance letter](/docs/enterprise/sealwrap/Vault_Compliance_Letter_signed.pdf) ### Updates Since The Latest FIPS Compliance Audit The following are values that take advantage of seal wrapping in the current -release of Vault that have not yet been certified by Leidos. The mechanism for -seal wrapping is the same, they simply were not specifically evaluated by the -auditors. +release of Vault that have not yet been asserted as compliant by Leidos. The +mechanism for seal wrapping is the same, they simply were not specifically +evaluated by the auditors. * Root tokens * Replication secondary activation tokens From 249417481f64e1db2e9fbbf5528a899db42b1be5 Mon Sep 17 00:00:00 2001 From: Seth Vargo Date: Tue, 14 Nov 2017 13:11:55 -0500 Subject: [PATCH 168/329] Use super to show enterprise --- website/source/layouts/docs.erb | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb index c24c8dd3d8..a97ddef9c9 100644 --- a/website/source/layouts/docs.erb +++ b/website/source/layouts/docs.erb @@ -98,13 +98,13 @@ seal From 09366b573c8301c6e3e4d0b221640e594773cca6 Mon Sep 17 00:00:00 2001 From: Seth Vargo Date: Tue, 14 Nov 2017 13:12:20 -0500 Subject: [PATCH 169/329] Add an auto-unseal page to the docs This helps with SEO and also is where I'd expect auto unsealing to be referenced. --- .../docs/enterprise/auto-unseal/index.html.md | 32 +++++++++++++++++++ website/source/layouts/docs.erb | 17 ++++++---- 2 files changed, 42 insertions(+), 7 deletions(-) create mode 100644 website/source/docs/enterprise/auto-unseal/index.html.md diff --git a/website/source/docs/enterprise/auto-unseal/index.html.md b/website/source/docs/enterprise/auto-unseal/index.html.md new file mode 100644 index 0000000000..3b2398c2ff --- /dev/null +++ b/website/source/docs/enterprise/auto-unseal/index.html.md @@ -0,0 +1,32 @@ +--- +layout: "docs" +page_title: "Vault Enterprise Auto Unseal" +sidebar_current: "docs-vault-enterprise-auto-unseal" +description: |- + Vault Enterprise supports automatic unsealing via cloud technologies like KMS. +--- + +# Vault Enterprise Auto Unseal + +As of version 0.9, Vault Enterprise supports opt-in automatic unsealing via +cloud technologies such Amazon KMS or Google Cloud KMS. This feature enables +operators to delegate the unsealing process to trusted cloud providers to ease +operations in the event of partial failure and to aid in the creation of new or +ephemeral clusters. + +## Enabling Auto Unseal + +Automatic unsealing is not enabled by default. To enable automatic unsealing, +specify the `seal` stanza in your Vault configuration file: + +```hcl +seal "awskms" { + aws_region = "us-east-1" + access_key = "..." + secret_key = "..." + kms_key_id = "..." +} +``` + +For a complete list of examples and supported technologies, please see the +[seal documentation](/docs/configuration/seal/index.html). 
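(As an illustration, not part of the patch above: once a `seal` stanza like the one shown is in place, an operator can confirm that a restarted node came up unsealed. This is a minimal sketch using the Go API client's seal-status call, assuming `VAULT_ADDR` points at the node being checked.)

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// sys/seal-status is unauthenticated, so this works right after boot.
	status, err := client.Sys().SealStatus()
	if err != nil {
		log.Fatal(err)
	}

	if status.Sealed {
		fmt.Println("node is still sealed; check the seal stanza and KMS permissions")
	} else {
		fmt.Printf("node is unsealed (threshold %d of %d)\n", status.T, status.N)
	}
}
```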
diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb index a97ddef9c9..584280a25c 100644 --- a/website/source/layouts/docs.erb +++ b/website/source/layouts/docs.erb @@ -307,7 +307,7 @@ > Google Cloud - + > Kubernetes @@ -379,10 +379,13 @@ > Vault Enterprise From cb9171afbc6975d5f7ba8be905bb71592af476a7 Mon Sep 17 00:00:00 2001 From: Calvin Leung Huang Date: Tue, 14 Nov 2017 13:34:40 -0500 Subject: [PATCH 173/329] changelog++ --- CHANGELOG.md | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d58ba6cebb..b6e0998db0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,7 +24,16 @@ DEPRECATIONS/CHANGES: a map of each key with a corresponding object containing the `key_type`. * More granularity in audit logs: Audit request and response entires are still in RFC3339 format but now have a granularity of nanoseconds. - + * High availability related values have been moved out of the `storage` and + `ha_storage` stanzas, and into the top-level configuration. `redirect_addr` + has been renamed to `api_addr`. The stanzas still support accepting + HA-related values to maintain backward compatibility, but top-level values + will take precedence. + * A new `seal` stanza has been added to the configuration file, which is + optional and enables configuration of the seal type to use for additional + data protection, such as using HSM or Cloud KMS solutions to encrypt and + decrypt data. + FEATURES: * **RSA Support for Transit Backend**: Transit backend can now generate RSA From ce49d77f861f21257a2f6512a14b7723fada7a8e Mon Sep 17 00:00:00 2001 From: Paul Pieralde Date: Tue, 14 Nov 2017 11:26:26 -0800 Subject: [PATCH 174/329] Docs change for Policy API (#3584) vault 0.9.0 deprecated the term `rules` in favor of the term `policy` in several of the /sys/policy APIs. The expected return state of 200 SUCCESS_NO_DATA only happens if the `policy` term is used. A response including the deprecation notice and a 204 SUCCESS_WITH_DATA status code is returned when `rules` is applied. --- website/source/api/system/policy.html.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/source/api/system/policy.html.md b/website/source/api/system/policy.html.md index e4a372373f..2a5e0cf042 100644 --- a/website/source/api/system/policy.html.md +++ b/website/source/api/system/policy.html.md @@ -36,7 +36,7 @@ $ curl \ ## Read Policy -This endpoint retrieve the rules for the named policy. +This endpoint retrieve the policy body for the named policy. | Method | Path | Produces | | :------- | :--------------------------- | :--------------------- | @@ -59,7 +59,7 @@ $ curl \ ```json { - "rules": "path \"secret/foo\" {..." + "policy": "path \"secret/foo\" {..." } ``` @@ -77,13 +77,13 @@ updated, it takes effect immediately to all associated users. - `name` `(string: )` – Specifies the name of the policy to create. This is specified as part of the request URL. -- `rules` `(string: )` - Specifies the policy document. +- `policy` `(string: )` - Specifies the policy document. ### Sample Payload ```json { - "rules": "path \"secret/foo\" {..." + "policy": "path \"secret/foo\" {..." 
} ``` From 95d4f68d26931ab6293a80fe3f8f0d7d3d24bb37 Mon Sep 17 00:00:00 2001 From: Chris Hoffman Date: Tue, 14 Nov 2017 16:15:09 -0500 Subject: [PATCH 175/329] adding licensing docs (#3585) --- .../source/api/system/control-group.html.md | 2 + website/source/api/system/license.html.md | 78 +++++++++++++++++++ website/source/layouts/api.erb | 3 + 3 files changed, 83 insertions(+) create mode 100644 website/source/api/system/license.html.md diff --git a/website/source/api/system/control-group.html.md b/website/source/api/system/control-group.html.md index 516e7ee346..051ac3e5fb 100644 --- a/website/source/api/system/control-group.html.md +++ b/website/source/api/system/control-group.html.md @@ -8,6 +8,8 @@ description: |- ## Authorize Control Group Request +~> **Enterprise Only** – These endpoints require Vault Enterprise. + This endpoint authorizes a control group request. | Method | Path | Produces | diff --git a/website/source/api/system/license.html.md b/website/source/api/system/license.html.md new file mode 100644 index 0000000000..e290ab43b4 --- /dev/null +++ b/website/source/api/system/license.html.md @@ -0,0 +1,78 @@ +--- +layout: "api" +page_title: "/sys/license - HTTP API" +sidebar_current: "docs-http-system-license" +description: |- + The `/sys/license` endpoint is used to view and update the license used in + Vault. +--- + +# `/sys/license` + +~> **Enterprise Only** – These endpoints require Vault Enterprise. + +The `/sys/license` endpoint is used to view and update the license used in +Vault. + +## Read License + +This endpoint returns information about the currently installed license. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `GET` | `/sys/license` | `200 application/json` | + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." \ + https://vault.rocks/v1/sys/license +``` + +### Sample Response + +```json +{ + "data": { + "expiration_time": "2017-11-14T16:34:36.546753-05:00", + "features": [ + "UI", + "HSM", + "Performance Replication", + "DR Replication" + ], + "license_id": "temporary", + "start_time": "2017-11-14T16:04:36.546753-05:00" + }, + "warnings": [ + "time left on license is 29m33s" + ] +} +``` + +## Install License + +This endpoint is used to install a license into Vault. + +| Method | Path | Produces | +| :------- | :--------------------------- | :--------------------- | +| `PUT` | `/sys/license` | `204 (empty body)` | + +### Sample Payload + +```json +{ + "text": "01ABCDEFG..." +} +``` + +### Sample Request + +``` +$ curl \ + --header "X-Vault-Token: ..." 
\ + --request PUT \ + --data @payload.json \ + https://vault.rocks/v1/sys/license +``` diff --git a/website/source/layouts/api.erb b/website/source/layouts/api.erb index db38c42882..796767b9fd 100644 --- a/website/source/layouts/api.erb +++ b/website/source/layouts/api.erb @@ -208,6 +208,9 @@ > /sys/leases + > + /sys/license + > /sys/mfa From 87b6919dea55da61d7cd444b2442cabb8ede8ab1 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Thu, 21 Dec 2017 08:39:55 -0500 Subject: [PATCH 317/329] Cut version 0.9.1 From 9c7e739ee7220f4aaeca6f40c3abfb19e15f9865 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Thu, 21 Dec 2017 09:00:35 -0500 Subject: [PATCH 318/329] Port website changes from ent side --- website/source/docs/enterprise/sealwrap/index.html.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/website/source/docs/enterprise/sealwrap/index.html.md b/website/source/docs/enterprise/sealwrap/index.html.md index 8aa325462f..5b33558fe2 100644 --- a/website/source/docs/enterprise/sealwrap/index.html.md +++ b/website/source/docs/enterprise/sealwrap/index.html.md @@ -38,6 +38,8 @@ evaluated by the auditors. * Root tokens * Replication secondary activation tokens +* Client authentication information for the GCP Auth Backend +* Client authentication information for the Kubernetes Auth Backend ## Activating Seal Wrapping From 3f3e96d2a32c198849fd3a21f9c1e3943972b06b Mon Sep 17 00:00:00 2001 From: Brian Nuszkowski Date: Tue, 26 Dec 2017 13:40:44 -0500 Subject: [PATCH 319/329] =?UTF-8?q?Add=20the=20ability=20to=20pass=20in=20?= =?UTF-8?q?mfa=20parameters=20when=20authenticating=20via=20the=E2=80=A6?= =?UTF-8?q?=20(#3729)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- builtin/credential/okta/cli.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/builtin/credential/okta/cli.go b/builtin/credential/okta/cli.go index f5f850209e..8939e2a19f 100644 --- a/builtin/credential/okta/cli.go +++ b/builtin/credential/okta/cli.go @@ -38,6 +38,15 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, erro "password": password, } + mfa_method, ok := m["method"] + if ok { + data["method"] = mfa_method + } + mfa_passcode, ok := m["passcode"] + if ok { + data["passcode"] = mfa_passcode + } + path := fmt.Sprintf("auth/%s/login/%s", mount, username) secret, err := c.Logical().Write(path, data) if err != nil { From 2c5b6909c9b839ff2b8d2ba8709c57843d17580b Mon Sep 17 00:00:00 2001 From: Brian Shumate Date: Tue, 26 Dec 2017 13:48:45 -0500 Subject: [PATCH 320/329] Update backend config docs - addresses #3718 (#3724) --- website/source/docs/configuration/storage/consul.html.md | 2 +- website/source/docs/configuration/storage/etcd.html.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/website/source/docs/configuration/storage/consul.html.md b/website/source/docs/configuration/storage/consul.html.md index 571fb00e01..b8f0ac3510 100644 --- a/website/source/docs/configuration/storage/consul.html.md +++ b/website/source/docs/configuration/storage/consul.html.md @@ -66,7 +66,7 @@ at Consul's service discovery layer. [consistency mode][consul-consistency]. Possible values are `"default"` or `"strong"`. -- `disable_registration` `(bool: false)` – Specifies whether Vault should +- `disable_registration` `(string: "false")` – Specifies whether Vault should register itself with Consul. 
- `max_parallel` `(string: "128")` – Specifies the maximum number of concurrent diff --git a/website/source/docs/configuration/storage/etcd.html.md b/website/source/docs/configuration/storage/etcd.html.md index cb0bf2bd51..46d981c2f9 100644 --- a/website/source/docs/configuration/storage/etcd.html.md +++ b/website/source/docs/configuration/storage/etcd.html.md @@ -45,7 +45,7 @@ storage "etcd" { version is 3.1+ and there has been no data written using the v2 API, the auto-detected default is v3. -- `ha_enabled` `(bool: false)` – Specifies if high availability should be +- `ha_enabled` `(string: "false")` – Specifies if high availability should be enabled. This can also be provided via the environment variable `ETCD_HA_ENABLED`. From f25f546eb9cfb1499076e56cda312451c7fc3f7d Mon Sep 17 00:00:00 2001 From: Brian Shumate Date: Tue, 26 Dec 2017 13:51:15 -0500 Subject: [PATCH 321/329] Docs: Updated Telemetry documentation (#3722) --- .../source/docs/internals/telemetry.html.md | 691 ++++++++++++++---- 1 file changed, 543 insertions(+), 148 deletions(-) diff --git a/website/source/docs/internals/telemetry.html.md b/website/source/docs/internals/telemetry.html.md index b3fe6cdba2..f290990c19 100644 --- a/website/source/docs/internals/telemetry.html.md +++ b/website/source/docs/internals/telemetry.html.md @@ -8,178 +8,573 @@ description: |- # Telemetry -The Vault agent collects various runtime metrics about the performance of -different libraries and subsystems. These metrics are aggregated on a ten -second interval and are retained for one minute. +The Vault server process collects various runtime metrics about the performance of different libraries and subsystems. These metrics are aggregated on a ten second interval and are retained for one minute. -To view this data, you must send a signal to the Vault process: on Unix, -this is `USR1` while on Windows it is `BREAK`. Once Vault receives the signal, -it will dump the current telemetry information to the agent's `stderr`. +To view the raw data, you must send a signal to the Vault process: on Unix-style operating systems, this is `USR1` while on Windows it is `BREAK`. When the Vault process receives this signal it will dump the current telemetry information to the process's `stderr`. -This telemetry information can be used for debugging or otherwise -getting a better view of what Vault is doing. +This telemetry information can be used for debugging or otherwise getting a better view of what Vault is doing. -Telemetry information can be streamed to both [statsite](https://github.com/armon/statsite) -as well as statsd based on providing the appropriate configuration options. +Telemetry information can also be streamed directly from Vault to a range of metrics aggregation solutions as described in the [telemetry Stanza documentation][telemetry-stanza]. 
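For instance, a minimal sketch of such a configuration (only two options are shown; the telemetry stanza documentation linked above is the authoritative reference) could forward metrics to a local statsite agent:

```hcl
# Minimal telemetry stanza: forward metrics to a statsite agent on localhost.
# See the telemetry stanza documentation for the full list of options.
telemetry {
  statsite_address = "127.0.0.1:8125"

  # Do not prefix gauge values with the local hostname.
  disable_hostname = true
}
```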
-Below is sample output of a telemetry dump: +The following is an example telemetry dump snippet: ```text -[2015-04-20 12:24:30 -0700 PDT][G] 'vault.runtime.num_goroutines': 12.000 -[2015-04-20 12:24:30 -0700 PDT][G] 'vault.runtime.free_count': 11882.000 -[2015-04-20 12:24:30 -0700 PDT][G] 'vault.runtime.total_gc_runs': 9.000 -[2015-04-20 12:24:30 -0700 PDT][G] 'vault.expire.num_leases': 1.000 -[2015-04-20 12:24:30 -0700 PDT][G] 'vault.runtime.alloc_bytes': 502992.000 -[2015-04-20 12:24:30 -0700 PDT][G] 'vault.runtime.sys_bytes': 3999992.000 -[2015-04-20 12:24:30 -0700 PDT][G] 'vault.runtime.malloc_count': 17315.000 -[2015-04-20 12:24:30 -0700 PDT][G] 'vault.runtime.heap_objects': 5433.000 -[2015-04-20 12:24:30 -0700 PDT][G] 'vault.runtime.total_gc_pause_ns': 3794124.000 -[2015-04-20 12:24:30 -0700 PDT][S] 'vault.audit.log_response': Count: 2 Min: 0.001 Mean: 0.001 Max: 0.001 Stddev: 0.000 Sum: 0.002 -[2015-04-20 12:24:30 -0700 PDT][S] 'vault.route.read.secret-': Count: 1 Sum: 0.036 -[2015-04-20 12:24:30 -0700 PDT][S] 'vault.barrier.get': Count: 3 Min: 0.004 Mean: 0.021 Max: 0.050 Stddev: 0.025 Sum: 0.064 -[2015-04-20 12:24:30 -0700 PDT][S] 'vault.token.lookup': Count: 2 Min: 0.040 Mean: 0.074 Max: 0.108 Stddev: 0.048 Sum: 0.148 -[2015-04-20 12:24:30 -0700 PDT][S] 'vault.policy.get_policy': Count: 2 Min: 0.003 Mean: 0.004 Max: 0.005 Stddev: 0.001 Sum: 0.009 -[2015-04-20 12:24:30 -0700 PDT][S] 'vault.core.check_token': Count: 2 Min: 0.053 Mean: 0.087 Max: 0.121 Stddev: 0.048 Sum: 0.174 -[2015-04-20 12:24:30 -0700 PDT][S] 'vault.audit.log_request': Count: 2 Min: 0.001 Mean: 0.001 Max: 0.001 Stddev: 0.000 Sum: 0.002 -[2015-04-20 12:24:30 -0700 PDT][S] 'vault.barrier.put': Count: 3 Min: 0.004 Mean: 0.010 Max: 0.019 Stddev: 0.008 Sum: 0.029 -[2015-04-20 12:24:30 -0700 PDT][S] 'vault.route.write.secret-': Count: 1 Sum: 0.035 -[2015-04-20 12:24:30 -0700 PDT][S] 'vault.core.handle_request': Count: 2 Min: 0.097 Mean: 0.228 Max: 0.359 Stddev: 0.186 Sum: 0.457 -[2015-04-20 12:24:30 -0700 PDT][S] 'vault.expire.register': Count: 1 Sum: 0.18 +[2017-12-19 20:37:50 +0000 UTC][G] 'vault.7f320e57f9fe.expire.num_leases': 5100.000 +[2017-12-19 20:37:50 +0000 UTC][G] 'vault.7f320e57f9fe.runtime.num_goroutines': 39.000 +[2017-12-19 20:37:50 +0000 UTC][G] 'vault.7f320e57f9fe.runtime.sys_bytes': 222746880.000 +[2017-12-19 20:37:50 +0000 UTC][G] 'vault.7f320e57f9fe.runtime.malloc_count': 109189192.000 +[2017-12-19 20:37:50 +0000 UTC][G] 'vault.7f320e57f9fe.runtime.free_count': 108408240.000 +[2017-12-19 20:37:50 +0000 UTC][G] 'vault.7f320e57f9fe.runtime.heap_objects': 780953.000 +[2017-12-19 20:37:50 +0000 UTC][G] 'vault.7f320e57f9fe.runtime.total_gc_runs': 232.000 +[2017-12-19 20:37:50 +0000 UTC][G] 'vault.7f320e57f9fe.runtime.alloc_bytes': 72954392.000 +[2017-12-19 20:37:50 +0000 UTC][G] 'vault.7f320e57f9fe.runtime.total_gc_pause_ns': 150293024.000 +[2017-12-19 20:37:50 +0000 UTC][S] 'vault.merkle.flushDirty': Count: 100 Min: 0.008 Mean: 0.027 Max: 0.183 Stddev: 0.024 Sum: 2.681 LastUpdated: 2017-12-19 20:37:59.848733035 +0000 UTC m=+10463.692105920 +[2017-12-19 20:37:50 +0000 UTC][S] 'vault.merkle.saveCheckpoint': Count: 4 Min: 0.021 Mean: 0.054 Max: 0.110 Stddev: 0.039 Sum: 0.217 LastUpdated: 2017-12-19 20:37:57.048458148 +0000 UTC m=+10460.891835029 +[2017-12-19 20:38:00 +0000 UTC][G] 'vault.7f320e57f9fe.runtime.alloc_bytes': 73326136.000 +[2017-12-19 20:38:00 +0000 UTC][G] 'vault.7f320e57f9fe.runtime.sys_bytes': 222746880.000 +[2017-12-19 20:38:00 +0000 UTC][G] 'vault.7f320e57f9fe.runtime.malloc_count': 
109195904.000 +[2017-12-19 20:38:00 +0000 UTC][G] 'vault.7f320e57f9fe.runtime.free_count': 108409568.000 +[2017-12-19 20:38:00 +0000 UTC][G] 'vault.7f320e57f9fe.runtime.heap_objects': 786342.000 +[2017-12-19 20:38:00 +0000 UTC][G] 'vault.7f320e57f9fe.runtime.total_gc_pause_ns': 150293024.000 +[2017-12-19 20:38:00 +0000 UTC][G] 'vault.7f320e57f9fe.expire.num_leases': 5100.000 +[2017-12-19 20:38:00 +0000 UTC][G] 'vault.7f320e57f9fe.runtime.num_goroutines': 39.000 +[2017-12-19 20:38:00 +0000 UTC][G] 'vault.7f320e57f9fe.runtime.total_gc_runs': 232.000 +[2017-12-19 20:38:00 +0000 UTC][S] 'vault.route.rollback.consul-': Count: 1 Sum: 0.013 LastUpdated: 2017-12-19 20:38:01.968471579 +0000 UTC m=+10465.811842067 +[2017-12-19 20:38:00 +0000 UTC][S] 'vault.rollback.attempt.consul-': Count: 1 Sum: 0.073 LastUpdated: 2017-12-19 20:38:01.968502743 +0000 UTC m=+10465.811873131 +[2017-12-19 20:38:00 +0000 UTC][S] 'vault.rollback.attempt.pki-': Count: 1 Sum: 0.070 LastUpdated: 2017-12-19 20:38:01.96867005 +0000 UTC m=+10465.812041936 +[2017-12-19 20:38:00 +0000 UTC][S] 'vault.route.rollback.auth-app-id-': Count: 1 Sum: 0.012 LastUpdated: 2017-12-19 20:38:01.969146401 +0000 UTC m=+10465.812516689 +[2017-12-19 20:38:00 +0000 UTC][S] 'vault.rollback.attempt.identity-': Count: 1 Sum: 0.063 LastUpdated: 2017-12-19 20:38:01.968029888 +0000 UTC m=+10465.811400276 +[2017-12-19 20:38:00 +0000 UTC][S] 'vault.rollback.attempt.database-': Count: 1 Sum: 0.066 LastUpdated: 2017-12-19 20:38:01.969394215 +0000 UTC m=+10465.812764603 +[2017-12-19 20:38:00 +0000 UTC][S] 'vault.barrier.get': Count: 16 Min: 0.010 Mean: 0.015 Max: 0.031 Stddev: 0.005 Sum: 0.237 LastUpdated: 2017-12-19 20:38:01.983268118 +0000 UTC m=+10465.826637008 +[2017-12-19 20:38:00 +0000 UTC][S] 'vault.merkle.flushDirty': Count: 100 Min: 0.006 Mean: 0.024 Max: 0.098 Stddev: 0.019 Sum: 2.386 LastUpdated: 2017-12-19 20:38:09.848158309 +0000 UTC m=+10473.691527099 ``` You'll note that log entries are prefixed with the metric type as follows: -- `[C]` is a counter -- `[G]` is a gauge -- `[S]` is a summary +- **[C]** is a counter +- **[G]** is a gauge +- **[S]** is a summary -## Key Metrics -The following tables described the different Vault metrics. The metrics interval can be assumed to be 10 seconds when retrieving metrics using the above described signals. +The following sections describe available Vault metrics. The metrics interval can be assumed to be 10 seconds when manually triggering metrics output using the above described signals. -### Internal Metrics +## Internal Metrics These metrics represent operational aspects of the running Vault instance. 
-| Metric | Description | Unit | Type | -| ---------------- | ----------------------------------| ---- | ---- | -|`vault.audit.log_request`| This measures the number of audit log requests | Number of requests | Summary | -|`vault.audit.log_response`| This measures the number of audit log responses | Number of responses | Summary | -|`vault.audit.log_request_failure` | The number of audit log request failures | Number of failures | Counter | -|`vault.barrier.delete`| This measures the number of delete operations at the barrier | Number of operations | Summary | -|`vault.barrier.get`| This measures the number of get operations at the barrier | Number of operations | Summary | -|`vault.barrier.put`| This measures the number of put operations at the barrier | Number of operations | Summary | -|`vault.barrier.list`| This measures the number of list operations at the barrier | Number of operations | Counter | -|`vault.core.check_token`| This measures the number of token checks | Number of checks | Summary | -|`vault.core.fetch_acl_and_token`| This measures the number of ACL and corresponding token entry fetches | Number of fetches | Summary | -|`vault.core.handle_request`| This measures the number of requests | Number of requests | Summary | -|`vault.core.handle_login_request`| This measures the number of login requests | Number of requests | Summary | -|`vault.core.leadership_setup_failed`| This measures the number of cluster leadership setup failures | Number of failures | Summary | -|`vault.core.leadership_lost`| This measures the number of cluster leadership losses | Number of losses | Summary | -|`vault.core.post_unseal` | This measures the number of post-unseal operations | Number of operations | Gauge | -|`vault.core.pre_seal`| This measures the number of pre-seal operations | Number of operations | Gauge | -|`vault.core.seal-with-request`| This measures the number of requested seal operations | Number of operations | Gauge | -|`vault.core.seal`| This measures the number of seal operations | Number of operations | Gauge | -|`vault.core.seal-internal`| This measures the number of internal seal operations | Number of operations | Gauge | -|`vault.core.step_down`| This measures the number of cluster leadership step downs | Number of stepdowns | Summary | -|`vault.core.unseal`| This measures the number of unseal operations | Number of operations | Summary | -|`vault.runtime.alloc_bytes` | This measures the number of bytes allocated by the Vault process. 
This may burst from time to time but should return to a steady state value.| Number of bytes | Gauge | -|`vault.runtime.free_count`| This measures the number of `free` operations | Number of operations | Gauge | -|`vault.runtime.heap_objects`| This measures the number of objects on the heap and is a good general memory pressure indicator | Number of heap objects | Gauge | -|`vault.runtime.malloc_count`| This measures the number of `malloc` operations | Number of operations | Gauge | -|`vault.runtime.num_goroutines`| This measures the number of goroutines and serves as a general load indicator | Number of goroutines| Gauge | -|`vault.runtime.sys_bytes`| This measures the number of bytes allocated to Vault and includes what is being used by the heap and what has been reclaimed but not given back| Number of bytes | Gauge | -|`vault.runtime.total_gc_pause_ns` | This measures the total garbage collector pause time since the Vault instance was last started | Nanosecond | Summary | -| `vault.runtime.total_gc_runs` | Total number of garbage collection runs since the Vault instance was last started | Number of operations | Gauge | +### vault.audit.log_request -### Policy and Token Metrics +**[S]** Summary (Number of requests): Number of audit log requests + +### vault.audit.log_response + +**[S]** Summary (Number of responses): Number of audit log responses + +### vault.audit.log_request_failure + +**[C]** Counter (Number of failures): Number of audit log request failures + +**NOTE**: This is a particularly important metric. Any non-zero value here indicates that there was a failure to make an audit log request to any of the configured audit log backends; **when Vault cannot log to any of the configured audit log backends it ceases all user operations**, and you should begin troubleshooting the audit log backends immediately if this metric continually increases. + +### vault.audit.log_response_failure + +**[C]** Counter (Number of failures): Number of audit log response failures + +**NOTE**: This is a particularly important metric. Any non-zero value here indicates that there was a failure to receive a response to a request made to one of the configured audit log backends; **when Vault cannot log to any of the configured audit log backends it ceases all user operations**, and you should begin troubleshooting the audit log backends immediately if this metric continually increases. 
+ +### vault.barrier.delete + +**[S]** Summary (Number of operations): Number of DELETE operations at the barrier + +### vault.barrier.get + +**[S]** Summary (Number of operations): Number of GET operations at the barrier + +### vault.barrier.put + +**[S]** Summary (Number of operations): Number of PUT operations at the barrier + +### vault.barrier.list + +**[S]** Summary (Number of operations): Number of LIST operations at the barrier + +### vault.core.check_token + +**[S]** Summary (Number of checks): Number of token checks handled by Vault core + +### vault.core.fetch_acl_and_token + +**[S]** Summary (Number of fetches): Number of ACL and corresponding token entry fetches handled by Vault core + +### vault.core.handle_request + +**[S]** Summary (Number of requests) Number of requests handled by Vault core + +### vault.core.handle_login_request + +**[S]** Summary (Number of requests): Number of login requests handled by Vault core + +### vault.core.leadership_setup_failed + +**[S]** Summary (Number of failures): Number of cluster leadership setup failures which have occurred in a highly available Vault cluster + +This should be monitored and alerted on for overall cluster leadership status + +### vault.core.leadership_lost + +**[S]** Summary (Number of losses): Number of cluster leadership losses which have occurred in a highly available Vault cluster + +This should be monitored and alerted on for overall cluster leadership status + +### vault.core.post_unseal + +**[G]** Gauge (Number of operations): Number of post-unseal operations handled by Vault core + +### vault.core.pre_seal + +**[G]** Gauge (Number of operations) Number of pre-seal operations + +### vault.core.seal-with-request + +**[G]** Gauge (Number of operations): Number of requested seal operations + +### vault.core.seal + +**[G]** Gauge (Number of operations): Number of seal operations + +### vault.core.seal-internal + +**[G]** Gauge (Number of operations): Number of internal seal operations + +### vault.core.step_down + +**[S]** Summary (Number of step downs): Number of cluster leadership step downs + +This should be monitored and alerted on for overall cluster leadership status + +### vault.core.unseal + +**[S]** Summary (Number of operations): Number of unseal operations + +### vault.runtime.alloc_bytes + +**[G]** Gauge (Number of bytes): Number of bytes allocated by the Vault process. + +This could burst from time to time, but should return to a steady state value. + +### vault.runtime.free_count + +**[G]** Gauge (Number of objects): Number of freed objects + +### vault.runtime.heap_objects + +**[G]** Gauge (Number of objects): Number of objects on the heap + +This is a good general memory pressure indicator worth establishing a baseline and thresholds for alerting. + +### vault.runtime.malloc_count + +**[G]** Gauge (Number of objects): Cumulative count of allocated heap objects + +### vault.runtime.num_goroutines + +**[G]** Gauge (Number of goroutines): Number of goroutines + +This serves as a general system load indicator worth establishing a baseline and thresholds for alerting. + +### vault.runtime.sys_bytes + +**[G]** Gauge (Number of bytes): Number of bytes allocated to Vault + +This includes what is being used by Vault's heap and what has been reclaimed but not given back to the operating system. 
+ +### vault.runtime.total_gc_pause_ns + +**[S]** Summary (Nanoseconds): The total garbage collector pause time since Vault was last started + +### vault.runtime.total_gc_runs + +**[G]** Gauge (Number of operations): Total number of garbage collection runs since Vault was last started + +## Policy and Token Metrics These metrics relate to policies and tokens. -| Metric | Description | Unit | Type | -| ---------------- | ----------------------------------| ---- | ---- | -`vault.expire.fetch-lease-times`| This measures the number of lease time fetch operations | Number of operations | Gauge | -`vault.expire.fetch-lease-times-by-token`| This measures the number of operations which compute lease times by token | Number of operations | Gauge | -`vault.expire.num_leases`| This measures the number of expired leases | Number of expired leases | Gauge | -`vault.expire.revoke`| This measures the number of revoke operations | Number of operations | Counter | -`vault.expire.revoke-force`| This measures the number of forced revoke operations | Number of operations | Counter | -`vault.expire.revoke-prefix`| This measures the number of operations used to revoke all secrets with a given prefix | Number of operations | Counter | -`vault.expire.revoke-by-token`| This measures the number of operations used to revoke all secrets issued with a given token | Number of operations | Counter | -`vault.expire.renew`| This measures the number of renew operations | Number of operations | Counter | -`vault.expire.renew-token`| This measures the number of renew token operations to renew a token which does not need to invoke a logical backend | Number of operations | Gauge | -`vault.expire.register`| This measures the number of register operations which take a request and response with an associated lease and register a lease entry with lease ID | Number of operations | Gauge | -`vault.expire.register-auth`| This measures the number of register auth operations which create lease entries without lease ID | Number of operations | Gauge | -`vault.policy.get_policy`| This measures the number of policy get operations | Number of operations | Counter | -`vault.policy.list_policies`| This measures the number of policy list operations | Number of operations | Counter | -`vault.policy.delete_policy`| This measures the number of policy delete operations | Number of operations | Counter | -`vault.policy.set_policy`| This measures the number of policy set operations | Number of operations | Gauge | -`vault.token.create`| This measures the time taken to create a token | Number of operations | Gauge | -`vault.token.createAccessor`| This measures the number of Token ID identifier operations | Number of operations | Gauge | -`vault.token.lookup`| This measures the number of token lookups | Number of lookups | Counter | -`vault.token.revoke`| This measures the number of token revocation operations | Number of operations | Gauge | -`vault.token.revoke-tree`| This measures the number of revoke tree operations | Number of operations | Gauge | -`vault.token.store`| This measures the number of operations to store an updated token entry without writing to the secondary index | Number of operations | Gauge | +### vault.expire.fetch-lease-times -### Authentication Backend Metrics +**[S]** Summary (Nanoseconds): Time taken to fetch lease times + +### vault.expire.fetch-lease-times-by-token + +**[S]** Summary (Nanoseconds): Time taken to fetch lease times by token + +### vault.expire.num_leases + +**[G]** Gauge (Number of leases): Number of all 
leases which are eligible for eventual expiry + +### vault.expire.revoke + +**[S]** Summary (Nanoseconds): Time taken to revoke a token + +### vault.expire.revoke-force + +**[S]** Summary (Nanoseconds): Time taken to forcibly revoke a token + +### vault.expire.revoke-prefix + +**[S]** Summary (Nanoseconds): Time taken to revoke tokens on a prefix + +### vault.expire.revoke-by-token + +**[S]** Summary (Nanoseconds): Time taken to revoke all secrets issued with a given token + +### vault.expire.renew + +**[S]** Summary (Nanoseconds): Time taken to renew a lease + +### vault.expire.renew-token + +**[S]** Summary (Nanoseconds): Time taken to renew a token which does not need to invoke a logical backend + +### vault.expire.register + +**[S]** Summary (Nanoseconds): Time taken for register operations + +Thes operations take a request and response with an associated lease and register a lease entry with lease ID + +### vault.expire.register-auth + +**[S]** Summary (Nanoseconds): Time taken for register authentication operations which create lease entries without lease ID + +### vault.policy.get_policy + +**[S]** Summary (Nanoseconds): Time taken to get a policy + +### vault.policy.list_policies + +**[S]** Summary (Nanoseconds): Time taken to list policies + +### vault.policy.delete_policy + +**[S]** Summary (Nanoseconds): Time taken to delete a policy + +### vault.policy.set_policy + +**[S]** Summary (Nanoseconds): Time taken to set a policy + +### vault.token.create + +**[S]** Summary (Nanoseconds): The time taken to create a token + +### vault.token.createAccessor + +**[S]** Summary (Nanoseconds): The time taken to create a token + +### vault.token.lookup + +**[S]** Summary (Nanoseconds): The time taken to look up a token + +### vault.token.revoke + +**[S]** Summary (Nanoseconds): Time taken to revoke a token + +### vault.token.revoke-tree + +**[S]** Summary (Nanoseconds): Time taken to revoke a token tree + +### vault.token.store + +**[S]** Summary (Nanoseconds): Time taken to store an updated token entry without writing to the secondary index + +## Authentication Backend Metrics These metrics relate to supported authentication backends. 
-| Metric | Description | Unit | Type | -| ---------------- | ----------------------------------| ---- | ---- | -| `vault.rollback.attempt.auth-token-` | This measures the number of rollback operations attempted for authentication tokens backend | Number of operations | Summary | -| `vault.rollback.attempt.auth-ldap-` | This measures the number of rollback operations attempted for the LDAP authentication backend | Number of operations | Summary | -| `vault.rollback.attempt.cubbyhole-` | This measures the number of rollback operations attempted for the cubbyhole authentication backend | Number of operations | Summary | -| `vault.rollback.attempt.secret-` | This measures the number of rollback operations attempted for the kv secret backend | Number of operations | Summary | -| `vault.rollback.attempt.sys-` | This measures the number of rollback operations attempted for the sys backend | Number of operations | Summary | -| `vault.route.rollback.auth-ldap-` | This measures the number of rollback operations for the LDAP authentication backend | Number of operations | Summary | -| `vault.route.rollback.auth-token-` | This measures the number of rollback operations for the authentication tokens backend | Number of operations | Summary | -| `vault.route.rollback.cubbyhole-` | This measures the number of rollback operations for the cubbyhole authentication backend | Number of operations | Summary | -| `vault.route.rollback.secret-` | This measures the number of rollback operations for the kv secret backend | Number of operations | Summary | -| `vault.route.rollback.sys-` | This measures the number of rollback operations for the sys backend | Number of operations | Summary | +### vault.rollback.attempt.auth-token- -### Storage Backend Metrics +**[S]** Summary (Nanoseconds): Time taken to perform a rollback operation for the [token authentication backend][token-auth-backend] -These metrics relate to supported storage backends. 
+### vault.rollback.attempt.auth-ldap- -| Metric | Description | Unit | Type | -| ---------------- | ----------------------------------| ---- | ---- | -|`vault.azure.put` | This measures the number of put operations against the Azure storage backend | Number of operations | Gauge | -|`vault.azure.get` | This measures the number of get operations against the Azure storage backend | Number of operations | Gauge | -|`vault.azure.delete` | This measures the number of delete operations against the Azure storage backend | Number of operations | Gauge | -|`vault.azure.list` | This measures the number of list operations against the Azure storage backend | Number of operations | Gauge | -|`vault.consul.put` | This measures the number of put operations against the Consul storage backend | Number of operations | Gauge | -|`vault.consul.get` | This measures the number of get operations against the Consul storage backend | Number of operations | Gauge | -|`vault.consul.delete` | This measures the number of delete operations against the Consul storage backend | Number of operations | Gauge | -|`vault.consul.list` | This measures the number of list operations against the Consul storage backend | Number of operations | Gauge | -|`vault.dynamodb.put` | This measures the number of put operations against the DynamoDB storage backend | Number of operations | Gauge | -|`vault.dynamodb.get` | This measures the number of get operations against the DynamoDB storage backend | Number of operations | Gauge | -|`vault.dynamodb.delete` | This measures the number of delete operations against the DynamoDB storage backend | Number of operations | Gauge | -|`vault.dynamodb.list` | This measures the number of list operations against the DynamoDB storage backend | Number of operations | Gauge | -|`vault.etcd.put` | This measures the number of put operations against the etcd storage backend | Number of operations | Gauge | -|`vault.etcd.get` | This measures the number of get operations against the etcd storage backend | Number of operations | Gauge | -|`vault.etcd.delete` | This measures the number of delete operations against the etcd storage backend | Number of operations | Gauge | -|`vault.etcd.list` | This measures the number of list operations against the etcd storage backend | Number of operations | Gauge | -|`vault.gcs.put` | This measures the number of put operations against the Google Cloud Storage backend | Number of operations | Gauge | -|`vault.gcs.get` | This measures the number of get operations against the Google Cloud Storage backend | Number of operations | Gauge | -|`vault.gcs.delete` | This measures the number of delete operations against the Google Cloud Storage backend | Number of operations | Gauge | -|`vault.gcs.list` | This measures the number of list operations against the Google Cloud Storage backend | Number of operations | Gauge | -|`vault.mysql.put` | This measures the number of put operations against the MySQL backend | Number of operations | Gauge | -|`vault.mysql.get` | This measures the number of get operations against the MySQL backend | Number of operations | Gauge | -|`vault.mysql.delete` | This measures the number of delete operations against the MySQL backend | Number of operations | Gauge | -|`vault.mysql.list` | This measures the number of list operations against the MySQL backend | Number of operations | Gauge | -|`vault.postgres.put` | This measures the number of put operations against the PostgreSQL backend | Number of operations | Gauge | -|`vault.postgres.get` | This measures the 
number of get operations against the PostgreSQL backend | Number of operations | Gauge | -|`vault.postgres.delete` | This measures the number of delete operations against the PostgreSQL backend | Number of operations | Gauge | -|`vault.postgres.list` | This measures the number of list operations against the PostgreSQL backend | Number of operations | Gauge | -|`vault.s3.put` | This measures the number of put operations against the Amazon S3 backend | Number of operations | Gauge | -|`vault.s3.get` | This measures the number of get operations against the Amazon S3 backend | Number of operations | Gauge | -|`vault.s3.delete` | This measures the number of delete operations against the Amazon S3 backend | Number of operations | Gauge | -|`vault.s3.list` | This measures the number of list operations against the Amazon S3 backend | Number of operations | Gauge | -|`vault.swift.put` | This measures the number of put operations against the OpenStack Swift backend | Number of operations | Gauge | -|`vault.swift.get` | This measures the number of get operations against the OpenStack Swift backend | Number of operations | Gauge | -|`vault.swift.delete` | This measures the number of delete operations against the OpenStack Swift backend | Number of operations | Gauge | -|`vault.swift.list` | This measures the number of list operations against the OpenStack Swift backend | Number of operations | Gauge | -|`vault.zookeeper.put` | This measures the number of put operations against the ZooKeeper backend | Number of operations | Gauge | -|`vault.zookeeper.get` | This measures the number of get operations against the ZooKeeper backend | Number of operations | Gauge | -|`vault.zookeeper.delete` | This measures the number of delete operations against the ZooKeeper backend | Number of operations | Gauge | -|`vault.zookeeper.list` | This measures the number of list operations against the ZooKeeper backend | Number of operations | Gauge | +**[S]** Summary (Nanoseconds): Time taken to perform a rollback operation for the [LDAP authentication backend][ldap-auth-backend] + +### vault.rollback.attempt.cubbyhole- + +**[S]** Summary (Nanoseconds): Time taken to perform a rollback operation for the [Cubbyhole secret backend][cubbyhole-secret-backend] + +### vault.rollback.attempt.secret- + +**[S]** Summary (Nanoseconds): Time taken to perform a rollback operation for the [K/V secret backend][kv-secret-backend] + +### vault.rollback.attempt.sys- + +**[S]** Summary (Nanoseconds): Time taken to perform a rollback operation for the system backend + +### vault.route.rollback.auth-ldap- + +**[S]** Summary (Nanoseconds): Time taken to perform a route rollback operation for the [LDAP authentication backend][ldap-auth-backend] + +### vault.route.rollback.auth-token- + +**[S]** Summary (Nanoseconds): Time taken to perform a route rollback operation for the [token authentication backend][token-auth-backend] + +### vault.route.rollback.cubbyhole- + +**[S]** Summary (Nanoseconds): Time taken to perform a route rollback operation for the [Cubbyhole secret backend][cubbyhole-secret-backend] + +### vault.route.rollback.secret- + +**[S]** Summary (Nanoseconds): Time taken to perform a route rollback operation for the [K/V secret backend][kv-secret-backend] + +### vault.route.rollback.sys- + +**[S]** Summary (Nanoseconds): Time taken to perform a route rollback operation for the system backend + +## Storage Backend Metrics + +These metrics relate to the supported storage backends. 
+ +### vault.azure.put + +**[S]** Summary (Number of operations): Number of put operations against the [Azure storage backend][azure-storage-backend] + +### vault.azure.get + +**[S]** Summary (Number of operations):Number of get operations against the [ +Azure storage backend][azure-storage-backend] + +### vault.azure.delete + +**[S]** Summary (Number of operations):Number of delete operations against the [Azure storage backend][azure-storage-backend] + +### vault.azure.list + +**[S]** Summary (Number of operations):Number of list operations against the [Azure storage backend][azure-storage-backend] + +### vault.cassandra.put + +**[S]** Summary (Number of operations): Number of PUT operations against the [Cassandra storage backend][cassandra-storage-backend] + +### vault.cassandra.get + +**[S]** Summary (Number of operations): Number of GET operations against the [Cassandra storage backend][cassandra-storage-backend] + +### vault.cassandra.delete + +**[S]** Summary (Number of operations): Number of DELETE operations against the [Cassandra storage backend][cassandra-storage-backend] + +### vault.cassandra.list + +**[S]** Summary (Number of operations): Number of LIST operations against the [Cassandra storage backend][cassandra-storage-backend] + +### vault.cockroachdb.put + +**[S]** Summary (Number of operations): Number of PUT operations against the [CockroachDB storage backend][cockroachdb-storage-backend] + +### vault.cockroachdb.get + +**[S]** Summary (Number of operations): Number of GET operations against the [CockroachDB storage backend][cockroachdb-storage-backend] + +### vault.cockroachdb.delete + +**[S]** Summary (Number of operations): Number of DELETE operations against the [CockroachDB storage backend][cockroachdb-storage-backend] + +### vault.cockroachdb.list + +**[S]** Summary (Number of operations): Number of LIST operations against the [CockroachDB storage backend][cockroachdb-storage-backend] + +### vault.consul.put + +**[S]** Summary (Number of operations): Number of PUT operations against the [Consul storage backend][consul-storage-backend] + +### vault.consul.get + +**[S]** Summary (Number of operations): Number of GET operations against the [Consul storage backend][consul-storage-backend] + +### vault.consul.delete + +**[S]** Summary (Number of operations): Number of DELETE operations against the [Consul storage backend][consul-storage-backend] + +### vault.consul.list + +**[S]** Summary (Number of operations): Number of LIST operations against the [Consul storage backend][consul-storage-backend] + +### vault.couchdb.put + +**[S]** Summary (Number of operations): Number of PUT operations against the [CouchDB storage backend][couchdb-storage-backend] + +### vault.couchdb.get + +**[S]** Summary (Number of operations): Number of GET operations against the [CouchDB storage backend][couchdb-storage-backend] + +### vault.couchdb.delete + +**[S]** Summary (Number of operations): Number of DELETE operations against the [CouchDB storage backend][couchdb-storage-backend] + +### vault.couchdb.list + +**[S]** Summary (Number of operations): Number of LIST operations against the [CouchDB storage backend][couchdb-storage-backend] + +### vault.dynamodb.put + +**[S]** Summary (Number of operations): Number of PUT operations against the [DynamoDB storage backend][dynamodb-storage-backend] + +### vault.dynamodb.get + +**[S]** Summary (Number of operations): Number of GET operations against the [DynamoDB storage backend][dynamodb-storage-backend] + +### vault.dynamodb.delete + +**[S]** 
Summary (Number of operations): Number of DELETE operations against the [DynamoDB storage backend][dynamodb-storage-backend] + +### vault.dynamodb.list + +**[S]** Summary (Number of operations): Number of LIST operations against the [DynamoDB storage backend][dynamodb-storage-backend] + +### vault.etcd.put + +**[S]** Summary (Number of operations): Number of PUT operations against the [etcd storage backend][etcd-storage-backend] + +### vault.etcd.get + +**[S]** Summary (Number of operations): Number of GET operations against the [etcd storage backend][etcd-storage-backend] + +### vault.etcd.delete + +**[S]** Summary (Number of operations): Number of DELETE operations against the [etcd storage backend][etcd-storage-backend] + +### vault.etcd.list + +**[S]** Summary (Number of operations): Number of LIST operations against the [etcd storage backend][etcd-storage-backend] + +### vault.gcs.put + +**[S]** Summary (Number of operations): Number of PUT operations against the [Google Cloud Storage storage backend][gcs-storage-backend] + +### vault.gcs.get + +**[S]** Summary (Number of operations): Number of GET operations against the [Google Cloud Storage storage backend][gcs-storage-backend] + +### vault.gcs.delete + +**[S]** Summary (Number of operations): Number of DELETE operations against the [Google Cloud Storage storage backend][gcs-storage-backend] + +### vault.gcs.list + +**[S]** Summary (Number of operations): Number of LIST operations against the [Google Cloud Storage storage backend][gcs-storage-backend] + +### vault.mssql.put + +**[S]** Summary (Number of operations): Number of PUT operations against the [MS-SQL storage backend][mssql-storage-backend] + +### vault.mssql.get + +**[S]** Summary (Number of operations): Number of GET operations against the [MS-SQL storage backend][mssql-storage-backend] + +### vault.mssql.delete + +**[S]** Summary (Number of operations): Number of DELETE operations against the [MS-SQL storage backend][mssql-storage-backend] + +### vault.mssql.list + +**[S]** Summary (Number of operations): Number of LIST operations against the [MS-SQL storage backend][mssql-storage-backend] + +### vault.mysql.put + +**[S]** Summary (Number of operations): Number of PUT operations against the [MySQL storage backend][mysql-storage-backend] + +### vault.mysql.get + +**[S]** Summary (Number of operations): Number of GET operations against the [MySQL storage backend][mysql-storage-backend] + +### vault.mysql.delete + +**[S]** Summary (Number of operations): Number of DELETE operations against the [MySQL storage backend][mysql-storage-backend] + +### vault.mysql.list + +**[S]** Summary (Number of operations): Number of LIST operations against the [MySQL storage backend][mysql-storage-backend] + +### vault.postgres.put + +**[S]** Summary (Number of operations): Number of PUT operations against the [PostgreSQL storage backend][postgresql-storage-backend] + +### vault.postgres.get + +**[S]** Summary (Number of operations): Number of GET operations against the [PostgreSQL storage backend][postgresql-storage-backend] + +### vault.postgres.delete + +**[S]** Summary (Number of operations): Number of DELETE operations against the [PostgreSQL storage backend][postgresql-storage-backend] + +### vault.postgres.list + +**[S]** Summary (Number of operations): Number of LIST operations against the [PostgreSQL storage backend][postgresql-storage-backend] + +### vault.s3.put + +**[S]** Summary (Number of operations): Number of PUT operations against the [Amazon S3 storage 
backend][s3-storage-backend] + +### vault.s3.get + +**[S]** Summary (Number of operations): Number of GET operations against the [Amazon S3 storage backend][s3-storage-backend] + +### vault.s3.delete + +**[S]** Summary (Number of operations): Number of DELETE operations against the [Amazon S3 storage backend][s3-storage-backend] + +### vault.s3.list + +**[S]** Summary (Number of operations): Number of LIST operations against the [Amazon S3 storage backend][s3-storage-backend] + +### vault.swift.put + +**[S]** Summary (Number of operations): Number of PUT operations against the [Swift storage backend][swift-storage-backend] + +### vault.swift.get + +**[S]** Summary (Number of operations): Number of GET operations against the [Swift storage backend][swift-storage-backend] + +### vault.swift.delete + +**[S]** Summary (Number of operations): Number of DELETE operations against the [Swift storage backend][swift-storage-backend] + +### vault.swift.list + +**[S]** Summary (Number of operations): Number of LIST operations against the [Swift storage backend][swift-storage-backend] + +### vault.zookeeper.put + +**[S]** Summary (Number of operations): Number of PUT operations against the [ZooKeeper storage backend][zookeeper-storage-backend] + +### vault.zookeeper.get + +**[S]** Summary (Number of operations): Number of GET operations against the [ZooKeeper storage backend][zookeeper-storage-backend] + +### vault.zookeeper.delete + +**[S]** Summary (Number of operations): Number of DELETE operations against the [ZooKeeper storage backend][zookeeper-storage-backend] + +### vault.zookeeper.list + +**[S]** Summary (Number of operations): Number of LIST operations against the [ZooKeeper storage backend][zookeeper-storage-backend] + +[telemetry-stanza]: /docs/configuration/telemetry.html +[cubbyhole-secret-backend]: /docs/secrets/cubbyhole/index.html +[kv-secret-backend]: /docs/secrets/kv/index.html +[ldap-auth-backend]: /docs/auth/ldap.html +[token-auth-backend]: /docs/auth/token.html +[azure-storage-backend]: /docs/configuration/storage/azure.html +[cassandra-storage-backend]: /docs/configuration/storage/cassandra.html +[cockroachdb-storage-backend]: /docs/configuration/storage/cockroachdb.html +[consul-storage-backend]: /docs/configuration/storage/consul.html +[couchdb-storage-backend]: /docs/configuration/storage/couchdb.html +[dynamodb-storage-backend]: /docs/configuration/storage/dynamodb.html +[etcd-storage-backend]: /docs/configuration/storage/etcd.html +[gcs-storage-backend]: /docs/configuration/storage/google-cloud.html +[mssql-storage-backend]: /docs/configuration/storage/mssql.html +[mysql-storage-backend]: /docs/configuration/storage/mysql.html +[postgresql-storage-backend]: /docs/configuration/storage/postgresql.html +[s3-storage-backend]: /docs/configuration/storage/s3.html +[swift-storage-backend]: /docs/configuration/storage/swift.html +[zookeeper-storage-backend]: /docs/configuration/storage/zookeeper.html From 6201056f112358abd68df8ae636afec49c232610 Mon Sep 17 00:00:00 2001 From: markpaine <71095+markpaine@users.noreply.github.com> Date: Wed, 3 Jan 2018 07:38:16 -0800 Subject: [PATCH 322/329] Spelling correction "datatabse" -> "database" (#3738) --- website/source/api/secret/databases/postgresql.html.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/api/secret/databases/postgresql.html.md b/website/source/api/secret/databases/postgresql.html.md index bb58a523fa..4edb3f42a7 100644 --- a/website/source/api/secret/databases/postgresql.html.md +++ 
b/website/source/api/secret/databases/postgresql.html.md @@ -61,7 +61,7 @@ $ curl \ ## Statements Statements are configured during role creation and are used by the plugin to -determine what is sent to the datatabse on user creation, renewing, and +determine what is sent to the database on user creation, renewing, and revocation. For more information on configuring roles see the [Role API](/api/secret/databases/index.html#create-role) in the Database Backend docs. From 68f87ba6f6cb19556586a0794133f7d027903b18 Mon Sep 17 00:00:00 2001 From: markpaine <71095+markpaine@users.noreply.github.com> Date: Wed, 3 Jan 2018 07:38:55 -0800 Subject: [PATCH 323/329] Spelling correction. "specifig" -> "specific" (#3739) --- website/source/api/secret/databases/index.html.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/api/secret/databases/index.html.md b/website/source/api/secret/databases/index.html.md index 55b8fcb30c..e77eb4ed41 100644 --- a/website/source/api/secret/databases/index.html.md +++ b/website/source/api/secret/databases/index.html.md @@ -21,7 +21,7 @@ any location, please update your API calls accordingly. This endpoint configures the connection string used to communicate with the desired database. In addition to the parameters listed here, each Database -plugin has additional, database plugin specifig, parameters for this endpoint. +plugin has additional, database plugin specific, parameters for this endpoint. Please read the HTTP API for the plugin you'd wish to configure to see the full list of additional parameters. From 3108692119dec3eb09cfbc32fc84af4b501d9909 Mon Sep 17 00:00:00 2001 From: Alexandre Nicastro Date: Wed, 3 Jan 2018 13:47:15 -0200 Subject: [PATCH 324/329] docs: fix typo (change 'a' to 'an' - indefinite article) (#3741) --- website/source/docs/internals/replication.html.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/internals/replication.html.md b/website/source/docs/internals/replication.html.md index 3ce336a850..cc11b19232 100644 --- a/website/source/docs/internals/replication.html.md +++ b/website/source/docs/internals/replication.html.md @@ -90,7 +90,7 @@ will automatically reconcile with the primary. Lastly, clients can speak to any Vault server without a thick client. If a client is communicating with a standby instance, the request is automatically -forwarded to a active instance. Secondary clusters will service reads locally +forwarded to an active instance. Secondary clusters will service reads locally and forward any write requests to the primary cluster. The primary cluster is able to service all request types. From ad0a39dfe10585fca85b2102af34dfd3d54851ae Mon Sep 17 00:00:00 2001 From: dmwilcox Date: Wed, 3 Jan 2018 07:59:18 -0800 Subject: [PATCH 325/329] Update docs to reflect ability to load cold CA certs to output full chains. (#3740) --- helper/certutil/helpers.go | 4 ++-- website/source/api/secret/pki/index.html.md | 11 ++++++++--- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/helper/certutil/helpers.go b/helper/certutil/helpers.go index 4256edb002..b6e61b20b0 100644 --- a/helper/certutil/helpers.go +++ b/helper/certutil/helpers.go @@ -102,8 +102,8 @@ func ParsePKIJSON(input []byte) (*ParsedCertBundle, error) { // ParsePEMBundle takes a string of concatenated PEM-format certificate // and private key values and decodes/parses them, checking validity along -// the way. 
There must be at max two certificates (a certificate and its -// issuing certificate) and one private key. +// the way. The first certificate must be the subject certificate and issuing +// certificates may follow. There must be at most one private key. func ParsePEMBundle(pemBundle string) (*ParsedCertBundle, error) { if len(pemBundle) == 0 { return nil, errutil.UserError{"empty pem bundle"} } diff --git a/website/source/api/secret/pki/index.html.md b/website/source/api/secret/pki/index.html.md index ac84803142..7860df224c 100644 --- a/website/source/api/secret/pki/index.html.md +++ b/website/source/api/secret/pki/index.html.md @@ -175,9 +175,14 @@ $ curl \ ## Submit CA Information This endpoint allows submitting the CA information for the backend via a PEM -file containing the CA certificate and its private key, concatenated. Not needed -if you are generating a self-signed root certificate, and not used if you have a -signed intermediate CA certificate with a generated key (use the +file containing the CA certificate and its private key, concatenated. + +Additional CA certificates may optionally be appended. This is useful when creating an +intermediate CA to ensure a full chain is returned when signing or generating +certificates. + +Not needed if you are generating a self-signed root certificate, and not used +if you have a signed intermediate CA certificate with a generated key (use the `/pki/intermediate/set-signed` endpoint for that). _If you have already set a certificate and key, they will be overridden._ From ec8befbaacd6903025cc124074ceee0de722424a Mon Sep 17 00:00:00 2001 From: Didi Kohen Date: Wed, 3 Jan 2018 18:18:38 +0200 Subject: [PATCH 326/329] Clarify that keybase is supported only in the CLI (#3744) --- website/source/docs/concepts/pgp-gpg-keybase.html.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/website/source/docs/concepts/pgp-gpg-keybase.html.md b/website/source/docs/concepts/pgp-gpg-keybase.html.md index 9e84ae44d2..ca4cc0385d 100644 --- a/website/source/docs/concepts/pgp-gpg-keybase.html.md +++ b/website/source/docs/concepts/pgp-gpg-keybase.html.md @@ -16,6 +16,14 @@ and services like Keybase.io to provide an additional layer of security when performing certain operations. This page details the various PGP integrations, their use, and operation. +Keybase.io support is available only in the command-line tool and not via the +Vault HTTP API; tools that help with initialization should therefore use the Keybase.io +API to obtain the GPG keys needed for a secure initialization if you +want them to use Keybase for keys. + +Once Vault has been initialized, it is possible to use Keybase to decrypt +the shards and unseal normally.
From 326e1ab24cb7174161de0ed2b89a919aea710cb8 Mon Sep 17 00:00:00 2001
From: Brian Nuszkowski
Date: Wed, 3 Jan 2018 12:09:43 -0500
Subject: [PATCH 327/329] Update '/auth/token/revoke-self' endpoint
 documentation to reflect the proper response code (#3735)

---
 website/source/api/auth/token/index.html.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/source/api/auth/token/index.html.md b/website/source/api/auth/token/index.html.md
index 7b6bf98895..1e63e31312 100644
--- a/website/source/api/auth/token/index.html.md
+++ b/website/source/api/auth/token/index.html.md
@@ -437,7 +437,7 @@ revoked, all dynamic secrets generated with it are also revoked.
 
 | Method   | Path                         | Produces               |
 | :------- | :--------------------------- | :--------------------- |
-| `POST`   | `/auth/token/revoke-self`    | `200 application/json` |
+| `POST`   | `/auth/token/revoke-self`    | `204 (empty body)`     |
 
 ### Sample Request

From f57329a37a47c0b06b25f04db91d4a1329bc54fa Mon Sep 17 00:00:00 2001
From: Jon Davies
Date: Wed, 3 Jan 2018 17:11:00 +0000
Subject: [PATCH 328/329] s3.go: Added options to use paths with S3 and the
 ability to disable SSL (#3730)

---
 physical/s3/s3.go                          | 23 +++++++++++++++++--
 .../docs/configuration/storage/s3.html.md  |  8 ++++++-
 2 files changed, 28 insertions(+), 3 deletions(-)

diff --git a/physical/s3/s3.go b/physical/s3/s3.go
index 7118e7da14..5adae1aca2 100644
--- a/physical/s3/s3.go
+++ b/physical/s3/s3.go
@@ -22,6 +22,7 @@ import (
 	cleanhttp "github.com/hashicorp/go-cleanhttp"
 	"github.com/hashicorp/vault/helper/awsutil"
 	"github.com/hashicorp/vault/helper/consts"
+	"github.com/hashicorp/vault/helper/parseutil"
 	"github.com/hashicorp/vault/physical"
 )
 
@@ -72,6 +73,22 @@ func NewS3Backend(conf map[string]string, logger log.Logger) (physical.Backend,
 			}
 		}
 	}
+	s3ForcePathStyleStr, ok := conf["s3_force_path_style"]
+	if !ok {
+		s3ForcePathStyleStr = "false"
+	}
+	s3ForcePathStyleBool, err := parseutil.ParseBool(s3ForcePathStyleStr)
+	if err != nil {
+		return nil, fmt.Errorf("invalid boolean set for s3_force_path_style: '%s'", s3ForcePathStyleStr)
+	}
+	disableSSLStr, ok := conf["disable_ssl"]
+	if !ok {
+		disableSSLStr = "false"
+	}
+	disableSSLBool, err := parseutil.ParseBool(disableSSLStr)
+	if err != nil {
+		return nil, fmt.Errorf("invalid boolean set for disable_ssl: '%s'", disableSSLStr)
+	}
 
 	credsConfig := &awsutil.CredentialsConfig{
 		AccessKey: accessKey,
@@ -91,8 +108,10 @@ func NewS3Backend(conf map[string]string, logger log.Logger) (physical.Backend,
 		HTTPClient: &http.Client{
 			Transport: pooledTransport,
 		},
-		Endpoint: aws.String(endpoint),
-		Region:   aws.String(region),
+		Endpoint:         aws.String(endpoint),
+		Region:           aws.String(region),
+		S3ForcePathStyle: aws.Bool(s3ForcePathStyleBool),
+		DisableSSL:       aws.Bool(disableSSLBool),
 	}))
 
 	_, err = s3conn.ListObjects(&s3.ListObjectsInput{Bucket: &bucket})
diff --git a/website/source/docs/configuration/storage/s3.html.md b/website/source/docs/configuration/storage/s3.html.md
index d18507e35a..247b1fe973 100644
--- a/website/source/docs/configuration/storage/s3.html.md
+++ b/website/source/docs/configuration/storage/s3.html.md
@@ -58,9 +58,15 @@ cause Vault to attempt to retrieve credentials from the AWS metadata service.
 - `session_token` `(string: "")` – Specifies the AWS session token.
   This can also be provided via the environment variable `AWS_SESSION_TOKEN`.
 
-- `max_parallel` `(string: "128")` – Specifies The maximum number of concurrent
+- `max_parallel` `(string: "128")` – Specifies the maximum number of concurrent
   requests to S3.
 
+- `s3_force_path_style` `(string: "false")` - Specifies whether to use host
+  bucket style domains with the configured endpoint.
+
+- `disable_ssl` `(string: "false")` - Specifies if SSL should be used for the
+  endpoint connection (highly recommended not to disable for production).
+
 ## `s3` Examples
 
 ### Default Example
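The two options added in the patch above are most useful together when pointing Vault at a non-AWS, S3-compatible object store. A hedged configuration sketch follows, in the same stanza style as the `s3` examples section; the bucket name and endpoint address are placeholders.

```hcl
storage "s3" {
  bucket   = "my-vault-data"
  endpoint = "http://s3-compatible.internal:9000"

  # Many S3-compatible stores require path-style bucket addressing.
  s3_force_path_style = "true"

  # Disabling SSL is highly discouraged outside of test environments.
  disable_ssl = "true"
}
```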
From eb7fd85eb5add4f6f532db1400a55abd2b3205e9 Mon Sep 17 00:00:00 2001
From: Jeff Mitchell
Date: Wed, 3 Jan 2018 12:12:07 -0500
Subject: [PATCH 329/329] changelog++

---
 CHANGELOG.md | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index a6f0f8b3ba..0f373b4e4b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,10 @@
+## 0.9.2 (Unreleased)
+
+IMPROVEMENTS:
+
+ * physical/s3: Allow using paths with S3 for non-AWS deployments [GH-3730]
+ * physical/s3: Add ability to disable SSL for non-AWS deployments [GH-3730]
+
 ## 0.9.1 (December 21st, 2017)
 
 DEPRECATIONS/CHANGES: