Update godeps
536  vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go (generated, vendored)
@@ -185,9 +185,9 @@ func (c *DynamoDB) BatchWriteItemRequest(input *BatchWriteItemInput) (req *reque
//
// There are more than 25 requests in the batch.
//
// Any individual item in a batch exceeds 400 KB.
//
// The total request size exceeds 16 MB.
func (c *DynamoDB) BatchWriteItem(input *BatchWriteItemInput) (*BatchWriteItemOutput, error) {
	req, out := c.BatchWriteItemRequest(input)
	err := req.Send()
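// Illustrative sketch (not part of the vendored SDK): a single BatchWriteItem
// call that stays inside the limits listed above (at most 25 requests, 400 KB
// per item, 16 MB per request). The client value svc, the table name, and the
// attribute names are placeholders.
//
//	_, err := svc.BatchWriteItem(&dynamodb.BatchWriteItemInput{
//		RequestItems: map[string][]*dynamodb.WriteRequest{
//			"Music": {
//				{PutRequest: &dynamodb.PutRequest{Item: map[string]*dynamodb.AttributeValue{
//					"Artist":    {S: aws.String("No One You Know")},
//					"SongTitle": {S: aws.String("Call Me Today")},
//				}}},
//				{DeleteRequest: &dynamodb.DeleteRequest{Key: map[string]*dynamodb.AttributeValue{
//					"Artist":    {S: aws.String("No One You Know")},
//					"SongTitle": {S: aws.String("Somewhere Down The Road")},
//				}}},
//			},
//		},
//	})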
@@ -319,6 +319,80 @@ func (c *DynamoDB) DeleteTable(input *DeleteTableInput) (*DeleteTableOutput, err
	return out, err
}

const opDescribeLimits = "DescribeLimits"

// DescribeLimitsRequest generates a request for the DescribeLimits operation.
func (c *DynamoDB) DescribeLimitsRequest(input *DescribeLimitsInput) (req *request.Request, output *DescribeLimitsOutput) {
	op := &request.Operation{
		Name:       opDescribeLimits,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DescribeLimitsInput{}
	}

	req = c.newRequest(op, input, output)
	output = &DescribeLimitsOutput{}
	req.Data = output
	return
}

// Returns the current provisioned-capacity limits for your AWS account in a
// region, both for the region as a whole and for any one DynamoDB table that
// you create there.
//
// When you establish an AWS account, the account has initial limits on the
// maximum read capacity units and write capacity units that you can provision
// across all of your DynamoDB tables in a given region. Also, there are per-table
// limits that apply when you create a table there. For more information, see
// Limits (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
// page in the Amazon DynamoDB Developer Guide.
//
// Although you can increase these limits by filing a case at AWS Support Center
// (https://console.aws.amazon.com/support/home#/), obtaining the increase is
// not instantaneous. The DescribeLimits API lets you write code to compare
// the capacity you are currently using to those limits imposed by your account
// so that you have enough time to apply for an increase before you hit a limit.
//
// For example, you could use one of the AWS SDKs to do the following:
//
// Call DescribeLimits for a particular region to obtain your current account
// limits on provisioned capacity there. Create a variable to hold the aggregate
// read capacity units provisioned for all your tables in that region, and one
// to hold the aggregate write capacity units. Zero them both. Call ListTables
// to obtain a list of all your DynamoDB tables. For each table name listed
// by ListTables, do the following:
//
// Call DescribeTable with the table name. Use the data returned by DescribeTable
// to add the read capacity units and write capacity units provisioned for the
// table itself to your variables. If the table has one or more global secondary
// indexes (GSIs), loop over these GSIs and add their provisioned capacity values
// to your variables as well. Report the account limits for that region returned
// by DescribeLimits, along with the total current provisioned capacity levels
// you have calculated. This will let you see whether you are getting close
// to your account-level limits.
//
// The per-table limits apply only when you are creating a new table. They
// restrict the sum of the provisioned capacity of the new table itself and
// all its global secondary indexes.
//
// For existing tables and their GSIs, DynamoDB will not let you increase provisioned
// capacity extremely rapidly, but the only upper limit that applies is that
// the aggregate provisioned capacity over all your tables and GSIs cannot exceed
// either of the per-account limits.
//
// DescribeLimits should only be called periodically. You can expect throttling
// errors if you call it more than once in a minute.
//
// The DescribeLimits Request element has no content.
func (c *DynamoDB) DescribeLimits(input *DescribeLimitsInput) (*DescribeLimitsOutput, error) {
	req, out := c.DescribeLimitsRequest(input)
	err := req.Send()
	return out, err
}
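// Illustrative sketch (not part of the vendored SDK): the aggregation loop that
// the DescribeLimits documentation above walks through, written against this
// package's client. The client value svc is assumed to be configured elsewhere;
// imports are elided and error handling is abbreviated.
//
//	limits, err := svc.DescribeLimits(&dynamodb.DescribeLimitsInput{})
//	if err != nil {
//		return err
//	}
//	var usedRead, usedWrite int64
//	var start *string
//	for {
//		page, err := svc.ListTables(&dynamodb.ListTablesInput{ExclusiveStartTableName: start})
//		if err != nil {
//			return err
//		}
//		for _, name := range page.TableNames {
//			t, err := svc.DescribeTable(&dynamodb.DescribeTableInput{TableName: name})
//			if err != nil {
//				return err
//			}
//			usedRead += *t.Table.ProvisionedThroughput.ReadCapacityUnits
//			usedWrite += *t.Table.ProvisionedThroughput.WriteCapacityUnits
//			for _, gsi := range t.Table.GlobalSecondaryIndexes {
//				usedRead += *gsi.ProvisionedThroughput.ReadCapacityUnits
//				usedWrite += *gsi.ProvisionedThroughput.WriteCapacityUnits
//			}
//		}
//		if page.LastEvaluatedTableName == nil {
//			break
//		}
//		start = page.LastEvaluatedTableName
//	}
//	fmt.Printf("account limits %d RCU / %d WCU; currently provisioned %d / %d\n",
//		*limits.AccountMaxReadCapacityUnits, *limits.AccountMaxWriteCapacityUnits,
//		usedRead, usedWrite)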

const opDescribeTable = "DescribeTable"

// DescribeTableRequest generates a request for the DescribeTable operation.
@@ -470,8 +544,10 @@ func (c *DynamoDB) PutItemRequest(input *PutItemInput) (req *request.Request, ou
|
||||
// see the ReturnValues description below.
|
||||
//
|
||||
// To prevent a new item from replacing an existing item, use a conditional
|
||||
// put operation with ComparisonOperator set to NULL for the primary key attribute,
|
||||
// or attributes.
|
||||
// expression that contains the attribute_not_exists function with the name
|
||||
// of the attribute being used as the partition key for the table. Since every
|
||||
// record must contain that attribute, the attribute_not_exists function will
|
||||
// only succeed if no matching item exists.
|
||||
//
|
||||
// For more information about using this API, see Working with Items (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithItems.html)
|
||||
// in the Amazon DynamoDB Developer Guide.
|
||||
@@ -510,12 +586,12 @@ func (c *DynamoDB) QueryRequest(input *QueryInput) (req *request.Request, output
|
||||
// A Query operation uses the primary key of a table or a secondary index to
|
||||
// directly access items from that table or index.
|
||||
//
|
||||
// Use the KeyConditionExpression parameter to provide a specific hash key
|
||||
// value. The Query operation will return all of the items from the table or
|
||||
// index with that hash key value. You can optionally narrow the scope of the
|
||||
// Query operation by specifying a range key value and a comparison operator
|
||||
// in KeyConditionExpression. You can use the ScanIndexForward parameter to
|
||||
// get results in forward or reverse order, by range key or by index key.
|
||||
// Use the KeyConditionExpression parameter to provide a specific value for
|
||||
// the partition key. The Query operation will return all of the items from
|
||||
// the table or index with that partition key value. You can optionally narrow
|
||||
// the scope of the Query operation by specifying a sort key value and a comparison
|
||||
// operator in KeyConditionExpression. You can use the ScanIndexForward parameter
|
||||
// to get results in forward or reverse order, by sort key.
|
||||
//
|
||||
// Queries that do not return results consume the minimum number of read capacity
|
||||
// units for that type of read operation.
|
||||
@@ -588,9 +664,11 @@ func (c *DynamoDB) ScanRequest(input *ScanInput) (req *request.Request, output *
|
||||
// more information, see Parallel Scan (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#QueryAndScanParallelScan)
|
||||
// in the Amazon DynamoDB Developer Guide.
|
||||
//
|
||||
// By default, Scan uses eventually consistent reads when acessing the data
|
||||
// in the table or local secondary index. However, you can use strongly consistent
|
||||
// reads instead by setting the ConsistentRead parameter to true.
|
||||
// By default, Scan uses eventually consistent reads when accessing the data
|
||||
// in a table; therefore, the result set might not include the changes to data
|
||||
// in the table immediately before the operation began. If you need a consistent
|
||||
// copy of the data, as of the time that the Scan begins, you can set the ConsistentRead
|
||||
// parameter to true.
|
||||
func (c *DynamoDB) Scan(input *ScanInput) (*ScanOutput, error) {
|
||||
req, out := c.ScanRequest(input)
|
||||
err := req.Send()
|
||||
@@ -629,9 +707,7 @@ func (c *DynamoDB) UpdateItemRequest(input *UpdateItemInput) (req *request.Reque
|
||||
// does not already exist. You can put, delete, or add attribute values. You
|
||||
// can also perform a conditional update on an existing item (insert a new attribute
|
||||
// name-value pair if it doesn't exist, or replace an existing name-value pair
|
||||
// if it has certain expected attribute values). If conditions are specified
|
||||
// and the item does not exist, then the operation fails and a new item is not
|
||||
// created.
|
||||
// if it has certain expected attribute values).
|
||||
//
|
||||
// You can also return the item's attribute values in the same UpdateItem operation
|
||||
// using the ReturnValues parameter.
|
||||
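// Illustrative sketch (not part of the vendored SDK): an UpdateItem call that
// sets one non-key attribute and returns the new value via the ReturnValues
// parameter mentioned above. UpdateExpression is assumed to be available in this
// API version; the table, attribute, and token names are placeholders.
//
//	out, err := svc.UpdateItem(&dynamodb.UpdateItemInput{
//		TableName: aws.String("Music"),
//		Key: map[string]*dynamodb.AttributeValue{
//			"Artist":    {S: aws.String("No One You Know")},
//			"SongTitle": {S: aws.String("Call Me Today")},
//		},
//		UpdateExpression: aws.String("SET AlbumTitle = :t"),
//		ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
//			":t": {S: aws.String("Somewhat Famous")},
//		},
//		ReturnValues: aws.String("UPDATED_NEW"),
//	})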
@@ -675,7 +751,7 @@ func (c *DynamoDB) UpdateTableRequest(input *UpdateTableInput) (req *request.Req
|
||||
// Create a new global secondary index on the table. Once the index begins
|
||||
// backfilling, you can use UpdateTable to perform other operations.
|
||||
//
|
||||
// UpdateTable is an asynchronous operation; while it is executing, the table
|
||||
// UpdateTable is an asynchronous operation; while it is executing, the table
|
||||
// status changes from ACTIVE to UPDATING. While it is UPDATING, you cannot
|
||||
// issue another UpdateTable request. When the table returns to the ACTIVE state,
|
||||
// the UpdateTable operation is complete.
|
||||
@@ -692,7 +768,10 @@ type AttributeDefinition struct {
|
||||
// A name for the attribute.
|
||||
AttributeName *string `min:"1" type:"string" required:"true"`
|
||||
|
||||
// The data type for the attribute.
|
||||
// The data type for the attribute, where:
|
||||
//
|
||||
// S - the attribute is of type String N - the attribute is of type Number
|
||||
// B - the attribute is of type Binary
|
||||
AttributeType *string `type:"string" required:"true" enum:"ScalarAttributeType"`
|
||||
}
|
||||
|
||||
@@ -905,9 +984,9 @@ type BatchGetItemInput struct {
|
||||
//
|
||||
// Keys - An array of primary key attribute values that define specific items
|
||||
// in the table. For each primary key, you must provide all of the key attributes.
|
||||
// For example, with a hash type primary key, you only need to provide the hash
|
||||
// attribute. For a hash-and-range type primary key, you must provide both the
|
||||
// hash attribute and the range attribute.
|
||||
// For example, with a simple primary key, you only need to provide the partition
|
||||
// key value. For a composite key, you must provide both the partition key value
|
||||
// and the sort key value.
|
||||
//
|
||||
// ProjectionExpression - A string that identifies one or more attributes
|
||||
// to retrieve from the table. These attributes can include scalars, sets, or
|
||||
@@ -943,7 +1022,7 @@ type BatchGetItemInput struct {
|
||||
// Determines the level of detail about provisioned throughput consumption that
|
||||
// is returned in the response:
|
||||
//
|
||||
// INDEXES - The response includes the aggregate ConsumedCapacity for the
|
||||
// INDEXES - The response includes the aggregate ConsumedCapacity for the
|
||||
// operation, together with ConsumedCapacity for each table and secondary index
|
||||
// that was accessed.
|
||||
//
|
||||
@@ -951,7 +1030,7 @@ type BatchGetItemInput struct {
|
||||
// any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity
|
||||
// information for table(s).
|
||||
//
|
||||
// TOTAL - The response includes only the aggregate ConsumedCapacity for the
|
||||
// TOTAL - The response includes only the aggregate ConsumedCapacity for the
|
||||
// operation.
|
||||
//
|
||||
// NONE - No ConsumedCapacity details are included in the response.
|
||||
@@ -1033,9 +1112,9 @@ type BatchWriteItemInput struct {
|
||||
// Key - A map of primary key attribute values that uniquely identify the
|
||||
// item. Each entry in this map consists of an attribute name and an attribute
|
||||
// value. For each primary key, you must provide all of the key attributes.
|
||||
// For example, with a hash type primary key, you only need to provide the hash
|
||||
// attribute. For a hash-and-range type primary key, you must provide both the
|
||||
// hash attribute and the range attribute.
|
||||
// For example, with a simple primary key, you only need to provide a value
|
||||
// for the partition key. For a composite primary key, you must provide values
|
||||
// for both the partition key and the sort key.
|
||||
//
|
||||
// PutRequest - Perform a PutItem operation on the specified item. The
|
||||
// item to be put is identified by an Item subelement:
|
||||
@@ -1054,7 +1133,7 @@ type BatchWriteItemInput struct {
|
||||
// Determines the level of detail about provisioned throughput consumption that
|
||||
// is returned in the response:
|
||||
//
|
||||
// INDEXES - The response includes the aggregate ConsumedCapacity for the
|
||||
// INDEXES - The response includes the aggregate ConsumedCapacity for the
|
||||
// operation, together with ConsumedCapacity for each table and secondary index
|
||||
// that was accessed.
|
||||
//
|
||||
@@ -1062,7 +1141,7 @@ type BatchWriteItemInput struct {
|
||||
// any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity
|
||||
// information for table(s).
|
||||
//
|
||||
// TOTAL - The response includes only the aggregate ConsumedCapacity for the
|
||||
// TOTAL - The response includes only the aggregate ConsumedCapacity for the
|
||||
// operation.
|
||||
//
|
||||
// NONE - No ConsumedCapacity details are included in the response.
|
||||
@@ -1104,8 +1183,8 @@ type BatchWriteItemOutput struct {
|
||||
//
|
||||
// Each entry consists of the following subelements:
|
||||
//
|
||||
// ItemCollectionKey - The hash key value of the item collection. This is
|
||||
// the same as the hash key of the item.
|
||||
// ItemCollectionKey - The partition key value of the item collection. This
|
||||
// is the same as the partition key value of the item.
|
||||
//
|
||||
// SizeEstimateRange - An estimate of item collection size, expressed in
|
||||
// GB. This is a two-element array containing a lower bound and an upper bound
|
||||
@@ -1471,12 +1550,25 @@ type CreateTableInput struct {
|
||||
//
|
||||
// AttributeName - The name of this key attribute.
|
||||
//
|
||||
// KeyType - Determines whether the key attribute is HASH or RANGE.
|
||||
// KeyType - The role that the key attribute will assume:
|
||||
//
|
||||
// For a primary key that consists of a hash attribute, you must provide
|
||||
// exactly one element with a KeyType of HASH.
|
||||
// HASH - partition key
|
||||
//
|
||||
// For a primary key that consists of hash and range attributes, you must provide
|
||||
// RANGE - sort key
|
||||
//
|
||||
// The partition key of an item is also known as its hash attribute. The
|
||||
// term "hash attribute" derives from DynamoDB' usage of an internal hash function
|
||||
// to evenly distribute data items across partitions, based on their partition
|
||||
// key values.
|
||||
//
|
||||
// The sort key of an item is also known as its range attribute. The term "range
|
||||
// attribute" derives from the way DynamoDB stores items with the same partition
|
||||
// key physically close together, in sorted order by the sort key value.
|
||||
//
|
||||
// For a simple primary key (partition key), you must provide exactly one element
|
||||
// with a KeyType of HASH.
|
||||
//
|
||||
// For a composite primary key (partition key and sort key), you must provide
|
||||
// exactly two elements, in this order: The first element must have a KeyType
|
||||
// of HASH, and the second element must have a KeyType of RANGE.
|
||||
//
|
||||
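// Illustrative sketch (not part of the vendored SDK): a CreateTableInput whose
// KeySchema follows the composite-primary-key rules described above (exactly two
// elements, HASH first, then RANGE). The table and attribute names are placeholders.
//
//	input := &dynamodb.CreateTableInput{
//		TableName: aws.String("Music"),
//		AttributeDefinitions: []*dynamodb.AttributeDefinition{
//			{AttributeName: aws.String("Artist"), AttributeType: aws.String("S")},
//			{AttributeName: aws.String("SongTitle"), AttributeType: aws.String("S")},
//		},
//		KeySchema: []*dynamodb.KeySchemaElement{
//			{AttributeName: aws.String("Artist"), KeyType: aws.String("HASH")},     // partition key
//			{AttributeName: aws.String("SongTitle"), KeyType: aws.String("RANGE")}, // sort key
//		},
//		ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
//			ReadCapacityUnits:  aws.Int64(5),
//			WriteCapacityUnits: aws.Int64(5),
//		},
//	}
//	out, err := svc.CreateTable(input)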
@@ -1485,9 +1577,9 @@ type CreateTableInput struct {
|
||||
KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"`
|
||||
|
||||
// One or more local secondary indexes (the maximum is five) to be created on
|
||||
// the table. Each index is scoped to a given hash key value. There is a 10
|
||||
// GB size limit per hash key; otherwise, the size of a local secondary index
|
||||
// is unconstrained.
|
||||
// the table. Each index is scoped to a given partition key value. There is
|
||||
// a 10 GB size limit per partition key value; otherwise, the size of a local
|
||||
// secondary index is unconstrained.
|
||||
//
|
||||
// Each local secondary index in the array includes the following:
|
||||
//
|
||||
@@ -1495,7 +1587,7 @@ type CreateTableInput struct {
|
||||
// for this table.
|
||||
//
|
||||
// KeySchema - Specifies the key schema for the local secondary index. The
|
||||
// key schema must begin with the same hash key attribute as the table.
|
||||
// key schema must begin with the same partition key as the table.
|
||||
//
|
||||
// Projection - Specifies attributes that are copied (projected) from the
|
||||
// table into the index. These are in addition to the primary key attributes
|
||||
@@ -1612,7 +1704,7 @@ type DeleteItemInput struct {
|
||||
//
|
||||
// These function names are case-sensitive.
|
||||
//
|
||||
// Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN
|
||||
// Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN
|
||||
//
|
||||
// Logical operators: AND | OR | NOT
|
||||
//
|
||||
@@ -1901,15 +1993,15 @@ type DeleteItemInput struct {
|
||||
// key of the item to delete.
|
||||
//
|
||||
// For the primary key, you must provide all of the attributes. For example,
|
||||
// with a hash type primary key, you only need to provide the hash attribute.
|
||||
// For a hash-and-range type primary key, you must provide both the hash attribute
|
||||
// and the range attribute.
|
||||
// with a simple primary key, you only need to provide a value for the partition
|
||||
// key. For a composite primary key, you must provide values for both the partition
|
||||
// key and the sort key.
|
||||
Key map[string]*AttributeValue `type:"map" required:"true"`
|
||||
|
||||
// Determines the level of detail about provisioned throughput consumption that
|
||||
// is returned in the response:
|
||||
//
|
||||
// INDEXES - The response includes the aggregate ConsumedCapacity for the
|
||||
// INDEXES - The response includes the aggregate ConsumedCapacity for the
|
||||
// operation, together with ConsumedCapacity for each table and secondary index
|
||||
// that was accessed.
|
||||
//
|
||||
@@ -1917,7 +2009,7 @@ type DeleteItemInput struct {
|
||||
// any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity
|
||||
// information for table(s).
|
||||
//
|
||||
// TOTAL - The response includes only the aggregate ConsumedCapacity for the
|
||||
// TOTAL - The response includes only the aggregate ConsumedCapacity for the
|
||||
// operation.
|
||||
//
|
||||
// NONE - No ConsumedCapacity details are included in the response.
|
||||
@@ -1976,8 +2068,8 @@ type DeleteItemOutput struct {
|
||||
//
|
||||
// Each ItemCollectionMetrics element consists of:
|
||||
//
|
||||
// ItemCollectionKey - The hash key value of the item collection. This is
|
||||
// the same as the hash key of the item.
|
||||
// ItemCollectionKey - The partition key value of the item collection. This
|
||||
// is the same as the partition key value of the item itself.
|
||||
//
|
||||
// SizeEstimateRange - An estimate of item collection size, in gigabytes. This
|
||||
// value is a two-element array containing a lower bound and an upper bound
|
||||
@@ -2057,6 +2149,54 @@ func (s DeleteTableOutput) GoString() string {
|
||||
return s.String()
|
||||
}
|
||||
|
||||
// Represents the input of a DescribeLimits operation. Has no content.
|
||||
type DescribeLimitsInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
}
|
||||
|
||||
// String returns the string representation
|
||||
func (s DescribeLimitsInput) String() string {
|
||||
return awsutil.Prettify(s)
|
||||
}
|
||||
|
||||
// GoString returns the string representation
|
||||
func (s DescribeLimitsInput) GoString() string {
|
||||
return s.String()
|
||||
}
|
||||
|
||||
// Represents the output of a DescribeLimits operation.
|
||||
type DescribeLimitsOutput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// The maximum total read capacity units that your account allows you to provision
|
||||
// across all of your tables in this region.
|
||||
AccountMaxReadCapacityUnits *int64 `min:"1" type:"long"`
|
||||
|
||||
// The maximum total write capacity units that your account allows you to provision
|
||||
// across all of your tables in this region.
|
||||
AccountMaxWriteCapacityUnits *int64 `min:"1" type:"long"`
|
||||
|
||||
// The maximum read capacity units that your account allows you to provision
|
||||
// for a new table that you are creating in this region, including the read
|
||||
// capacity units provisioned for its global secondary indexes (GSIs).
|
||||
TableMaxReadCapacityUnits *int64 `min:"1" type:"long"`
|
||||
|
||||
// The maximum write capacity units that your account allows you to provision
|
||||
// for a new table that you are creating in this region, including the write
|
||||
// capacity units provisioned for its global secondary indexes (GSIs).
|
||||
TableMaxWriteCapacityUnits *int64 `min:"1" type:"long"`
|
||||
}
|
||||
|
||||
// String returns the string representation
|
||||
func (s DescribeLimitsOutput) String() string {
|
||||
return awsutil.Prettify(s)
|
||||
}
|
||||
|
||||
// GoString returns the string representation
|
||||
func (s DescribeLimitsOutput) GoString() string {
|
||||
return s.String()
|
||||
}
|
||||
|
||||
// Represents the input of a DescribeTable operation.
|
||||
type DescribeTableInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
@@ -2372,9 +2512,9 @@ type GetItemInput struct {
|
||||
// key of the item to retrieve.
|
||||
//
|
||||
// For the primary key, you must provide all of the attributes. For example,
|
||||
// with a hash type primary key, you only need to provide the hash attribute.
|
||||
// For a hash-and-range type primary key, you must provide both the hash attribute
|
||||
// and the range attribute.
|
||||
// with a simple primary key, you only need to provide a value for the partition
|
||||
// key. For a composite primary key, you must provide values for both the partition
|
||||
// key and the sort key.
|
||||
Key map[string]*AttributeValue `type:"map" required:"true"`
|
||||
|
||||
// A string that identifies one or more attributes to retrieve from the table.
|
||||
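// Illustrative sketch (not part of the vendored SDK): a GetItem call that supplies
// the complete composite primary key, as the Key documentation above requires.
// The table and attribute names are placeholders.
//
//	out, err := svc.GetItem(&dynamodb.GetItemInput{
//		TableName: aws.String("Music"),
//		Key: map[string]*dynamodb.AttributeValue{
//			"Artist":    {S: aws.String("No One You Know")}, // partition key
//			"SongTitle": {S: aws.String("Call Me Today")},   // sort key
//		},
//	})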
@@ -2394,7 +2534,7 @@ type GetItemInput struct {
|
||||
// Determines the level of detail about provisioned throughput consumption that
|
||||
// is returned in the response:
|
||||
//
|
||||
// INDEXES - The response includes the aggregate ConsumedCapacity for the
|
||||
// INDEXES - The response includes the aggregate ConsumedCapacity for the
|
||||
// operation, together with ConsumedCapacity for each table and secondary index
|
||||
// that was accessed.
|
||||
//
|
||||
@@ -2402,7 +2542,7 @@ type GetItemInput struct {
|
||||
// any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity
|
||||
// information for table(s).
|
||||
//
|
||||
// TOTAL - The response includes only the aggregate ConsumedCapacity for the
|
||||
// TOTAL - The response includes only the aggregate ConsumedCapacity for the
|
||||
// operation.
|
||||
//
|
||||
// NONE - No ConsumedCapacity details are included in the response.
|
||||
@@ -2457,7 +2597,20 @@ type GlobalSecondaryIndex struct {
|
||||
IndexName *string `min:"3" type:"string" required:"true"`
|
||||
|
||||
// The complete key schema for a global secondary index, which consists of one
|
||||
// or more pairs of attribute names and key types (HASH or RANGE).
|
||||
// or more pairs of attribute names and key types:
|
||||
//
|
||||
// HASH - partition key
|
||||
//
|
||||
// RANGE - sort key
|
||||
//
|
||||
// The partition key of an item is also known as its hash attribute. The
|
||||
// term "hash attribute" derives from DynamoDB' usage of an internal hash function
|
||||
// to evenly distribute data items across partitions, based on their partition
|
||||
// key values.
|
||||
//
|
||||
// The sort key of an item is also known as its range attribute. The term "range
|
||||
// attribute" derives from the way DynamoDB stores items with the same partition
|
||||
// key physically close together, in sorted order by the sort key value.
|
||||
KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"`
|
||||
|
||||
// Represents attributes that are copied (projected) from the table into an
|
||||
@@ -2490,8 +2643,8 @@ type GlobalSecondaryIndexDescription struct {
|
||||
|
||||
// Indicates whether the index is currently backfilling. Backfilling is the
|
||||
// process of reading items from the table and determining whether they can
|
||||
// be added to the index. (Not all items will qualify: For example, a hash key
|
||||
// attribute cannot have any duplicates.) If an item can be added to the index,
|
||||
// be added to the index. (Not all items will qualify: For example, a partition
|
||||
// key cannot have any duplicate values.) If an item can be added to the index,
|
||||
// DynamoDB will do so. After all items have been processed, the backfilling
|
||||
// operation is complete and Backfilling is false.
|
||||
//
|
||||
@@ -2525,8 +2678,21 @@ type GlobalSecondaryIndexDescription struct {
|
||||
// every six hours. Recent changes might not be reflected in this value.
|
||||
ItemCount *int64 `type:"long"`
|
||||
|
||||
// The complete key schema for the global secondary index, consisting of one
|
||||
// or more pairs of attribute names and key types (HASH or RANGE).
|
||||
// The complete key schema for a global secondary index, which consists of one
|
||||
// or more pairs of attribute names and key types:
|
||||
//
|
||||
// HASH - partition key
|
||||
//
|
||||
// RANGE - sort key
|
||||
//
|
||||
// The partition key of an item is also known as its hash attribute. The
|
||||
// term "hash attribute" derives from DynamoDB' usage of an internal hash function
|
||||
// to evenly distribute data items across partitions, based on their partition
|
||||
// key values.
|
||||
//
|
||||
// The sort key of an item is also known as its range attribute. The term "range
|
||||
// attribute" derives from the way DynamoDB stores items with the same partition
|
||||
// key physically close together, in sorted order by the sort key value.
|
||||
KeySchema []*KeySchemaElement `min:"1" type:"list"`
|
||||
|
||||
// Represents attributes that are copied (projected) from the table into an
|
||||
@@ -2598,8 +2764,8 @@ func (s GlobalSecondaryIndexUpdate) GoString() string {
|
||||
type ItemCollectionMetrics struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// The hash key value of the item collection. This value is the same as the
|
||||
// hash key of the item.
|
||||
// The partition key value of the item collection. This value is the same as
|
||||
// the partition key value of the item.
|
||||
ItemCollectionKey map[string]*AttributeValue `type:"map"`
|
||||
|
||||
// An estimate of item collection size, in gigabytes. This value is a two-element
|
||||
@@ -2628,16 +2794,33 @@ func (s ItemCollectionMetrics) GoString() string {
|
||||
// that make up the primary key of a table, or the key attributes of an index.
|
||||
//
|
||||
// A KeySchemaElement represents exactly one attribute of the primary key.
|
||||
// For example, a hash type primary key would be represented by one KeySchemaElement.
|
||||
// A hash-and-range type primary key would require one KeySchemaElement for
|
||||
// the hash attribute, and another KeySchemaElement for the range attribute.
|
||||
// For example, a simple primary key would be represented by one KeySchemaElement
|
||||
// (for the partition key). A composite primary key would require one KeySchemaElement
|
||||
// for the partition key, and another KeySchemaElement for the sort key.
|
||||
//
|
||||
// A KeySchemaElement must be a scalar, top-level attribute (not a nested attribute).
|
||||
// The data type must be one of String, Number, or Binary. The attribute cannot
|
||||
// be nested within a List or a Map.
|
||||
type KeySchemaElement struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// The name of a key attribute.
|
||||
AttributeName *string `min:"1" type:"string" required:"true"`
|
||||
|
||||
// The attribute data, consisting of the data type and the attribute value itself.
|
||||
// The role that this key attribute will assume:
|
||||
//
|
||||
// HASH - partition key
|
||||
//
|
||||
// RANGE - sort key
|
||||
//
|
||||
// The partition key of an item is also known as its hash attribute. The
|
||||
// term "hash attribute" derives from DynamoDB' usage of an internal hash function
|
||||
// to evenly distribute data items across partitions, based on their partition
|
||||
// key values.
|
||||
//
|
||||
// The sort key of an item is also known as its range attribute. The term "range
|
||||
// attribute" derives from the way DynamoDB stores items with the same partition
|
||||
// key physically close together, in sorted order by the sort key value.
|
||||
KeyType *string `type:"string" required:"true" enum:"KeyType"`
|
||||
}
|
||||
|
||||
@@ -2655,9 +2838,9 @@ func (s KeySchemaElement) GoString() string {
|
||||
// from the table.
|
||||
//
|
||||
// For each primary key, you must provide all of the key attributes. For example,
|
||||
// with a hash type primary key, you only need to provide the hash attribute.
|
||||
// For a hash-and-range type primary key, you must provide both the hash attribute
|
||||
// and the range attribute.
|
||||
// with a simple primary key, you only need to provide the partition key. For
|
||||
// a composite primary key, you must provide both the partition key and the
|
||||
// sort key.
|
||||
type KeysAndAttributes struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
@@ -2799,7 +2982,20 @@ type LocalSecondaryIndex struct {
|
||||
IndexName *string `min:"3" type:"string" required:"true"`
|
||||
|
||||
// The complete key schema for the local secondary index, consisting of one
|
||||
// or more pairs of attribute names and key types (HASH or RANGE).
|
||||
// or more pairs of attribute names and key types:
|
||||
//
|
||||
// HASH - partition key
|
||||
//
|
||||
// RANGE - sort key
|
||||
//
|
||||
// The partition key of an item is also known as its hash attribute. The
|
||||
// term "hash attribute" derives from DynamoDB' usage of an internal hash function
|
||||
// to evenly distribute data items across partitions, based on their partition
|
||||
// key values.
|
||||
//
|
||||
// The sort key of an item is also known as its range attribute. The term "range
|
||||
// attribute" derives from the way DynamoDB stores items with the same partition
|
||||
// key physically close together, in sorted order by the sort key value.
|
||||
KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"`
|
||||
|
||||
// Represents attributes that are copied (projected) from the table into an
|
||||
@@ -2837,8 +3033,21 @@ type LocalSecondaryIndexDescription struct {
|
||||
// every six hours. Recent changes might not be reflected in this value.
|
||||
ItemCount *int64 `type:"long"`
|
||||
|
||||
// The complete index key schema, which consists of one or more pairs of attribute
|
||||
// names and key types (HASH or RANGE).
|
||||
// The complete key schema for the local secondary index, consisting of one
|
||||
// or more pairs of attribute names and key types:
|
||||
//
|
||||
// HASH - partition key
|
||||
//
|
||||
// RANGE - sort key
|
||||
//
|
||||
// The partition key of an item is also known as its hash attribute. The
|
||||
// term "hash attribute" derives from DynamoDB' usage of an internal hash function
|
||||
// to evenly distribute data items across partitions, based on their partition
|
||||
// key values.
|
||||
//
|
||||
// The sort key of an item is also known as its range attribute. The term "range
|
||||
// attribute" derives from the way DynamoDB stores items with the same partition
|
||||
// key physically close together, in sorted order by the sort key value.
|
||||
KeySchema []*KeySchemaElement `min:"1" type:"list"`
|
||||
|
||||
// Represents attributes that are copied (projected) from the table into an
|
||||
@@ -2976,7 +3185,7 @@ type PutItemInput struct {
|
||||
//
|
||||
// These function names are case-sensitive.
|
||||
//
|
||||
// Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN
|
||||
// Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN
|
||||
//
|
||||
// Logical operators: AND | OR | NOT
|
||||
//
|
||||
@@ -3266,9 +3475,9 @@ type PutItemInput struct {
|
||||
// pairs for the item.
|
||||
//
|
||||
// You must provide all of the attributes for the primary key. For example,
|
||||
// with a hash type primary key, you only need to provide the hash attribute.
|
||||
// For a hash-and-range type primary key, you must provide both the hash attribute
|
||||
// and the range attribute.
|
||||
// with a simple primary key, you only need to provide a value for the partition
|
||||
// key. For a composite primary key, you must provide values for both the
|
||||
// partition key and the sort key.
|
||||
//
|
||||
// If you specify any attributes that are part of an index key, then the data
|
||||
// types for those attributes must match those of the schema in the table's
|
||||
@@ -3283,7 +3492,7 @@ type PutItemInput struct {
|
||||
// Determines the level of detail about provisioned throughput consumption that
|
||||
// is returned in the response:
|
||||
//
|
||||
// INDEXES - The response includes the aggregate ConsumedCapacity for the
|
||||
// INDEXES - The response includes the aggregate ConsumedCapacity for the
|
||||
// operation, together with ConsumedCapacity for each table and secondary index
|
||||
// that was accessed.
|
||||
//
|
||||
@@ -3291,7 +3500,7 @@ type PutItemInput struct {
|
||||
// any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity
|
||||
// information for table(s).
|
||||
//
|
||||
// TOTAL - The response includes only the aggregate ConsumedCapacity for the
|
||||
// TOTAL - The response includes only the aggregate ConsumedCapacity for the
|
||||
// operation.
|
||||
//
|
||||
// NONE - No ConsumedCapacity details are included in the response.
|
||||
@@ -3312,8 +3521,6 @@ type PutItemInput struct {
|
||||
//
|
||||
// ALL_OLD - If PutItem overwrote an attribute name-value pair, then the
|
||||
// content of the old item is returned.
|
||||
//
|
||||
// Other "Valid Values" are not relevant to PutItem.
|
||||
ReturnValues *string `type:"string" enum:"ReturnValue"`
|
||||
|
||||
// The name of the table to contain the item.
|
||||
@@ -3354,8 +3561,8 @@ type PutItemOutput struct {
|
||||
//
|
||||
// Each ItemCollectionMetrics element consists of:
|
||||
//
|
||||
// ItemCollectionKey - The hash key value of the item collection. This is
|
||||
// the same as the hash key of the item.
|
||||
// ItemCollectionKey - The partition key value of the item collection. This
|
||||
// is the same as the partition key value of the item itself.
|
||||
//
|
||||
// SizeEstimateRange - An estimate of item collection size, in gigabytes. This
|
||||
// value is a two-element array containing a lower bound and an upper bound
|
||||
@@ -3553,50 +3760,51 @@ type QueryInput struct {
|
||||
// The condition that specifies the key value(s) for items to be retrieved by
|
||||
// the Query action.
|
||||
//
|
||||
// The condition must perform an equality test on a single hash key value.
|
||||
// The condition must perform an equality test on a single partition key value.
|
||||
// The condition can also perform one of several comparison tests on a single
|
||||
// range key value. Query can use KeyConditionExpression to retrieve one item
|
||||
// with a given hash and range key value, or several items that have the same
|
||||
// hash key value but different range key values.
|
||||
// sort key value. Query can use KeyConditionExpression to retrieve one item
|
||||
// with a given partition key value and sort key value, or several items that
|
||||
// have the same partition key value but different sort key values.
|
||||
//
|
||||
// The hash key equality test is required, and must be specified in the following
|
||||
// format:
|
||||
// The partition key equality test is required, and must be specified in the
|
||||
// following format:
|
||||
//
|
||||
// hashAttributeName = :hashval
|
||||
// partitionKeyName = :partitionkeyval
|
||||
//
|
||||
// If you also want to provide a range key condition, it must be combined using
|
||||
// AND with the hash key condition. Following is an example, using the = comparison
|
||||
// operator for the range key:
|
||||
// If you also want to provide a condition for the sort key, it must be combined
|
||||
// using AND with the condition for the sort key. Following is an example, using
|
||||
// the = comparison operator for the sort key:
|
||||
//
|
||||
// hashAttributeName = :hashval AND rangeAttributeName = :rangeval
|
||||
// partitionKeyName = :partitionkeyval AND sortKeyName = :sortkeyval
|
||||
//
|
||||
// Valid comparisons for the range key condition are as follows:
|
||||
// Valid comparisons for the sort key condition are as follows:
|
||||
//
|
||||
// rangeAttributeName = :rangeval - true if the range key is equal to :rangeval.
|
||||
// sortKeyName = :sortkeyval - true if the sort key value is equal to :sortkeyval.
|
||||
//
|
||||
// rangeAttributeName < :rangeval - true if the range key is less than :rangeval.
|
||||
// sortKeyName < :sortkeyval - true if the sort key value is less than :sortkeyval.
|
||||
//
|
||||
// rangeAttributeName <= :rangeval - true if the range key is less than or
|
||||
// equal to :rangeval.
|
||||
// sortKeyName <= :sortkeyval - true if the sort key value is less than or
|
||||
// equal to :sortkeyval.
|
||||
//
|
||||
// rangeAttributeName > :rangeval - true if the range key is greater than
|
||||
// :rangeval.
|
||||
// sortKeyName > :sortkeyval - true if the sort key value is greater than
|
||||
// :sortkeyval.
|
||||
//
|
||||
// rangeAttributeName >= :rangeval - true if the range key is greater than
|
||||
// or equal to :rangeval.
|
||||
// sortKeyName >= :sortkeyval - true if the sort key value is greater than
|
||||
// or equal to :sortkeyval.
|
||||
//
|
||||
// rangeAttributeName BETWEEN :rangeval1 AND :rangeval2 - true if the range
|
||||
// key is greater than or equal to :rangeval1, and less than or equal to :rangeval2.
|
||||
// sortKeyName BETWEEN :sortkeyval1 AND :sortkeyval2 - true if the sort key
|
||||
// value is greater than or equal to :sortkeyval1, and less than or equal to
|
||||
// :sortkeyval2.
|
||||
//
|
||||
// begins_with (rangeAttributeName, :rangeval) - true if the range key begins
|
||||
// with a particular operand. (You cannot use this function with a range key
|
||||
// begins_with (sortKeyName, :sortkeyval) - true if the sort key value begins
|
||||
// with a particular operand. (You cannot use this function with a sort key
|
||||
// that is of type Number.) Note that the function name begins_with is case-sensitive.
|
||||
//
|
||||
// Use the ExpressionAttributeValues parameter to replace tokens such as
|
||||
// :hashval and :rangeval with actual values at runtime.
|
||||
// :partitionval and :sortval with actual values at runtime.
|
||||
//
|
||||
// You can optionally use the ExpressionAttributeNames parameter to replace
|
||||
// the names of the hash and range attributes with placeholder tokens. This
|
||||
// the names of the partition key and sort key with placeholder tokens. This
|
||||
// option might be necessary if an attribute name conflicts with a DynamoDB
|
||||
// reserved word. For example, the following KeyConditionExpression parameter
|
||||
// causes an error because Size is a reserved word:
|
||||
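// Illustrative sketch (not part of the vendored SDK, and not the reserved-word
// example referred to above): a KeyConditionExpression in the partition-key /
// sort-key form this section describes. The table, attribute, and token names
// are placeholders.
//
//	out, err := svc.Query(&dynamodb.QueryInput{
//		TableName:              aws.String("Music"),
//		KeyConditionExpression: aws.String("Artist = :a AND begins_with(SongTitle, :prefix)"),
//		ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
//			":a":      {S: aws.String("No One You Know")},
//			":prefix": {S: aws.String("Call")},
//		},
//		ScanIndexForward: aws.Bool(false), // return results in descending sort-key order
//	})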
@@ -3621,17 +3829,17 @@ type QueryInput struct {
|
||||
//
|
||||
// The selection criteria for the query. For a query on a table, you can have
|
||||
// conditions only on the table primary key attributes. You must provide the
|
||||
// hash key attribute name and value as an EQ condition. You can optionally
|
||||
// provide a second condition, referring to the range key attribute.
|
||||
// partition key name and value as an EQ condition. You can optionally provide
|
||||
// a second condition, referring to the sort key.
|
||||
//
|
||||
// If you don't provide a range key condition, all of the items that match
|
||||
// the hash key will be retrieved. If a FilterExpression or QueryFilter is present,
|
||||
// it will be applied after the items are retrieved.
|
||||
// If you don't provide a sort key condition, all of the items that match
|
||||
// the partition key will be retrieved. If a FilterExpression or QueryFilter
|
||||
// is present, it will be applied after the items are retrieved.
|
||||
//
|
||||
// For a query on an index, you can have conditions only on the index key attributes.
|
||||
// You must provide the index hash attribute name and value as an EQ condition.
|
||||
// You can optionally provide a second condition, referring to the index key
|
||||
// range attribute.
|
||||
// You must provide the index partition key name and value as an EQ condition.
|
||||
// You can optionally provide a second condition, referring to the index sort
|
||||
// key.
|
||||
//
|
||||
// Each KeyConditions element consists of an attribute name to compare, along
|
||||
// with the following:
|
||||
@@ -3766,7 +3974,7 @@ type QueryInput struct {
|
||||
// must evaluate to true, rather than all of them.)
|
||||
//
|
||||
// Note that QueryFilter does not allow key attributes. You cannot define a
|
||||
// filter condition on a hash key or range key.
|
||||
// filter condition on a partition key or a sort key.
|
||||
//
|
||||
// Each QueryFilter element consists of an attribute name to compare, along
|
||||
// with the following:
|
||||
@@ -3788,7 +3996,7 @@ type QueryInput struct {
|
||||
// For information on specifying data types in JSON, see JSON Data Format (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataFormat.html)
|
||||
// in the Amazon DynamoDB Developer Guide.
|
||||
//
|
||||
// ComparisonOperator - A comparator for evaluating attributes. For example,
|
||||
// ComparisonOperator - A comparator for evaluating attributes. For example,
|
||||
// equals, greater than, less than, etc.
|
||||
//
|
||||
// The following comparison operators are available:
|
||||
@@ -3804,7 +4012,7 @@ type QueryInput struct {
|
||||
// Determines the level of detail about provisioned throughput consumption that
|
||||
// is returned in the response:
|
||||
//
|
||||
// INDEXES - The response includes the aggregate ConsumedCapacity for the
|
||||
// INDEXES - The response includes the aggregate ConsumedCapacity for the
|
||||
// operation, together with ConsumedCapacity for each table and secondary index
|
||||
// that was accessed.
|
||||
//
|
||||
@@ -3812,26 +4020,26 @@ type QueryInput struct {
|
||||
// any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity
|
||||
// information for table(s).
|
||||
//
|
||||
// TOTAL - The response includes only the aggregate ConsumedCapacity for the
|
||||
// TOTAL - The response includes only the aggregate ConsumedCapacity for the
|
||||
// operation.
|
||||
//
|
||||
// NONE - No ConsumedCapacity details are included in the response.
|
||||
ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
|
||||
|
||||
// Specifies the order in which to return the query results - either ascending
|
||||
// (true) or descending (false).
|
||||
// Specifies the order for index traversal: If true (default), the traversal
|
||||
// is performed in ascending order; if false, the traversal is performed in
|
||||
// descending order.
|
||||
//
|
||||
// Items with the same hash key are stored in sorted order by range key .If
|
||||
// the range key data type is Number, the results are stored in numeric order.
|
||||
// For type String, the results are returned in order of ASCII character code
|
||||
// values. For type Binary, DynamoDB treats each byte of the binary data as
|
||||
// unsigned.
|
||||
// Items with the same partition key value are stored in sorted order by sort
|
||||
// key. If the sort key data type is Number, the results are stored in numeric
|
||||
// order. For type String, the results are stored in order of ASCII character
|
||||
// code values. For type Binary, DynamoDB treats each byte of the binary data
|
||||
// as unsigned.
|
||||
//
|
||||
// If ScanIndexForward is true, DynamoDB returns the results in order, by range
|
||||
// key. This is the default behavior.
|
||||
//
|
||||
// If ScanIndexForward is false, DynamoDB sorts the results in descending order
|
||||
// by range key, and then returns the results to the client.
|
||||
// If ScanIndexForward is true, DynamoDB returns the results in the order in
|
||||
// which they are stored (by sort key value). This is the default behavior.
|
||||
// If ScanIndexForward is false, DynamoDB reads the results in reverse order
|
||||
// by sort key value, and then returns the results to the client.
|
||||
ScanIndexForward *bool `type:"boolean"`
|
||||
|
||||
// The attributes to be returned in the result. You can retrieve all item attributes,
|
||||
@@ -3909,7 +4117,7 @@ type QueryOutput struct {
|
||||
//
|
||||
// If you used a QueryFilter in the request, then Count is the number of items
|
||||
// returned after the filter was applied, and ScannedCount is the number of
|
||||
// matching items before> the filter was applied.
|
||||
// matching items before the filter was applied.
|
||||
//
|
||||
// If you did not use a filter in the request, then Count and ScannedCount
|
||||
// are the same.
|
||||
@@ -3994,19 +4202,16 @@ type ScanInput struct {
|
||||
|
||||
// A Boolean value that determines the read consistency model during the scan:
|
||||
//
|
||||
// If ConsistentRead is false, then Scan will use eventually consistent reads.
|
||||
// The data returned from Scan might not contain the results of other recently
|
||||
// completed write operations (PutItem, UpdateItem or DeleteItem). The Scan
|
||||
// response might include some stale data.
|
||||
// If ConsistentRead is false, then the data returned from Scan might not
|
||||
// contain the results from other recently completed write operations (PutItem,
|
||||
// UpdateItem or DeleteItem).
|
||||
//
|
||||
// If ConsistentRead is true, then Scan will use strongly consistent reads.
|
||||
// All of the write operations that completed before the Scan began are guaranteed
|
||||
// to be contained in the Scan response.
|
||||
// If ConsistentRead is true, then all of the write operations that completed
|
||||
// before the Scan began are guaranteed to be contained in the Scan response.
|
||||
//
|
||||
// The default setting for ConsistentRead is false, meaning that eventually
|
||||
// consistent reads will be used.
|
||||
// The default setting for ConsistentRead is false.
|
||||
//
|
||||
// Strongly consistent reads are not supported on global secondary indexes.
|
||||
// The ConsistentRead parameter is not supported on global secondary indexes.
|
||||
// If you scan a global secondary index with ConsistentRead set to true, you
|
||||
// will receive a ValidationException.
|
||||
ConsistentRead *bool `type:"boolean"`
|
||||
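// Illustrative sketch (not part of the vendored SDK): a strongly consistent Scan,
// per the ConsistentRead notes above; remember that ConsistentRead is rejected
// when scanning a global secondary index. The table name is a placeholder.
//
//	out, err := svc.Scan(&dynamodb.ScanInput{
//		TableName:      aws.String("Music"),
//		ConsistentRead: aws.Bool(true),
//	})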
@@ -4127,7 +4332,7 @@ type ScanInput struct {
|
||||
// Determines the level of detail about provisioned throughput consumption that
|
||||
// is returned in the response:
|
||||
//
|
||||
// INDEXES - The response includes the aggregate ConsumedCapacity for the
|
||||
// INDEXES - The response includes the aggregate ConsumedCapacity for the
|
||||
// operation, together with ConsumedCapacity for each table and secondary index
|
||||
// that was accessed.
|
||||
//
|
||||
@@ -4135,7 +4340,7 @@ type ScanInput struct {
|
||||
// any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity
|
||||
// information for table(s).
|
||||
//
|
||||
// TOTAL - The response includes only the aggregate ConsumedCapacity for the
|
||||
// TOTAL - The response includes only the aggregate ConsumedCapacity for the
|
||||
// operation.
|
||||
//
|
||||
// NONE - No ConsumedCapacity details are included in the response.
|
||||
@@ -4177,7 +4382,7 @@ type ScanInput struct {
|
||||
// For information on specifying data types in JSON, see JSON Data Format (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataFormat.html)
|
||||
// in the Amazon DynamoDB Developer Guide.
|
||||
//
|
||||
// ComparisonOperator - A comparator for evaluating attributes. For example,
|
||||
// ComparisonOperator - A comparator for evaluating attributes. For example,
|
||||
// equals, greater than, less than, etc.
|
||||
//
|
||||
// The following comparison operators are available:
|
||||
@@ -4370,7 +4575,7 @@ type TableDescription struct {
|
||||
CreationDateTime *time.Time `type:"timestamp" timestampFormat:"unix"`
|
||||
|
||||
// The global secondary indexes, if any, on the table. Each index is scoped
|
||||
// to a given hash key value. Each element is composed of:
|
||||
// to a given partition key value. Each element is composed of:
|
||||
//
|
||||
// Backfilling - If true, then the index is currently in the backfilling
|
||||
// phase. Backfilling occurs only when a new global secondary index is added
|
||||
@@ -4400,7 +4605,7 @@ type TableDescription struct {
|
||||
//
|
||||
// KeySchema - Specifies the complete index key schema. The attribute names
|
||||
// in the key schema must be between 1 and 255 characters (inclusive). The key
|
||||
// schema must begin with the same hash key attribute as the table.
|
||||
// schema must begin with the same partition key as the table.
|
||||
//
|
||||
// Projection - Specifies attributes that are copied (projected) from the
|
||||
// table into the index. These are in addition to the primary key attributes
|
||||
@@ -4438,7 +4643,20 @@ type TableDescription struct {
|
||||
//
|
||||
// AttributeName - The name of the attribute.
|
||||
//
|
||||
// KeyType - The key type for the attribute. Can be either HASH or RANGE.
|
||||
// KeyType - The role of the attribute:
|
||||
//
|
||||
// HASH - partition key
|
||||
//
|
||||
// RANGE - sort key
|
||||
//
|
||||
// The partition key of an item is also known as its hash attribute. The
|
||||
// term "hash attribute" derives from DynamoDB' usage of an internal hash function
|
||||
// to evenly distribute data items across partitions, based on their partition
|
||||
// key values.
|
||||
//
|
||||
// The sort key of an item is also known as its range attribute. The term "range
|
||||
// attribute" derives from the way DynamoDB stores items with the same partition
|
||||
// key physically close together, in sorted order by the sort key value.
|
||||
//
|
||||
// For more information about primary keys, see Primary Key (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelPrimaryKey)
|
||||
// in the Amazon DynamoDB Developer Guide.
|
||||
@@ -4463,7 +4681,7 @@ type TableDescription struct {
|
||||
LatestStreamLabel *string `type:"string"`
|
||||
|
||||
// Represents one or more local secondary indexes on the table. Each index is
|
||||
// scoped to a given hash key value. Tables with one or more local secondary
|
||||
// scoped to a given partition key value. Tables with one or more local secondary
|
||||
// indexes are subject to an item collection size limit, where the amount of
|
||||
// data within a given item collection cannot exceed 10 GB. Each element is
|
||||
// composed of:
|
||||
@@ -4472,7 +4690,7 @@ type TableDescription struct {
|
||||
//
|
||||
// KeySchema - Specifies the complete index key schema. The attribute names
|
||||
// in the key schema must be between 1 and 255 characters (inclusive). The key
|
||||
// schema must begin with the same hash key attribute as the table.
|
||||
// schema must begin with the same partition key as the table.
|
||||
//
|
||||
// Projection - Specifies attributes that are copied (projected) from the
|
||||
// table into the index. These are in addition to the primary key attributes
|
||||
@@ -4589,7 +4807,7 @@ type UpdateItemInput struct {
|
||||
// and the new value for each. If you are updating an attribute that is an index
|
||||
// key attribute for any indexes on that table, the attribute type must match
|
||||
// the index key type defined in the AttributesDefinition of the table description.
|
||||
// You can use UpdateItem to update any nonkey attributes.
|
||||
// You can use UpdateItem to update any non-key attributes.
|
||||
//
|
||||
// Attribute values cannot be null. String and Binary type attributes must
|
||||
// have lengths greater than zero. Set type attributes must not be empty. Requests
|
||||
@@ -4676,7 +4894,7 @@ type UpdateItemInput struct {
|
||||
//
|
||||
// These function names are case-sensitive.
|
||||
//
|
||||
// Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN
|
||||
// Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN
|
||||
//
|
||||
// Logical operators: AND | OR | NOT
|
||||
//
|
||||
@@ -4965,15 +5183,15 @@ type UpdateItemInput struct {
|
||||
// name and a value for that attribute.
|
||||
//
|
||||
// For the primary key, you must provide all of the attributes. For example,
|
||||
// with a hash type primary key, you only need to provide the hash attribute.
|
||||
// For a hash-and-range type primary key, you must provide both the hash attribute
|
||||
// and the range attribute.
|
||||
// with a simple primary key, you only need to provide a value for the partition
|
||||
// key. For a composite primary key, you must provide values for both the partition
|
||||
// key and the sort key.
|
||||
Key map[string]*AttributeValue `type:"map" required:"true"`
|
||||
|
||||
// Determines the level of detail about provisioned throughput consumption that
|
||||
// is returned in the response:
|
||||
//
|
||||
// INDEXES - The response includes the aggregate ConsumedCapacity for the
|
||||
// INDEXES - The response includes the aggregate ConsumedCapacity for the
|
||||
// operation, together with ConsumedCapacity for each table and secondary index
|
||||
// that was accessed.
|
||||
//
|
||||
@@ -4981,7 +5199,7 @@ type UpdateItemInput struct {
|
||||
// any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity
|
||||
// information for table(s).
|
||||
//
|
||||
// TOTAL - The response includes only the aggregate ConsumedCapacity for the
|
||||
// TOTAL - The response includes only the aggregate ConsumedCapacity for the
|
||||
// operation.
|
||||
//
|
||||
// NONE - No ConsumedCapacity details are included in the response.
|
||||
@@ -5008,6 +5226,12 @@ type UpdateItemInput struct {
|
||||
// ALL_NEW - All of the attributes of the new version of the item are returned.
|
||||
//
|
||||
// UPDATED_NEW - The new versions of only the updated attributes are returned.
|
||||
//
|
||||
// There is no additional cost associated with requesting a return value
|
||||
// aside from the small network and processing overhead of receiving a larger
|
||||
// response. No Read Capacity Units are consumed.
|
||||
//
|
||||
// Values returned are strongly consistent
|
||||
ReturnValues *string `type:"string" enum:"ReturnValue"`
|
||||
|
||||
// The name of the table containing the item to update.
|
||||
@@ -5303,7 +5527,7 @@ const (
|
||||
// Determines the level of detail about provisioned throughput consumption that
|
||||
// is returned in the response:
|
||||
//
|
||||
// INDEXES - The response includes the aggregate ConsumedCapacity for the
|
||||
// INDEXES - The response includes the aggregate ConsumedCapacity for the
|
||||
// operation, together with ConsumedCapacity for each table and secondary index
|
||||
// that was accessed.
|
||||
//
|
||||
@@ -5311,7 +5535,7 @@ const (
|
||||
// any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity
|
||||
// information for table(s).
|
||||
//
|
||||
// TOTAL - The response includes only the aggregate ConsumedCapacity for the
|
||||
// TOTAL - The response includes only the aggregate ConsumedCapacity for the
|
||||
// operation.
|
||||
//
|
||||
// NONE - No ConsumedCapacity details are included in the response.
|
||||
|
||||
64
vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter.go
generated
vendored
@@ -1,50 +1,3 @@
|
||||
// Package dynamodbattribute provides conversion utilities from dynamodb.AttributeValue
|
||||
// to concrete Go types and structures. These conversion utilities allow you to
|
||||
// convert a Struct, Slice, Map, or Scalar value to or from dynamodb.AttributeValue.
|
||||
// These are most useful to serialize concrete types to dynamodb.AttributeValue for
|
||||
// requests or unmarshalling the dynamodb.AttributeValue into a well known typed form.
|
||||
//
|
||||
// Converting []byte fields to dynamodb.AttributeValue are only currently supported
|
||||
// if the input is a map[string]interface{} type. []byte within typed structs are not
|
||||
// converted correctly and are converted into base64 strings. This is a known bug,
|
||||
// and will be fixed in a later release.
|
||||
//
|
||||
// Convert concrete type to dynamodb.AttributeValue: See (ExampleConvertTo)
|
||||
//
|
||||
// type Record struct {
|
||||
// MyField string
|
||||
// Letters []string
|
||||
// A2Num map[string]int
|
||||
// }
|
||||
//
|
||||
// ...
|
||||
//
|
||||
// r := Record{
|
||||
// MyField: "dynamodbattribute.ConvertToX example",
|
||||
// Letters: []string{"a", "b", "c", "d"},
|
||||
// A2Num: map[string]int{"a": 1, "b": 2, "c": 3},
|
||||
// }
|
||||
// av, err := dynamodbattribute.ConvertTo(r)
|
||||
// fmt.Println(av, err)
|
||||
//
|
||||
// Convert dynamodb.AttributeValue to Concrete type: See (ExampleConvertFrom)
|
||||
//
|
||||
// r2 := Record{}
|
||||
// err = dynamodbattribute.ConvertFrom(av, &r2)
|
||||
// fmt.Println(err, reflect.DeepEqual(r, r2))
|
||||
//
|
||||
// Use Conversion utilities with DynamoDB.PutItem: See ()
|
||||
//
|
||||
// svc := dynamodb.New(nil)
|
||||
// item, err := dynamodbattribute.ConvertToMap(r)
|
||||
// if err != nil {
|
||||
// fmt.Println("Failed to convert", err)
|
||||
// return
|
||||
// }
|
||||
// result, err := svc.PutItem(&dynamodb.PutItemInput{
|
||||
// Item: item,
|
||||
// TableName: aws.String("exampleTable"),
|
||||
// })
|
||||
package dynamodbattribute
|
||||
|
||||
import (
|
||||
@@ -64,6 +17,8 @@ import (
|
||||
//
|
||||
// If in contains any structs, it is first JSON encoded/decoded to convert it
|
||||
// to a map[string]interface{}, so `json` struct tags are respected.
|
||||
//
|
||||
// Deprecated: Use MarshalMap instead
|
||||
func ConvertToMap(in interface{}) (item map[string]*dynamodb.AttributeValue, err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
@@ -110,6 +65,8 @@ func ConvertToMap(in interface{}) (item map[string]*dynamodb.AttributeValue, err
|
||||
// If v points to a struct, the result is first converted to a
// map[string]interface{}, then JSON encoded/decoded to convert to a struct,
|
||||
// so `json` struct tags are respected.
|
||||
//
|
||||
// Deprecated: Use UnmarshalMap instead
|
||||
func ConvertFromMap(item map[string]*dynamodb.AttributeValue, v interface{}) (err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
@@ -155,8 +112,15 @@ func ConvertFromMap(item map[string]*dynamodb.AttributeValue, v interface{}) (er
|
||||
// ConvertToList accepts an array or slice and converts it to a
|
||||
// []*dynamodb.AttributeValue.
|
||||
//
|
||||
// Converting []byte fields to dynamodb.AttributeValue are only currently supported
|
||||
// if the input is a map[string]interface{} type. []byte within typed structs are not
|
||||
// converted correctly and are converted into base64 strings. This is a known bug,
|
||||
// and will be fixed in a later release.
|
||||
//
|
||||
// If in contains any structs, it is first JSON encoded/decoded to convert it
|
||||
// to a []interface{}, so `json` struct tags are respected.
|
||||
//
|
||||
// Deprecated: Use MarshalList instead
|
||||
func ConvertToList(in interface{}) (item []*dynamodb.AttributeValue, err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
@@ -204,6 +168,8 @@ func ConvertToList(in interface{}) (item []*dynamodb.AttributeValue, err error)
|
||||
// If v contains any structs, the result is first converted to a
// []interface{}, then JSON encoded/decoded to convert to a typed array or
|
||||
// slice, so `json` struct tags are respected.
|
||||
//
|
||||
// Deprecated: Use UnmarshalList instead
|
||||
func ConvertFromList(item []*dynamodb.AttributeValue, v interface{}) (err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
@@ -250,6 +216,8 @@ func ConvertFromList(item []*dynamodb.AttributeValue, v interface{}) (err error)
|
||||
//
|
||||
// If in contains any structs, it is first JSON encoded/decoded to convert it
// to an interface{}, so `json` struct tags are respected.
|
||||
//
|
||||
// Deprecated: Use Marshal instead
|
||||
func ConvertTo(in interface{}) (item *dynamodb.AttributeValue, err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
@@ -278,6 +246,8 @@ func ConvertTo(in interface{}) (item *dynamodb.AttributeValue, err error) {
|
||||
// If v contains any structs, the result is first converted to an interface{},
// then JSON encoded/decoded to convert to a struct, so `json` struct tags
|
||||
// are respected.
|
||||
//
|
||||
// Deprecated: Use Unmarshal instead
|
||||
func ConvertFrom(item *dynamodb.AttributeValue, v interface{}) (err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
|
||||
628
vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/decode.go
generated
vendored
Normal file
@@ -0,0 +1,628 @@
|
||||
package dynamodbattribute
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
|
||||
"github.com/aws/aws-sdk-go/service/dynamodb"
|
||||
)
|
||||
|
||||
// An Unmarshaler is an interface to provide custom unmarshaling of
|
||||
// AttributeValues. Use this to provide custom logic determining
|
||||
// how AttributeValues should be unmarshaled.
|
||||
// type ExampleUnmarshaler struct {
|
||||
// Value int
|
||||
// }
|
||||
//
|
||||
// func (u *ExampleUnmarshaler) UnmarshalDynamoDBAttributeValue(av *dynamodb.AttributeValue) error {
|
||||
// if av.N == nil {
|
||||
// return nil
|
||||
// }
|
||||
//
|
||||
// n, err := strconv.ParseInt(*av.N, 10, 0)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
//
|
||||
// u.Value = int(n)
|
||||
// return nil
|
||||
// }
|
||||
type Unmarshaler interface {
|
||||
UnmarshalDynamoDBAttributeValue(*dynamodb.AttributeValue) error
|
||||
}
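// A hedged sketch of a complete Unmarshaler implementation, adapted from the
// example above; ExampleUnmarshaler and the literal values are illustrative only:
//
//	type ExampleUnmarshaler struct {
//		Value int
//	}
//
//	func (u *ExampleUnmarshaler) UnmarshalDynamoDBAttributeValue(av *dynamodb.AttributeValue) error {
//		if av.N == nil {
//			return nil
//		}
//		n, err := strconv.ParseInt(*av.N, 10, 0)
//		if err != nil {
//			return err
//		}
//		u.Value = int(n)
//		return nil
//	}
//
//	// Unmarshal detects the interface on the pointer and delegates to it.
//	n := "42"
//	u := ExampleUnmarshaler{}
//	err := Unmarshal(&dynamodb.AttributeValue{N: &n}, &u)
//	// u.Value == 42, err == nil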
|
||||
|
||||
// Unmarshal will unmarshal DynamoDB AttributeValues to Go value types.
|
||||
// Both generic interface{} and concrete types are valid unmarshal
|
||||
// destination types.
|
||||
//
|
||||
// Unmarshal will allocate maps, slices, and pointers as needed to
|
||||
// unmarshal the AttributeValue into the provided type value.
|
||||
//
|
||||
// When unmarshaling AttributeValues into structs Unmarshal matches
|
||||
// the field names of the struct to the AttributeValue Map keys.
|
||||
// Initially it will look for exact field name matching, but will
|
||||
// fall back to a case-insensitive match if no exact match is found.
|
||||
//
|
||||
// With the exception of omitempty, omitemptyelem, binaryset, numberset
|
||||
// and stringset all struct tags used by Marshal are also used by
|
||||
// Unmarshal.
|
||||
//
|
||||
// When decoding AttributeValues to interfaces Unmarshal will use the
|
||||
// following types.
|
||||
//
|
||||
// []byte, AV Binary (B)
|
||||
// [][]byte, AV Binary Set (BS)
|
||||
// bool, AV Boolean (BOOL)
|
||||
// []interface{}, AV List (L)
|
||||
// map[string]interface{}, AV Map (M)
|
||||
// float64, AV Number (N)
|
||||
// Number, AV Number (N) with UseNumber set
|
||||
// []float64, AV Number Set (NS)
|
||||
// []Number, AV Number Set (NS) with UseNumber set
|
||||
// string, AV String (S)
|
||||
// []string, AV String Set (SS)
|
||||
//
|
||||
// If the Decoder option UseNumber is set, numbers will be unmarshaled
// as Number values instead of float64. Use this to maintain the original
// string formatting of the number as it was represented in the AttributeValue.
// This also provides additional opportunities to parse the number
// string based on individual use cases.
|
||||
//
|
||||
// When unmarshaling, any error that occurs will halt the unmarshal
|
||||
// and return the error.
|
||||
//
|
||||
// The output value provided must be a non-nil pointer
|
||||
func Unmarshal(av *dynamodb.AttributeValue, out interface{}) error {
|
||||
return NewDecoder().Decode(av, out)
|
||||
}
|
||||
|
||||
// UnmarshalMap is an alias for Unmarshal which unmarshals from
|
||||
// a map of AttributeValues.
|
||||
//
|
||||
// The output value provided must be a non-nil pointer
|
||||
func UnmarshalMap(m map[string]*dynamodb.AttributeValue, out interface{}) error {
|
||||
return NewDecoder().Decode(&dynamodb.AttributeValue{M: m}, out)
|
||||
}
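// For illustration, a minimal sketch of UnmarshalMap against a GetItem result.
// The client is created as in the package examples, aws.String is the usual
// pointer helper from the aws package, and the Record type, table name, and
// key value are hypothetical:
//
//	type Record struct {
//		ID      string
//		Letters []string
//	}
//
//	svc := dynamodb.New(nil)
//	out, err := svc.GetItem(&dynamodb.GetItemInput{
//		TableName: aws.String("exampleTable"),
//		Key: map[string]*dynamodb.AttributeValue{
//			"ID": {S: aws.String("abc123")},
//		},
//	})
//	if err != nil {
//		return err
//	}
//
//	// out.Item is a map[string]*dynamodb.AttributeValue.
//	rec := Record{}
//	err = UnmarshalMap(out.Item, &rec)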
|
||||
|
||||
// UnmarshalList is an alias for Unmarshal func which unmarshals
|
||||
// a slice of AttributeValues.
|
||||
//
|
||||
// The output value provided must be a non-nil pointer
|
||||
func UnmarshalList(l []*dynamodb.AttributeValue, out interface{}) error {
|
||||
return NewDecoder().Decode(&dynamodb.AttributeValue{L: l}, out)
|
||||
}
|
||||
|
||||
// A Decoder provides unmarshaling AttributeValues to Go value types.
|
||||
type Decoder struct {
|
||||
MarshalOptions
|
||||
|
||||
// Instructs the decoder to decode AttributeValue Numbers as
|
||||
// Number type instead of float64 when the destination type
|
||||
// is interface{}. Similar to encoding/json.Number
|
||||
UseNumber bool
|
||||
}
|
||||
|
||||
// NewDecoder creates a new Decoder with default configuration. Use
|
||||
// the `opts` functional options to override the default configuration.
|
||||
func NewDecoder(opts ...func(*Decoder)) *Decoder {
|
||||
d := &Decoder{
|
||||
MarshalOptions: MarshalOptions{
|
||||
SupportJSONTags: true,
|
||||
},
|
||||
}
|
||||
for _, o := range opts {
|
||||
o(d)
|
||||
}
|
||||
|
||||
return d
|
||||
}
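// A short sketch of the functional options accepted by NewDecoder; enabling
// UseNumber keeps numbers as Number instead of float64 when decoding into
// interface{} (the literal value below is illustrative only):
//
//	d := NewDecoder(func(d *Decoder) {
//		d.UseNumber = true
//	})
//
//	n := "123.45"
//	var out interface{}
//	if err := d.Decode(&dynamodb.AttributeValue{N: &n}, &out); err != nil {
//		return err
//	}
//	num := out.(Number)
//	f, err := num.Float64() // parse only when needed, preserving the original string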
|
||||
|
||||
// Decode will unmarshal an AttributeValue into a Go value type. An error
|
||||
// will be returned if the decoder is unable to unmarshal the AttributeValue
// to the provided Go value type.
|
||||
//
|
||||
// The output value provided must be a non-nil pointer
|
||||
func (d *Decoder) Decode(av *dynamodb.AttributeValue, out interface{}, opts ...func(*Decoder)) error {
|
||||
v := reflect.ValueOf(out)
|
||||
if v.Kind() != reflect.Ptr || v.IsNil() || !v.IsValid() {
|
||||
return &InvalidUnmarshalError{Type: reflect.TypeOf(out)}
|
||||
}
|
||||
|
||||
return d.decode(av, v, tag{})
|
||||
}
|
||||
|
||||
var stringInterfaceMapType = reflect.TypeOf(map[string]interface{}(nil))
|
||||
var byteSliceType = reflect.TypeOf([]byte(nil))
|
||||
var byteSliceSlicetype = reflect.TypeOf([][]byte(nil))
|
||||
var numberType = reflect.TypeOf(Number(""))
|
||||
|
||||
func (d *Decoder) decode(av *dynamodb.AttributeValue, v reflect.Value, fieldTag tag) error {
|
||||
var u Unmarshaler
|
||||
if av == nil || av.NULL != nil {
|
||||
u, v = indirect(v, true)
|
||||
if u != nil {
|
||||
return u.UnmarshalDynamoDBAttributeValue(av)
|
||||
}
|
||||
return d.decodeNull(v)
|
||||
}
|
||||
|
||||
u, v = indirect(v, false)
|
||||
if u != nil {
|
||||
return u.UnmarshalDynamoDBAttributeValue(av)
|
||||
}
|
||||
|
||||
switch {
|
||||
case len(av.B) != 0:
|
||||
return d.decodeBinary(av.B, v)
|
||||
case av.BOOL != nil:
|
||||
return d.decodeBool(av.BOOL, v)
|
||||
case len(av.BS) != 0:
|
||||
return d.decodeBinarySet(av.BS, v)
|
||||
case len(av.L) != 0:
|
||||
return d.decodeList(av.L, v)
|
||||
case len(av.M) != 0:
|
||||
return d.decodeMap(av.M, v)
|
||||
case av.N != nil:
|
||||
return d.decodeNumber(av.N, v)
|
||||
case len(av.NS) != 0:
|
||||
return d.decodeNumberSet(av.NS, v)
|
||||
case av.S != nil:
|
||||
return d.decodeString(av.S, v, fieldTag)
|
||||
case len(av.SS) != 0:
|
||||
return d.decodeStringSet(av.SS, v)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Decoder) decodeBinary(b []byte, v reflect.Value) error {
|
||||
if v.Kind() == reflect.Interface {
|
||||
buf := make([]byte, len(b))
|
||||
copy(buf, b)
|
||||
v.Set(reflect.ValueOf(buf))
|
||||
return nil
|
||||
}
|
||||
|
||||
switch v.Interface().(type) {
|
||||
case []byte:
|
||||
if v.IsNil() || v.Cap() < len(b) {
|
||||
v.Set(reflect.MakeSlice(byteSliceType, len(b), len(b)))
|
||||
} else if v.Len() != len(b) {
|
||||
v.SetLen(len(b))
|
||||
}
|
||||
copy(v.Interface().([]byte), b)
|
||||
default:
|
||||
if v.Kind() == reflect.Array && v.Type().Elem().Kind() == reflect.Uint8 {
|
||||
reflect.Copy(v, reflect.ValueOf(b))
|
||||
break
|
||||
}
|
||||
return &UnmarshalTypeError{Value: "binary", Type: v.Type()}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Decoder) decodeBool(b *bool, v reflect.Value) error {
|
||||
switch v.Kind() {
|
||||
case reflect.Bool, reflect.Interface:
|
||||
v.Set(reflect.ValueOf(*b))
|
||||
default:
|
||||
return &UnmarshalTypeError{Value: "bool", Type: v.Type()}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Decoder) decodeBinarySet(bs [][]byte, v reflect.Value) error {
|
||||
switch v.Kind() {
|
||||
case reflect.Slice:
|
||||
// Make room for the slice elements if needed
|
||||
if v.IsNil() || v.Cap() < len(bs) {
|
||||
// What about if ignoring nil/empty values?
|
||||
v.Set(reflect.MakeSlice(v.Type(), 0, len(bs)))
|
||||
}
|
||||
case reflect.Array:
|
||||
// Limited to capacity of existing array.
|
||||
case reflect.Interface:
|
||||
set := make([][]byte, len(bs))
|
||||
for i, b := range bs {
|
||||
if err := d.decodeBinary(b, reflect.ValueOf(&set[i]).Elem()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
v.Set(reflect.ValueOf(set))
|
||||
return nil
|
||||
default:
|
||||
return &UnmarshalTypeError{Value: "binary set", Type: v.Type()}
|
||||
}
|
||||
|
||||
for i := 0; i < v.Cap() && i < len(bs); i++ {
|
||||
v.SetLen(i + 1)
|
||||
u, elem := indirect(v.Index(i), false)
|
||||
if u != nil {
|
||||
return u.UnmarshalDynamoDBAttributeValue(&dynamodb.AttributeValue{BS: bs})
|
||||
}
|
||||
if err := d.decodeBinary(bs[i], elem); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Decoder) decodeNumber(n *string, v reflect.Value) error {
|
||||
switch v.Kind() {
|
||||
case reflect.Interface:
|
||||
i, err := d.decodeNumberToInterface(n)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v.Set(reflect.ValueOf(i))
|
||||
return nil
|
||||
case reflect.String:
|
||||
if v.Type() == numberType { // Support Number value type
|
||||
v.Set(reflect.ValueOf(Number(*n)))
|
||||
return nil
|
||||
}
|
||||
v.Set(reflect.ValueOf(*n))
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
i, err := strconv.ParseInt(*n, 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if v.OverflowInt(i) {
|
||||
return &UnmarshalTypeError{
|
||||
Value: fmt.Sprintf("number overflow, %s", *n),
|
||||
Type: v.Type(),
|
||||
}
|
||||
}
|
||||
v.SetInt(i)
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
i, err := strconv.ParseUint(*n, 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if v.OverflowUint(i) {
|
||||
return &UnmarshalTypeError{
|
||||
Value: fmt.Sprintf("number overflow, %s", *n),
|
||||
Type: v.Type(),
|
||||
}
|
||||
}
|
||||
v.SetUint(i)
|
||||
case reflect.Float32, reflect.Float64:
|
||||
i, err := strconv.ParseFloat(*n, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if v.OverflowFloat(i) {
|
||||
return &UnmarshalTypeError{
|
||||
Value: fmt.Sprintf("number overflow, %s", *n),
|
||||
Type: v.Type(),
|
||||
}
|
||||
}
|
||||
v.SetFloat(i)
|
||||
default:
|
||||
return &UnmarshalTypeError{Value: "number", Type: v.Type()}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Decoder) decodeNumberToInterface(n *string) (interface{}, error) {
|
||||
if d.UseNumber {
|
||||
return Number(*n), nil
|
||||
}
|
||||
|
||||
// Default to float64 for all numbers
|
||||
return strconv.ParseFloat(*n, 64)
|
||||
}
|
||||
|
||||
func (d *Decoder) decodeNumberSet(ns []*string, v reflect.Value) error {
|
||||
switch v.Kind() {
|
||||
case reflect.Slice:
|
||||
// Make room for the slice elements if needed
|
||||
if v.IsNil() || v.Cap() < len(ns) {
|
||||
// What about if ignoring nil/empty values?
|
||||
v.Set(reflect.MakeSlice(v.Type(), 0, len(ns)))
|
||||
}
|
||||
case reflect.Array:
|
||||
// Limited to capacity of existing array.
|
||||
case reflect.Interface:
|
||||
if d.UseNumber {
|
||||
set := make([]Number, len(ns))
|
||||
for i, n := range ns {
|
||||
if err := d.decodeNumber(n, reflect.ValueOf(&set[i]).Elem()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
v.Set(reflect.ValueOf(set))
|
||||
} else {
|
||||
set := make([]float64, len(ns))
|
||||
for i, n := range ns {
|
||||
if err := d.decodeNumber(n, reflect.ValueOf(&set[i]).Elem()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
v.Set(reflect.ValueOf(set))
|
||||
}
|
||||
return nil
|
||||
default:
|
||||
return &UnmarshalTypeError{Value: "number set", Type: v.Type()}
|
||||
}
|
||||
|
||||
for i := 0; i < v.Cap() && i < len(ns); i++ {
|
||||
v.SetLen(i + 1)
|
||||
u, elem := indirect(v.Index(i), false)
|
||||
if u != nil {
|
||||
return u.UnmarshalDynamoDBAttributeValue(&dynamodb.AttributeValue{NS: ns})
|
||||
}
|
||||
if err := d.decodeNumber(ns[i], elem); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Decoder) decodeList(avList []*dynamodb.AttributeValue, v reflect.Value) error {
|
||||
switch v.Kind() {
|
||||
case reflect.Slice:
|
||||
// Make room for the slice elements if needed
|
||||
if v.IsNil() || v.Cap() < len(avList) {
|
||||
// What about if ignoring nil/empty values?
|
||||
v.Set(reflect.MakeSlice(v.Type(), 0, len(avList)))
|
||||
}
|
||||
case reflect.Array:
|
||||
// Limited to capacity of existing array.
|
||||
case reflect.Interface:
|
||||
s := make([]interface{}, len(avList))
|
||||
for i, av := range avList {
|
||||
if err := d.decode(av, reflect.ValueOf(&s[i]).Elem(), tag{}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
v.Set(reflect.ValueOf(s))
|
||||
return nil
|
||||
default:
|
||||
return &UnmarshalTypeError{Value: "list", Type: v.Type()}
|
||||
}
|
||||
|
||||
// If v is not a slice, array
|
||||
for i := 0; i < v.Cap() && i < len(avList); i++ {
|
||||
v.SetLen(i + 1)
|
||||
if err := d.decode(avList[i], v.Index(i), tag{}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Decoder) decodeMap(avMap map[string]*dynamodb.AttributeValue, v reflect.Value) error {
|
||||
switch v.Kind() {
|
||||
case reflect.Map:
|
||||
t := v.Type()
|
||||
if t.Key().Kind() != reflect.String {
|
||||
return &UnmarshalTypeError{Value: "map string key", Type: t.Key()}
|
||||
}
|
||||
if v.IsNil() {
|
||||
v.Set(reflect.MakeMap(t))
|
||||
}
|
||||
case reflect.Struct:
|
||||
case reflect.Interface:
|
||||
v.Set(reflect.MakeMap(stringInterfaceMapType))
|
||||
v = v.Elem()
|
||||
default:
|
||||
return &UnmarshalTypeError{Value: "map", Type: v.Type()}
|
||||
}
|
||||
|
||||
if v.Kind() == reflect.Map {
|
||||
for k, av := range avMap {
|
||||
key := reflect.ValueOf(k)
|
||||
elem := reflect.New(v.Type().Elem()).Elem()
|
||||
if err := d.decode(av, elem, tag{}); err != nil {
|
||||
return err
|
||||
}
|
||||
v.SetMapIndex(key, elem)
|
||||
}
|
||||
} else if v.Kind() == reflect.Struct {
|
||||
fields := unionStructFields(v.Type(), d.MarshalOptions)
|
||||
for k, av := range avMap {
|
||||
if f, ok := fieldByName(fields, k); ok {
|
||||
fv := v.FieldByIndex(f.Index)
|
||||
if err := d.decode(av, fv, f.tag); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Decoder) decodeNull(v reflect.Value) error {
|
||||
if v.IsValid() && v.CanSet() {
|
||||
v.Set(reflect.Zero(v.Type()))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Decoder) decodeString(s *string, v reflect.Value, fieldTag tag) error {
|
||||
if fieldTag.AsString {
|
||||
return d.decodeNumber(s, v)
|
||||
}
|
||||
|
||||
switch v.Kind() {
|
||||
case reflect.String, reflect.Interface:
|
||||
v.Set(reflect.ValueOf(*s))
|
||||
default:
|
||||
return &UnmarshalTypeError{Value: "string", Type: v.Type()}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Decoder) decodeStringSet(ss []*string, v reflect.Value) error {
|
||||
switch v.Kind() {
|
||||
case reflect.Slice:
|
||||
// Make room for the slice elements if needed
|
||||
if v.IsNil() || v.Cap() < len(ss) {
|
||||
v.Set(reflect.MakeSlice(v.Type(), 0, len(ss)))
|
||||
}
|
||||
case reflect.Array:
|
||||
// Limited to capacity of existing array.
|
||||
case reflect.Interface:
|
||||
set := make([]string, len(ss))
|
||||
for i, s := range ss {
|
||||
if err := d.decodeString(s, reflect.ValueOf(&set[i]).Elem(), tag{}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
v.Set(reflect.ValueOf(set))
|
||||
return nil
|
||||
default:
|
||||
return &UnmarshalTypeError{Value: "string set", Type: v.Type()}
|
||||
}
|
||||
|
||||
for i := 0; i < v.Cap() && i < len(ss); i++ {
|
||||
v.SetLen(i + 1)
|
||||
u, elem := indirect(v.Index(i), false)
|
||||
if u != nil {
|
||||
return u.UnmarshalDynamoDBAttributeValue(&dynamodb.AttributeValue{SS: ss})
|
||||
}
|
||||
if err := d.decodeString(ss[i], elem, tag{}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// indirect will walk a value's interface or pointer value types, returning
// the final value or the value an unmarshaler is defined on.
//
// Based on the encoding/json reflect value type indirection in the Go Stdlib
// https://golang.org/src/encoding/json/decode.go indirect func.
|
||||
func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, reflect.Value) {
|
||||
if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
|
||||
v = v.Addr()
|
||||
}
|
||||
for {
|
||||
if v.Kind() == reflect.Interface && !v.IsNil() {
|
||||
e := v.Elem()
|
||||
if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
|
||||
v = e
|
||||
continue
|
||||
}
|
||||
}
|
||||
if v.Kind() != reflect.Ptr {
|
||||
break
|
||||
}
|
||||
if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
|
||||
break
|
||||
}
|
||||
if v.IsNil() {
|
||||
v.Set(reflect.New(v.Type().Elem()))
|
||||
}
|
||||
if v.Type().NumMethod() > 0 {
|
||||
if u, ok := v.Interface().(Unmarshaler); ok {
|
||||
return u, reflect.Value{}
|
||||
}
|
||||
}
|
||||
v = v.Elem()
|
||||
}
|
||||
|
||||
return nil, v
|
||||
}
|
||||
|
||||
// A Number represents an AttributeValue number literal.
|
||||
type Number string
|
||||
|
||||
// Float64 attempts to cast the number to a float64, returning
// the result of the cast or an error if the cast failed.
|
||||
func (n Number) Float64() (float64, error) {
|
||||
return strconv.ParseFloat(string(n), 64)
|
||||
}
|
||||
|
||||
// Int64 attempts to cast the number to an int64, returning
// the result of the cast or an error if the cast failed.
|
||||
func (n Number) Int64() (int64, error) {
|
||||
return strconv.ParseInt(string(n), 10, 64)
|
||||
}
|
||||
|
||||
// Uint64 attempts to cast the number to a uint64, returning
// the result of the cast or an error if the cast failed.
|
||||
func (n Number) Uint64() (uint64, error) {
|
||||
return strconv.ParseUint(string(n), 10, 64)
|
||||
}
|
||||
|
||||
// String returns the raw number represented as a string
|
||||
func (n Number) String() string {
|
||||
return string(n)
|
||||
}
|
||||
|
||||
type emptyOrigError struct{}
|
||||
|
||||
func (e emptyOrigError) OrigErr() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// An UnmarshalTypeError is an error type representing an error
|
||||
// unmarshaling the AttributeValue's element to a Go value type.
|
||||
// Includes details about the AttributeValue type and Go value type.
|
||||
type UnmarshalTypeError struct {
|
||||
emptyOrigError
|
||||
Value string
|
||||
Type reflect.Type
|
||||
}
|
||||
|
||||
// Error returns the string representation of the error.
|
||||
// satisfying the error interface
|
||||
func (e *UnmarshalTypeError) Error() string {
|
||||
return fmt.Sprintf("%s: %s", e.Code(), e.Message())
|
||||
}
|
||||
|
||||
// Code returns the code of the error, satisfying the awserr.Error
|
||||
// interface.
|
||||
func (e *UnmarshalTypeError) Code() string {
|
||||
return "UnmarshalTypeError"
|
||||
}
|
||||
|
||||
// Message returns the detailed message of the error, satisfying
|
||||
// the awserr.Error interface.
|
||||
func (e *UnmarshalTypeError) Message() string {
|
||||
return "cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
|
||||
}
|
||||
|
||||
// An InvalidUnmarshalError is an error type representing an invalid type
|
||||
// encountered while unmarshaling an AttributeValue to a Go value type.
|
||||
type InvalidUnmarshalError struct {
|
||||
emptyOrigError
|
||||
Type reflect.Type
|
||||
}
|
||||
|
||||
// Error returns the string representation of the error.
|
||||
// satisfying the error interface
|
||||
func (e *InvalidUnmarshalError) Error() string {
|
||||
return fmt.Sprintf("%s: %s", e.Code(), e.Message())
|
||||
}
|
||||
|
||||
// Code returns the code of the error, satisfying the awserr.Error
|
||||
// interface.
|
||||
func (e *InvalidUnmarshalError) Code() string {
|
||||
return "InvalidUnmarshalError"
|
||||
}
|
||||
|
||||
// Message returns the detailed message of the error, satisfying
|
||||
// the awserr.Error interface.
|
||||
func (e *InvalidUnmarshalError) Message() string {
|
||||
if e.Type == nil {
|
||||
return "cannot unmarshal to nil value"
|
||||
}
|
||||
if e.Type.Kind() != reflect.Ptr {
|
||||
return "cannot unmasrhal to non-pointer value, got " + e.Type.String()
|
||||
}
|
||||
return "cannot unmarshal to nil value, " + e.Type.String()
|
||||
}
|
||||
58
vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/doc.go
generated
vendored
Normal file
@@ -0,0 +1,58 @@
|
||||
// Package dynamodbattribute provides marshaling utilities for marshaling to
|
||||
// dynamodb.AttributeValue types and unmarshaling to Go value types. These
|
||||
// utilities allow you to marshal slices, maps, structs, and scalar values
|
||||
// to and from dynamodb.AttributeValue. These are useful when marshaling
|
||||
// Go value types to dynamodb.AttributeValue for DynamoDB requests, or
|
||||
// unmarshaling the dynamodb.AttributeValue back into a Go value type.
|
||||
//
|
||||
// Marshal Go value types to dynamodb.AttributeValue: See (ExampleMarshal)
|
||||
//
|
||||
// type Record struct {
|
||||
// MyField string
|
||||
// Letters []string
|
||||
// A2Num map[string]int
|
||||
// }
|
||||
//
|
||||
// ...
|
||||
//
|
||||
// r := Record{
|
||||
// MyField: "dynamodbattribute.Marshal example",
|
||||
// Letters: []string{"a", "b", "c", "d"},
|
||||
// A2Num: map[string]int{"a": 1, "b": 2, "c": 3},
|
||||
// }
|
||||
// av, err := dynamodbattribute.Marshal(r)
|
||||
// fmt.Println(av, err)
|
||||
//
|
||||
// Unmarshal dynamodb.AttributeValue to Go value type: See (ExampleUnmarshal)
|
||||
//
|
||||
// r2 := Record{}
|
||||
// err = dynamodbattribute.Unmarshal(av, &r2)
|
||||
// fmt.Println(err, reflect.DeepEqual(r, r2))
|
||||
//
|
||||
// Marshal Go value type for DynamoDB.PutItem:
|
||||
//
|
||||
// svc := dynamodb.New(nil)
|
||||
// item, err := dynamodbattribute.MarshalMap(r)
|
||||
// if err != nil {
|
||||
// fmt.Println("Failed to convert", err)
|
||||
// return
|
||||
// }
|
||||
// result, err := svc.PutItem(&dynamodb.PutItemInput{
|
||||
// Item: item,
|
||||
// TableName: aws.String("exampleTable"),
|
||||
// })
|
||||
//
|
||||
//
|
||||
//
|
||||
// The ConvertTo, ConvertToList, ConvertToMap, ConvertFrom, ConvertFromMap
|
||||
// and ConvertFromList methods have been deprecated. The Marshal and Unmarshal
|
||||
// functions should be used instead. The ConvertTo|From marshalers do not
// support BinarySet, NumberSet, or StringSet types, and will incorrectly
// marshal binary data fields in structs as base64 strings.
|
||||
//
|
||||
// The Marshal and Unmarshal functions correct this behavior, and remove
// the reliance on encoding/json. `json` struct tags are still supported.
// Support for the json.Marshaler and json.Unmarshaler interfaces has
// been removed and replaced with the dynamodbattribute.Marshaler
// and dynamodbattribute.Unmarshaler interfaces.
|
||||
package dynamodbattribute
|
||||
560
vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/encode.go
generated
vendored
Normal file
@@ -0,0 +1,560 @@
|
||||
package dynamodbattribute
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
|
||||
"github.com/aws/aws-sdk-go/service/dynamodb"
|
||||
)
|
||||
|
||||
// A Marshaler is an interface to provide custom marshaling of Go value types
// to AttributeValues. Use this to provide custom logic determining how a
// Go value type should be marshaled.
|
||||
//
|
||||
// type ExampleMarshaler struct {
|
||||
// Value int
|
||||
// }
|
||||
// func (m *ExampleMarshaler) MarshalDynamoDBAttributeValue(av *dynamodb.AttributeValue) error {
|
||||
// n := fmt.Sprintf("%v", m.Value)
|
||||
// av.N = &n
|
||||
//
|
||||
// return nil
|
||||
// }
|
||||
//
|
||||
type Marshaler interface {
|
||||
MarshalDynamoDBAttributeValue(*dynamodb.AttributeValue) error
|
||||
}
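// A hedged sketch of a working Marshaler, adapted from the example above;
// ExampleMarshaler and the literal value are illustrative only:
//
//	type ExampleMarshaler struct {
//		Value int
//	}
//
//	func (m *ExampleMarshaler) MarshalDynamoDBAttributeValue(av *dynamodb.AttributeValue) error {
//		n := fmt.Sprintf("%d", m.Value)
//		av.N = &n
//		return nil
//	}
//
//	// Marshal detects the interface and delegates to it.
//	av, err := Marshal(&ExampleMarshaler{Value: 7})
//	// av.N points to "7", err == nil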
|
||||
|
||||
// Marshal will serialize the passed in Go value type into a DynamoDB AttributeValue
|
||||
// type. This value can be used in DynamoDB API operations to simplify marshaling
|
||||
// your Go value types into AttributeValues.
|
||||
//
|
||||
// Marshal will recursively traverse the passed in value, marshaling its
// contents into an AttributeValue. Marshal supports basic scalars
|
||||
// (int,uint,float,bool,string), maps, slices, and structs. Anonymous
|
||||
// nested types are flattened based on Go anonymous type visibility.
|
||||
//
|
||||
// Marshaling slices to AttributeValue will default to a List for all
|
||||
// types except for []byte and [][]byte. []byte will be marshaled as
|
||||
// Binary data (B), and [][]byte will be marshaled as binary data set
|
||||
// (BS).
|
||||
//
|
||||
// The `dynamodbav` struct tag can be used to control how the value will be
// marshaled into an AttributeValue.
|
||||
//
|
||||
// // Field is ignored
|
||||
// Field int `dynamodb:"-"`
|
||||
//
|
||||
// // Field AttributeValue map key "myName"
|
||||
// Field int `dynamodb:"myName"`
|
||||
//
|
||||
// // Field AttributeValue map key "myName", and
|
||||
// // Field is omitted if it is empty
|
||||
// Field int `dynamodb:"myName,omitempty"`
|
||||
//
|
||||
// // Field AttributeValue map key "Field", and
|
||||
// // Field is omitted if it is empty
|
||||
// Field int `dynamodb:",omitempty"`
|
||||
//
|
||||
// // Field's elems will be omitted if empty
|
||||
// // only valid for slices, and maps.
|
||||
// Field []string `dynamodb:",omitemptyelem"`
|
||||
//
|
||||
// // Field will be marshaled as an AttributeValue string,
// // only valid for number types (int, uint, float)
|
||||
// Field int `dynamodb:",string"`
|
||||
//
|
||||
// // Field will be marshaled as a binary set
|
||||
// Field [][]byte `dynamodb:",binaryset"`
|
||||
//
|
||||
// // Field will be marshaled as a number set
|
||||
// Field []int `dynamodb:",numberset"`
|
||||
//
|
||||
// // Field will be marshaled as a string set
|
||||
// Field []string `dynamodb:",stringset"`
|
||||
//
|
||||
// The omitempty tag is only used during Marshaling and is ignored for
// Unmarshal. Without omitempty, any zero value, or a value that when marshaled
// results in an AttributeValue NULL, will be added to AttributeValue Maps during
// struct marshal. The omitemptyelem tag works the same as omitempty, except it
// applies to the elements of maps and slices instead of struct fields; empty
// elements will not be included in the marshaled AttributeValue Map, List, or Set.
|
||||
//
|
||||
// For convenience and backwards compatibility with the ConvertTo functions,
// json struct tags are supported by Marshal and Unmarshal. If
// both json and dynamodbav struct tags are provided, the json tag will
// be ignored in favor of dynamodbav.
|
||||
//
|
||||
// All struct fields, including anonymous fields, are marshaled unless
// any of the following conditions are met.
|
||||
//
|
||||
// - the field is not exported
|
||||
// - json or dynamodbav field tag is "-"
|
||||
// - json or dynamodbav field tag specifies "omitempty", and is empty.
|
||||
//
|
||||
// Pointer and interface values encode as the value pointed to or contained
|
||||
// in the interface. A nil value encodes as the AttributeValue NULL value.
|
||||
//
|
||||
// Channel, complex, and function values are not encoded and will be skipped
|
||||
// when walking the value to be marshaled.
|
||||
//
|
||||
// When marshaling, any error that occurs will halt the marshal and return
|
||||
// the error.
|
||||
//
|
||||
// Marshal cannot represent cyclic data structures and will not handle them.
|
||||
// Passing cyclic structures to Marshal will result in an infinite recursion.
|
||||
func Marshal(in interface{}) (*dynamodb.AttributeValue, error) {
|
||||
return NewEncoder().Encode(in)
|
||||
}
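// For illustration, a minimal sketch of Marshal with the struct tags described
// above; the Record type and field names are hypothetical:
//
//	type Record struct {
//		ID       string   `dynamodbav:"id"`
//		Notes    string   `dynamodbav:"notes,omitempty"`
//		Tags     []string `dynamodbav:",stringset"`
//		Internal string   `dynamodbav:"-"`
//	}
//
//	r := Record{ID: "abc123", Tags: []string{"a", "b"}}
//	av, err := Marshal(&r)
//	// av.M["id"] is a String value, av.M["Tags"] is a String Set (SS),
//	// Notes is dropped because it is empty and tagged omitempty, and
//	// Internal is always skipped because of the "-" tag.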
|
||||
|
||||
// MarshalMap is an alias for Marshal func which marshals Go value
|
||||
// type to a map of AttributeValues.
|
||||
func MarshalMap(in interface{}) (map[string]*dynamodb.AttributeValue, error) {
|
||||
av, err := NewEncoder().Encode(in)
|
||||
if err != nil || av == nil || av.M == nil {
|
||||
return map[string]*dynamodb.AttributeValue{}, err
|
||||
}
|
||||
|
||||
return av.M, nil
|
||||
}
|
||||
|
||||
// MarshalList is an alias for Marshal func which marshals Go value
|
||||
// type to a slice of AttributeValues.
|
||||
func MarshalList(in interface{}) ([]*dynamodb.AttributeValue, error) {
|
||||
av, err := NewEncoder().Encode(in)
|
||||
if err != nil || av == nil || av.L == nil {
|
||||
return []*dynamodb.AttributeValue{}, err
|
||||
}
|
||||
|
||||
return av.L, nil
|
||||
}
|
||||
|
||||
// A MarshalOptions is a collection of options shared between marshaling
|
||||
// and unmarshaling
|
||||
type MarshalOptions struct {
|
||||
// States that the encoding/json struct tags should be supported.
// If a `dynamodbav` struct tag is also provided, the encoding/json
// tag will be ignored.
|
||||
//
|
||||
// Enabled by default.
|
||||
SupportJSONTags bool
|
||||
}
|
||||
|
||||
// An Encoder provides marshaling Go value types to AttributeValues.
|
||||
type Encoder struct {
|
||||
MarshalOptions
|
||||
|
||||
// Empty strings, "", will be marked as NULL AttributeValue types.
|
||||
// Empty strings are not valid values for DynamoDB. This does not apply
// to lists, sets, or maps. Use the struct tag `omitemptyelem`
// to skip empty (zero) values in lists, sets, and maps.
|
||||
//
|
||||
// Enabled by default.
|
||||
NullEmptyString bool
|
||||
}
|
||||
|
||||
// NewEncoder creates a new Encoder with default configuration. Use
|
||||
// the `opts` functional options to override the default configuration.
|
||||
func NewEncoder(opts ...func(*Encoder)) *Encoder {
|
||||
e := &Encoder{
|
||||
MarshalOptions: MarshalOptions{
|
||||
SupportJSONTags: true,
|
||||
},
|
||||
NullEmptyString: true,
|
||||
}
|
||||
for _, o := range opts {
|
||||
o(e)
|
||||
}
|
||||
|
||||
return e
|
||||
}
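// The same functional-option pattern applies to the Encoder. For example, a
// sketch that keeps empty strings as empty S values instead of NULL (note that
// DynamoDB itself may still reject empty string values):
//
//	e := NewEncoder(func(e *Encoder) {
//		e.NullEmptyString = false
//	})
//	av, err := e.Encode(map[string]string{"note": ""})
//	// av.M["note"].S points to "" rather than av.M["note"].NULL being set.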
|
||||
|
||||
// Encode will marshal a Go value type to an AttributeValue, returning
// the constructed AttributeValue or an error.
|
||||
func (e *Encoder) Encode(in interface{}) (*dynamodb.AttributeValue, error) {
|
||||
av := &dynamodb.AttributeValue{}
|
||||
if err := e.encode(av, reflect.ValueOf(in), tag{}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return av, nil
|
||||
}
|
||||
|
||||
func (e *Encoder) encode(av *dynamodb.AttributeValue, v reflect.Value, fieldTag tag) error {
|
||||
// Handle both pointers and interface conversion into types
|
||||
v = valueElem(v)
|
||||
|
||||
if v.Kind() != reflect.Invalid {
|
||||
if used, err := tryMarshaler(av, v); used {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if fieldTag.OmitEmpty && emptyValue(v) {
|
||||
encodeNull(av)
|
||||
return nil
|
||||
}
|
||||
|
||||
switch v.Kind() {
|
||||
case reflect.Invalid:
|
||||
encodeNull(av)
|
||||
case reflect.Struct:
|
||||
return e.encodeStruct(av, v)
|
||||
case reflect.Map:
|
||||
return e.encodeMap(av, v, fieldTag)
|
||||
case reflect.Slice, reflect.Array:
|
||||
return e.encodeSlice(av, v, fieldTag)
|
||||
case reflect.Chan, reflect.Func, reflect.UnsafePointer:
|
||||
// do nothing for unsupported types
|
||||
default:
|
||||
return e.encodeScalar(av, v, fieldTag)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *Encoder) encodeStruct(av *dynamodb.AttributeValue, v reflect.Value) error {
|
||||
av.M = map[string]*dynamodb.AttributeValue{}
|
||||
fields := unionStructFields(v.Type(), e.MarshalOptions)
|
||||
for _, f := range fields {
|
||||
if f.Name == "" {
|
||||
return &InvalidMarshalError{msg: "map key cannot be empty"}
|
||||
}
|
||||
|
||||
fv := v.FieldByIndex(f.Index)
|
||||
elem := &dynamodb.AttributeValue{}
|
||||
err := e.encode(elem, fv, f.tag)
|
||||
skip, err := keepOrOmitEmpty(f.OmitEmpty, elem, err)
|
||||
if err != nil {
|
||||
return err
|
||||
} else if skip {
|
||||
continue
|
||||
}
|
||||
|
||||
av.M[f.Name] = elem
|
||||
}
|
||||
if len(av.M) == 0 {
|
||||
encodeNull(av)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *Encoder) encodeMap(av *dynamodb.AttributeValue, v reflect.Value, fieldTag tag) error {
|
||||
av.M = map[string]*dynamodb.AttributeValue{}
|
||||
for _, key := range v.MapKeys() {
|
||||
keyName := fmt.Sprint(key.Interface())
|
||||
if keyName == "" {
|
||||
return &InvalidMarshalError{msg: "map key cannot be empty"}
|
||||
}
|
||||
|
||||
elemVal := v.MapIndex(key)
|
||||
elem := &dynamodb.AttributeValue{}
|
||||
err := e.encode(elem, elemVal, tag{})
|
||||
skip, err := keepOrOmitEmpty(fieldTag.OmitEmptyElem, elem, err)
|
||||
if err != nil {
|
||||
return err
|
||||
} else if skip {
|
||||
continue
|
||||
}
|
||||
|
||||
av.M[keyName] = elem
|
||||
}
|
||||
if len(av.M) == 0 {
|
||||
encodeNull(av)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *Encoder) encodeSlice(av *dynamodb.AttributeValue, v reflect.Value, fieldTag tag) error {
|
||||
switch typed := v.Interface().(type) {
|
||||
case []byte:
|
||||
if len(typed) == 0 {
|
||||
encodeNull(av)
|
||||
return nil
|
||||
}
|
||||
av.B = append([]byte{}, typed...)
|
||||
default:
|
||||
var elemFn func(dynamodb.AttributeValue) error
|
||||
|
||||
if fieldTag.AsBinSet || v.Type() == byteSliceSlicetype { // Binary Set
|
||||
av.BS = make([][]byte, 0, v.Len())
|
||||
elemFn = func(elem dynamodb.AttributeValue) error {
|
||||
if elem.B == nil {
|
||||
return &InvalidMarshalError{msg: "binary set must only contain non-nil byte slices"}
|
||||
}
|
||||
av.BS = append(av.BS, elem.B)
|
||||
return nil
|
||||
}
|
||||
} else if fieldTag.AsNumSet { // Number Set
|
||||
av.NS = make([]*string, 0, v.Len())
|
||||
elemFn = func(elem dynamodb.AttributeValue) error {
|
||||
if elem.N == nil {
|
||||
return &InvalidMarshalError{msg: "number set must only contain non-nil string numbers"}
|
||||
}
|
||||
av.NS = append(av.NS, elem.N)
|
||||
return nil
|
||||
}
|
||||
} else if fieldTag.AsStrSet { // String Set
|
||||
av.SS = make([]*string, 0, v.Len())
|
||||
elemFn = func(elem dynamodb.AttributeValue) error {
|
||||
if elem.S == nil {
|
||||
return &InvalidMarshalError{msg: "string set must only contain non-nil strings"}
|
||||
}
|
||||
av.SS = append(av.SS, elem.S)
|
||||
return nil
|
||||
}
|
||||
} else { // List
|
||||
av.L = make([]*dynamodb.AttributeValue, 0, v.Len())
|
||||
elemFn = func(elem dynamodb.AttributeValue) error {
|
||||
av.L = append(av.L, &elem)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
if n, err := e.encodeList(v, fieldTag, elemFn); err != nil {
|
||||
return err
|
||||
} else if n == 0 {
|
||||
encodeNull(av)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *Encoder) encodeList(v reflect.Value, fieldTag tag, elemFn func(dynamodb.AttributeValue) error) (int, error) {
|
||||
count := 0
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
elem := dynamodb.AttributeValue{}
|
||||
err := e.encode(&elem, v.Index(i), tag{OmitEmpty: fieldTag.OmitEmptyElem})
|
||||
skip, err := keepOrOmitEmpty(fieldTag.OmitEmptyElem, &elem, err)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
} else if skip {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := elemFn(elem); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
count++
|
||||
}
|
||||
|
||||
return count, nil
|
||||
}
|
||||
|
||||
func (e *Encoder) encodeScalar(av *dynamodb.AttributeValue, v reflect.Value, fieldTag tag) error {
|
||||
switch typed := v.Interface().(type) {
|
||||
case bool:
|
||||
av.BOOL = new(bool)
|
||||
*av.BOOL = typed
|
||||
case string:
|
||||
if err := e.encodeString(av, v); err != nil {
|
||||
return err
|
||||
}
|
||||
case Number:
|
||||
s := string(typed)
|
||||
if fieldTag.AsString {
|
||||
av.S = &s
|
||||
} else {
|
||||
av.N = &s
|
||||
}
|
||||
default:
|
||||
// Fallback to encoding numbers, will return invalid type if not supported
|
||||
if err := e.encodeNumber(av, v); err != nil {
|
||||
return err
|
||||
}
|
||||
if fieldTag.AsString && av.NULL == nil && av.N != nil {
|
||||
av.S = av.N
|
||||
av.N = nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *Encoder) encodeNumber(av *dynamodb.AttributeValue, v reflect.Value) error {
|
||||
if used, err := tryMarshaler(av, v); used {
|
||||
return err
|
||||
}
|
||||
|
||||
var out string
|
||||
switch typed := v.Interface().(type) {
|
||||
case int:
|
||||
out = encodeInt(int64(typed))
|
||||
case int8:
|
||||
out = encodeInt(int64(typed))
|
||||
case int16:
|
||||
out = encodeInt(int64(typed))
|
||||
case int32:
|
||||
out = encodeInt(int64(typed))
|
||||
case int64:
|
||||
out = encodeInt(typed)
|
||||
case uint:
|
||||
out = encodeUint(uint64(typed))
|
||||
case uint8:
|
||||
out = encodeUint(uint64(typed))
|
||||
case uint16:
|
||||
out = encodeUint(uint64(typed))
|
||||
case uint32:
|
||||
out = encodeUint(uint64(typed))
|
||||
case uint64:
|
||||
out = encodeUint(typed)
|
||||
case float32:
|
||||
out = encodeFloat(float64(typed))
|
||||
case float64:
|
||||
out = encodeFloat(typed)
|
||||
default:
|
||||
return &unsupportedMarshalTypeError{Type: v.Type()}
|
||||
}
|
||||
|
||||
av.N = &out
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *Encoder) encodeString(av *dynamodb.AttributeValue, v reflect.Value) error {
|
||||
if used, err := tryMarshaler(av, v); used {
|
||||
return err
|
||||
}
|
||||
|
||||
switch typed := v.Interface().(type) {
|
||||
case string:
|
||||
if len(typed) == 0 && e.NullEmptyString {
|
||||
encodeNull(av)
|
||||
} else {
|
||||
av.S = &typed
|
||||
}
|
||||
default:
|
||||
return &unsupportedMarshalTypeError{Type: v.Type()}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func encodeInt(i int64) string {
|
||||
return strconv.FormatInt(i, 10)
|
||||
}
|
||||
func encodeUint(u uint64) string {
|
||||
return strconv.FormatUint(u, 10)
|
||||
}
|
||||
func encodeFloat(f float64) string {
|
||||
return strconv.FormatFloat(f, 'f', -1, 64)
|
||||
}
|
||||
func encodeNull(av *dynamodb.AttributeValue) {
|
||||
t := true
|
||||
*av = dynamodb.AttributeValue{NULL: &t}
|
||||
}
|
||||
|
||||
func valueElem(v reflect.Value) reflect.Value {
|
||||
switch v.Kind() {
|
||||
case reflect.Interface, reflect.Ptr:
|
||||
for v.Kind() == reflect.Interface || v.Kind() == reflect.Ptr {
|
||||
v = v.Elem()
|
||||
}
|
||||
}
|
||||
|
||||
return v
|
||||
}
|
||||
|
||||
func emptyValue(v reflect.Value) bool {
|
||||
switch v.Kind() {
|
||||
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
|
||||
return v.Len() == 0
|
||||
case reflect.Bool:
|
||||
return !v.Bool()
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return v.Int() == 0
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return v.Uint() == 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v.Float() == 0
|
||||
case reflect.Interface, reflect.Ptr:
|
||||
return v.IsNil()
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func tryMarshaler(av *dynamodb.AttributeValue, v reflect.Value) (bool, error) {
|
||||
if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
|
||||
v = v.Addr()
|
||||
}
|
||||
|
||||
if v.Type().NumMethod() == 0 {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if m, ok := v.Interface().(Marshaler); ok {
|
||||
return true, m.MarshalDynamoDBAttributeValue(av)
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func keepOrOmitEmpty(omitEmpty bool, av *dynamodb.AttributeValue, err error) (bool, error) {
|
||||
if err != nil {
|
||||
if _, ok := err.(*unsupportedMarshalTypeError); ok {
|
||||
return true, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
if av.NULL != nil && omitEmpty {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// An InvalidMarshalError is an error type representing an error
|
||||
// occurring when marshaling a Go value type to an AttributeValue.
|
||||
type InvalidMarshalError struct {
|
||||
emptyOrigError
|
||||
msg string
|
||||
}
|
||||
|
||||
// Error returns the string representation of the error.
|
||||
// satisfying the error interface
|
||||
func (e *InvalidMarshalError) Error() string {
|
||||
return fmt.Sprintf("%s: %s", e.Code(), e.Message())
|
||||
}
|
||||
|
||||
// Code returns the code of the error, satisfying the awserr.Error
|
||||
// interface.
|
||||
func (e *InvalidMarshalError) Code() string {
|
||||
return "InvalidMarshalError"
|
||||
}
|
||||
|
||||
// Message returns the detailed message of the error, satisfying
|
||||
// the awserr.Error interface.
|
||||
func (e *InvalidMarshalError) Message() string {
|
||||
return e.msg
|
||||
}
|
||||
|
||||
// An unsupportedMarshalTypeError represents a Go value type
|
||||
// which cannot be marshaled into an AttributeValue and should
|
||||
// be skipped by the marshaler.
|
||||
type unsupportedMarshalTypeError struct {
|
||||
emptyOrigError
|
||||
Type reflect.Type
|
||||
}
|
||||
|
||||
// Error returns the string representation of the error.
|
||||
// satisfying the error interface
|
||||
func (e *unsupportedMarshalTypeError) Error() string {
|
||||
return fmt.Sprintf("%s: %s", e.Code(), e.Message())
|
||||
}
|
||||
|
||||
// Code returns the code of the error, satisfying the awserr.Error
|
||||
// interface.
|
||||
func (e *unsupportedMarshalTypeError) Code() string {
|
||||
return "unsupportedMarshalTypeError"
|
||||
}
|
||||
|
||||
// Message returns the detailed message of the error, satisfying
|
||||
// the awserr.Error interface.
|
||||
func (e *unsupportedMarshalTypeError) Message() string {
|
||||
return "Go value type " + e.Type.String() + " is not supported"
|
||||
}
|
||||
269
vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/field.go
generated
vendored
Normal file
@@ -0,0 +1,269 @@
|
||||
package dynamodbattribute
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type field struct {
|
||||
tag
|
||||
|
||||
Name string
|
||||
NameFromTag bool
|
||||
|
||||
Index []int
|
||||
Type reflect.Type
|
||||
}
|
||||
|
||||
func fieldByName(fields []field, name string) (field, bool) {
|
||||
foldExists := false
|
||||
foldField := field{}
|
||||
|
||||
for _, f := range fields {
|
||||
if f.Name == name {
|
||||
return f, true
|
||||
}
|
||||
if !foldExists && strings.EqualFold(f.Name, name) {
|
||||
foldField = f
|
||||
foldExists = true
|
||||
}
|
||||
}
|
||||
|
||||
return foldField, foldExists
|
||||
}
|
||||
|
||||
func buildField(pIdx []int, i int, sf reflect.StructField, fieldTag tag) field {
|
||||
f := field{
|
||||
Name: sf.Name,
|
||||
Type: sf.Type,
|
||||
tag: fieldTag,
|
||||
}
|
||||
if len(fieldTag.Name) != 0 {
|
||||
f.NameFromTag = true
|
||||
f.Name = fieldTag.Name
|
||||
}
|
||||
|
||||
f.Index = make([]int, len(pIdx)+1)
|
||||
copy(f.Index, pIdx)
|
||||
f.Index[len(pIdx)] = i
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
func unionStructFields(t reflect.Type, opts MarshalOptions) []field {
|
||||
fields := enumFields(t, opts)
|
||||
|
||||
sort.Sort(fieldsByName(fields))
|
||||
|
||||
fields = visibleFields(fields)
|
||||
|
||||
return fields
|
||||
}
|
||||
|
||||
// enumFields will recursively iterate through a structure and its nested
|
||||
// anonymous fields.
|
||||
//
|
||||
// Based on the encoding/json struct field enumeration of the Go Stdlib
|
||||
// https://golang.org/src/encoding/json/encode.go typeField func.
|
||||
func enumFields(t reflect.Type, opts MarshalOptions) []field {
|
||||
// Fields to explore
|
||||
current := []field{}
|
||||
next := []field{{Type: t}}
|
||||
|
||||
// count of queued names
|
||||
count := map[reflect.Type]int{}
|
||||
nextCount := map[reflect.Type]int{}
|
||||
|
||||
visited := map[reflect.Type]struct{}{}
|
||||
fields := []field{}
|
||||
|
||||
for len(next) > 0 {
|
||||
current, next = next, current[:0]
|
||||
count, nextCount = nextCount, map[reflect.Type]int{}
|
||||
|
||||
for _, f := range current {
|
||||
if _, ok := visited[f.Type]; ok {
|
||||
continue
|
||||
}
|
||||
visited[f.Type] = struct{}{}
|
||||
|
||||
for i := 0; i < f.Type.NumField(); i++ {
|
||||
sf := f.Type.Field(i)
|
||||
if sf.PkgPath != "" && !sf.Anonymous {
|
||||
// Ignore unexported and non-anonymous fields
|
||||
// unexported but anonymous field may still be used if
|
||||
// the type has exported nested fields
|
||||
continue
|
||||
}
|
||||
|
||||
fieldTag := tag{}
|
||||
fieldTag.parseAVTag(sf.Tag)
|
||||
if opts.SupportJSONTags && fieldTag == (tag{}) {
|
||||
fieldTag.parseJSONTag(sf.Tag)
|
||||
}
|
||||
|
||||
if fieldTag.Ignore {
|
||||
continue
|
||||
}
|
||||
|
||||
ft := sf.Type
|
||||
if ft.Name() == "" && ft.Kind() == reflect.Ptr {
|
||||
ft = ft.Elem()
|
||||
}
|
||||
|
||||
structField := buildField(f.Index, i, sf, fieldTag)
|
||||
structField.Type = ft
|
||||
|
||||
if !sf.Anonymous || ft.Kind() != reflect.Struct {
|
||||
fields = append(fields, structField)
|
||||
if count[f.Type] > 1 {
|
||||
// If there were multiple instances, add a second,
|
||||
// so that the annihilation code will see a duplicate.
|
||||
// It only cares about the distinction between 1 or 2,
|
||||
// so don't bother generating any more copies.
|
||||
fields = append(fields, structField)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Record new anon struct to explore next round
|
||||
nextCount[ft]++
|
||||
if nextCount[ft] == 1 {
|
||||
next = append(next, structField)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return fields
|
||||
}
|
||||
|
||||
// visibleFields will return a slice of fields which are visible based on
|
||||
// Go's standard visibility rules, with the exception of ties being broken
// by depth and struct tag naming.
//
// Based on the encoding/json field filtering of the Go Stdlib
|
||||
// https://golang.org/src/encoding/json/encode.go typeField func.
|
||||
func visibleFields(fields []field) []field {
|
||||
// Delete all fields that are hidden by the Go rules for embedded fields,
|
||||
// except that fields with JSON tags are promoted.
|
||||
|
||||
// The fields are sorted in primary order of name, secondary order
|
||||
// of field index length. Loop over names; for each name, delete
|
||||
// hidden fields by choosing the one dominant field that survives.
|
||||
out := fields[:0]
|
||||
for advance, i := 0, 0; i < len(fields); i += advance {
|
||||
// One iteration per name.
|
||||
// Find the sequence of fields with the name of this first field.
|
||||
fi := fields[i]
|
||||
name := fi.Name
|
||||
for advance = 1; i+advance < len(fields); advance++ {
|
||||
fj := fields[i+advance]
|
||||
if fj.Name != name {
|
||||
break
|
||||
}
|
||||
}
|
||||
if advance == 1 { // Only one field with this name
|
||||
out = append(out, fi)
|
||||
continue
|
||||
}
|
||||
dominant, ok := dominantField(fields[i : i+advance])
|
||||
if ok {
|
||||
out = append(out, dominant)
|
||||
}
|
||||
}
|
||||
|
||||
fields = out
|
||||
sort.Sort(fieldsByIndex(fields))
|
||||
|
||||
return fields
|
||||
}
|
||||
|
||||
// dominantField looks through the fields, all of which are known to
|
||||
// have the same name, to find the single field that dominates the
|
||||
// others using Go's embedding rules, modified by the presence of
|
||||
// JSON tags. If there are multiple top-level fields, the boolean
|
||||
// will be false: This condition is an error in Go and we skip all
|
||||
// the fields.
|
||||
//
|
||||
// Based on the encoding/json field filtering of the Go Stdlib
|
||||
// https://golang.org/src/encoding/json/encode.go dominantField func.
|
||||
func dominantField(fields []field) (field, bool) {
|
||||
// The fields are sorted in increasing index-length order. The winner
|
||||
// must therefore be one with the shortest index length. Drop all
|
||||
// longer entries, which is easy: just truncate the slice.
|
||||
length := len(fields[0].Index)
|
||||
tagged := -1 // Index of first tagged field.
|
||||
for i, f := range fields {
|
||||
if len(f.Index) > length {
|
||||
fields = fields[:i]
|
||||
break
|
||||
}
|
||||
if f.NameFromTag {
|
||||
if tagged >= 0 {
|
||||
// Multiple tagged fields at the same level: conflict.
|
||||
// Return no field.
|
||||
return field{}, false
|
||||
}
|
||||
tagged = i
|
||||
}
|
||||
}
|
||||
if tagged >= 0 {
|
||||
return fields[tagged], true
|
||||
}
|
||||
// All remaining fields have the same length. If there's more than one,
|
||||
// we have a conflict (two fields named "X" at the same level) and we
|
||||
// return no field.
|
||||
if len(fields) > 1 {
|
||||
return field{}, false
|
||||
}
|
||||
return fields[0], true
|
||||
}
|
||||
|
||||
// fieldsByName sorts field by name, breaking ties with depth,
|
||||
// then breaking ties with "name came from json tag", then
|
||||
// breaking ties with index sequence.
|
||||
//
|
||||
// Based on the encoding/json field filtering of the Go Stdlib
|
||||
// https://golang.org/src/encoding/json/encode.go fieldsByName type.
|
||||
type fieldsByName []field
|
||||
|
||||
func (x fieldsByName) Len() int { return len(x) }
|
||||
|
||||
func (x fieldsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
|
||||
func (x fieldsByName) Less(i, j int) bool {
|
||||
if x[i].Name != x[j].Name {
|
||||
return x[i].Name < x[j].Name
|
||||
}
|
||||
if len(x[i].Index) != len(x[j].Index) {
|
||||
return len(x[i].Index) < len(x[j].Index)
|
||||
}
|
||||
if x[i].NameFromTag != x[j].NameFromTag {
|
||||
return x[i].NameFromTag
|
||||
}
|
||||
return fieldsByIndex(x).Less(i, j)
|
||||
}
|
||||
|
||||
// fieldsByIndex sorts field by index sequence.
|
||||
//
|
||||
// Based on the encoding/json field filtering of the Go Stdlib
|
||||
// https://golang.org/src/encoding/json/encode.go fieldsByIndex type.
|
||||
type fieldsByIndex []field
|
||||
|
||||
func (x fieldsByIndex) Len() int { return len(x) }
|
||||
|
||||
func (x fieldsByIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
|
||||
func (x fieldsByIndex) Less(i, j int) bool {
|
||||
for k, xik := range x[i].Index {
|
||||
if k >= len(x[j].Index) {
|
||||
return false
|
||||
}
|
||||
if xik != x[j].Index[k] {
|
||||
return xik < x[j].Index[k]
|
||||
}
|
||||
}
|
||||
return len(x[i].Index) < len(x[j].Index)
|
||||
}
65
vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/tag.go
generated
vendored
Normal file
@@ -0,0 +1,65 @@
package dynamodbattribute

import (
	"reflect"
	"strings"
)

type tag struct {
	Name                         string
	Ignore                       bool
	OmitEmpty                    bool
	OmitEmptyElem                bool
	AsString                     bool
	AsBinSet, AsNumSet, AsStrSet bool
}

func (t *tag) parseAVTag(structTag reflect.StructTag) {
	tagStr := structTag.Get("dynamodbav")
	if len(tagStr) == 0 {
		return
	}

	t.parseTagStr(tagStr)
}

func (t *tag) parseJSONTag(structTag reflect.StructTag) {
	tagStr := structTag.Get("json")
	if len(tagStr) == 0 {
		return
	}

	t.parseTagStr(tagStr)
}

func (t *tag) parseTagStr(tagStr string) {
	parts := strings.SplitN(tagStr, ",", 2)
	if len(parts) == 0 {
		return
	}

	if name := parts[0]; name == "-" {
		t.Name = ""
		t.Ignore = true
	} else {
		t.Name = name
		t.Ignore = false
	}

	for _, opt := range parts[1:] {
		switch opt {
		case "omitempty":
			t.OmitEmpty = true
		case "omitemptyelem":
			t.OmitEmptyElem = true
		case "string":
			t.AsString = true
		case "binaryset":
			t.AsBinSet = true
		case "numberset":
			t.AsNumSet = true
		case "stringset":
			t.AsStrSet = true
		}
	}
}
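For context (illustrative only, not part of the vendored file): these options surface as `dynamodbav` struct tags on user-defined types. The Record type and its field names below are hypothetical.

// Hypothetical user-defined type showing how the parsed tag options are
// typically written: "-" ignores a field, "omitempty" skips zero values,
// and "stringset" marshals a slice as a DynamoDB string-set attribute.
type Record struct {
	ID       string   `dynamodbav:"id"`
	Internal string   `dynamodbav:"-"`
	Note     string   `dynamodbav:"note,omitempty"`
	Tags     []string `dynamodbav:"tags,stringset"`
}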
74
vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go
generated
vendored
@@ -1,74 +0,0 @@
// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

// Package dynamodbiface provides an interface for the Amazon DynamoDB.
package dynamodbiface

import (
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// DynamoDBAPI is the interface type for dynamodb.DynamoDB.
type DynamoDBAPI interface {
	BatchGetItemRequest(*dynamodb.BatchGetItemInput) (*request.Request, *dynamodb.BatchGetItemOutput)

	BatchGetItem(*dynamodb.BatchGetItemInput) (*dynamodb.BatchGetItemOutput, error)

	BatchGetItemPages(*dynamodb.BatchGetItemInput, func(*dynamodb.BatchGetItemOutput, bool) bool) error

	BatchWriteItemRequest(*dynamodb.BatchWriteItemInput) (*request.Request, *dynamodb.BatchWriteItemOutput)

	BatchWriteItem(*dynamodb.BatchWriteItemInput) (*dynamodb.BatchWriteItemOutput, error)

	CreateTableRequest(*dynamodb.CreateTableInput) (*request.Request, *dynamodb.CreateTableOutput)

	CreateTable(*dynamodb.CreateTableInput) (*dynamodb.CreateTableOutput, error)

	DeleteItemRequest(*dynamodb.DeleteItemInput) (*request.Request, *dynamodb.DeleteItemOutput)

	DeleteItem(*dynamodb.DeleteItemInput) (*dynamodb.DeleteItemOutput, error)

	DeleteTableRequest(*dynamodb.DeleteTableInput) (*request.Request, *dynamodb.DeleteTableOutput)

	DeleteTable(*dynamodb.DeleteTableInput) (*dynamodb.DeleteTableOutput, error)

	DescribeTableRequest(*dynamodb.DescribeTableInput) (*request.Request, *dynamodb.DescribeTableOutput)

	DescribeTable(*dynamodb.DescribeTableInput) (*dynamodb.DescribeTableOutput, error)

	GetItemRequest(*dynamodb.GetItemInput) (*request.Request, *dynamodb.GetItemOutput)

	GetItem(*dynamodb.GetItemInput) (*dynamodb.GetItemOutput, error)

	ListTablesRequest(*dynamodb.ListTablesInput) (*request.Request, *dynamodb.ListTablesOutput)

	ListTables(*dynamodb.ListTablesInput) (*dynamodb.ListTablesOutput, error)

	ListTablesPages(*dynamodb.ListTablesInput, func(*dynamodb.ListTablesOutput, bool) bool) error

	PutItemRequest(*dynamodb.PutItemInput) (*request.Request, *dynamodb.PutItemOutput)

	PutItem(*dynamodb.PutItemInput) (*dynamodb.PutItemOutput, error)

	QueryRequest(*dynamodb.QueryInput) (*request.Request, *dynamodb.QueryOutput)

	Query(*dynamodb.QueryInput) (*dynamodb.QueryOutput, error)

	QueryPages(*dynamodb.QueryInput, func(*dynamodb.QueryOutput, bool) bool) error

	ScanRequest(*dynamodb.ScanInput) (*request.Request, *dynamodb.ScanOutput)

	Scan(*dynamodb.ScanInput) (*dynamodb.ScanOutput, error)

	ScanPages(*dynamodb.ScanInput, func(*dynamodb.ScanOutput, bool) bool) error

	UpdateItemRequest(*dynamodb.UpdateItemInput) (*request.Request, *dynamodb.UpdateItemOutput)

	UpdateItem(*dynamodb.UpdateItemInput) (*dynamodb.UpdateItemOutput, error)

	UpdateTableRequest(*dynamodb.UpdateTableInput) (*request.Request, *dynamodb.UpdateTableOutput)

	UpdateTable(*dynamodb.UpdateTableInput) (*dynamodb.UpdateTableOutput, error)
}

var _ DynamoDBAPI = (*dynamodb.DynamoDB)(nil)
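A common use of an interface type like this is to let application code depend on DynamoDBAPI rather than the concrete client, so the client can be mocked in tests. The sketch below is illustrative only; mockDynamoDB, countTables, and the table name are made up and are not part of the vendored package.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/dynamodb"
	"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface"
)

// mockDynamoDB embeds the interface and overrides only the methods a test needs.
type mockDynamoDB struct {
	dynamodbiface.DynamoDBAPI
}

func (m *mockDynamoDB) ListTables(in *dynamodb.ListTablesInput) (*dynamodb.ListTablesOutput, error) {
	name := "example-table"
	return &dynamodb.ListTablesOutput{TableNames: []*string{&name}}, nil
}

// countTables depends only on the interface, so it accepts the real client or the mock.
func countTables(api dynamodbiface.DynamoDBAPI) (int, error) {
	out, err := api.ListTables(&dynamodb.ListTablesInput{})
	if err != nil {
		return 0, err
	}
	return len(out.TableNames), nil
}

func main() {
	n, _ := countTables(&mockDynamoDB{})
	fmt.Println(n) // 1
}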
38
vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go
generated
vendored
@@ -11,14 +11,27 @@ import (
"github.com/aws/aws-sdk-go/private/signer/v4"
)

// Overview
//
// This is the Amazon DynamoDB API Reference. This guide provides descriptions
// and samples of the low-level DynamoDB API. For information about DynamoDB
// application development, see the Amazon DynamoDB Developer Guide (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/).
// of the low-level DynamoDB API.
//
// Instead of making the requests to the low-level DynamoDB API directly from
// your application, we recommend that you use the AWS Software Development
// This guide is intended for use with the following DynamoDB documentation:
//
// Amazon DynamoDB Getting Started Guide (http://docs.aws.amazon.com/amazondynamodb/latest/gettingstartedguide/)
// - provides hands-on exercises that help you learn the basics of working with
// DynamoDB. If you are new to DynamoDB, we recommend that you begin with the
// Getting Started Guide.
//
// Amazon DynamoDB Developer Guide (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/)
// - contains detailed information about DynamoDB concepts, usage, and best
// practices.
//
// Amazon DynamoDB Streams API Reference (http://docs.aws.amazon.com/dynamodbstreams/latest/APIReference/)
// - provides descriptions and samples of the DynamoDB Streams API. (For more
// information, see Capturing Table Activity with DynamoDB Streams (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Streams.html)
// in the Amazon DynamoDB Developer Guide.)
//
// Instead of making the requests to the low-level DynamoDB API directly
// from your application, we recommend that you use the AWS Software Development
// Kits (SDKs). The easy-to-use libraries in the AWS SDKs make it unnecessary
// to call the low-level DynamoDB API directly from your application. The libraries
// take care of request authentication, serialization, and connection management.
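A minimal sketch of that recommendation using this SDK (illustrative only; the region and the Limit value are placeholders, and session.New is assumed to be available in this SDK vintage):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	// The SDK client handles request signing, serialization, retries, and
	// connection management; the application only builds typed inputs.
	svc := dynamodb.New(session.New(aws.NewConfig().WithRegion("us-west-2")))

	out, err := svc.ListTables(&dynamodb.ListTablesInput{Limit: aws.Int64(10)})
	if err != nil {
		fmt.Println("ListTables failed:", err)
		return
	}
	for _, name := range out.TableNames {
		fmt.Println(*name)
	}
}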
@@ -36,9 +49,8 @@ import (
// Managing Tables
//
// CreateTable - Creates a table with user-specified provisioned throughput
// settings. You must designate one attribute as the hash primary key for the
// table; you can optionally designate a second attribute as the range primary
// key. DynamoDB creates indexes on these key attributes for fast data access.
// settings. You must define a primary key for the table - either a simple primary
// key (partition key), or a composite primary key (partition key and sort key).
// Optionally, you can create one or more secondary indexes, which provide fast
// data access using non-key attributes.
//
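An illustrative CreateTableInput for a composite primary key, following the CreateTable description above (the table name, attribute names, and throughput values are made up; the aws and dynamodb imports are as in the previous sketch):

// createMusicTable defines "Artist" as the partition (HASH) key and
// "SongTitle" as the sort (RANGE) key, with fixed provisioned throughput.
func createMusicTable(svc *dynamodb.DynamoDB) error {
	_, err := svc.CreateTable(&dynamodb.CreateTableInput{
		TableName: aws.String("Music"),
		AttributeDefinitions: []*dynamodb.AttributeDefinition{
			{AttributeName: aws.String("Artist"), AttributeType: aws.String("S")},
			{AttributeName: aws.String("SongTitle"), AttributeType: aws.String("S")},
		},
		KeySchema: []*dynamodb.KeySchemaElement{
			{AttributeName: aws.String("Artist"), KeyType: aws.String("HASH")},
			{AttributeName: aws.String("SongTitle"), KeyType: aws.String("RANGE")},
		},
		ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
			ReadCapacityUnits:  aws.Int64(5),
			WriteCapacityUnits: aws.Int64(5),
		},
	})
	return err
}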
@@ -70,10 +82,10 @@ import (
// Both eventually consistent and strongly consistent reads can be used.
//
// Query - Returns one or more items from a table or a secondary index. You
// must provide a specific hash key value. You can narrow the scope of the query
// using comparison operators against a range key value, or on the index key.
// Query supports either eventual or strong consistency. A single response has
// a size limit of 1 MB.
// must provide a specific value for the partition key. You can narrow the scope
// of the query using comparison operators against a sort key value, or on the
// index key. Query supports either eventual or strong consistency. A single
// response has a size limit of 1 MB.
//
// Scan - Reads every item in a table; the result set is eventually consistent.
// You can limit the number of items returned by filtering the data attributes,