mirror of
https://github.com/optim-enterprises-bv/vault.git
synced 2025-11-01 11:08:10 +00:00
Merge remote-tracking branch 'oss/master' into database-refactor
This commit is contained in:
@@ -7,7 +7,7 @@ services:
|
||||
- docker
|
||||
|
||||
go:
|
||||
- 1.8
|
||||
- 1.8.1
|
||||
|
||||
matrix:
|
||||
allow_failures:
|
||||
|
||||
33
CHANGELOG.md
33
CHANGELOG.md
@@ -1,12 +1,45 @@
|
||||
## 0.7.1 (Unreleased)
|
||||
|
||||
DEPRECATIONS/CHANGES:
|
||||
|
||||
* LDAP Auth Backend: Group membership queries will now run as the `binddn`
|
||||
user when `binddn`/`bindpass` are configured, rather than as the
|
||||
authenticating user as was the case previously.
|
||||
|
||||
FEATURES:
|
||||
|
||||
* **MSSQL Physical Backend**: You can now use Microsoft SQL Server as your
|
||||
Vault physical data store [GH-2546]
|
||||
|
||||
IMPROVEMENTS:
|
||||
|
||||
* auth/ldap: Use the binding credentials to search group membership rather
|
||||
than the user credentials [GH-2534]
|
||||
* cli/revoke: Add `-self` option to allow revoking the currently active token
|
||||
[GH-2596]
|
||||
* secret/pki: Add `no_store` option that allows certificates to be issued
|
||||
without being stored. This removes the ability to look up and/or add to a
|
||||
CRL but helps with scaling to very large numbers of certificates. [GH-2565]
|
||||
* secret/pki: If used with a role parameter, the `sign-verbatim/<role>`
|
||||
endpoint honors the values of `generate_lease`, `no_store`, `ttl` and
|
||||
`max_ttl` from the given role [GH-2593]
|
||||
* storage/etcd3: Add `discovery_srv` option to query for SRV records to find
|
||||
servers [GH-2521]
|
||||
* storage/s3: Support `max_parallel` option to limit concurrent outstanding
|
||||
requests [GH-2466]
|
||||
* storage/s3: Use pooled transport for http client [GH-2481]
|
||||
* storage/swift: Allow domain values for V3 authentication [GH-2554]
|
||||
|
||||
BUG FIXES:
|
||||
|
||||
* api: Respect a configured path in Vault's address [GH-2588]
|
||||
* auth/aws-ec2: New bounds added as criteria to allow role creation [GH-2600]
|
||||
* secret/pki: Don't lowercase O/OU values in certs [GH-2555]
|
||||
* secret/pki: Don't attempt to validate IP SANs if none are provided [GH-2574]
|
||||
* secret/ssh: Don't automatically lowercase principles in issued SSH certs
|
||||
[GH-2591]
|
||||
* storage/consul: Properly handle state events rather than timing out
|
||||
[GH-2548]
|
||||
* storage/etcd3: Ensure locks are released if client is improperly shut down
|
||||
[GH-2526]
|
||||
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
"path"
|
||||
|
||||
"golang.org/x/net/http2"
|
||||
|
||||
@@ -329,14 +330,14 @@ func (c *Client) ClearToken() {
|
||||
// NewRequest creates a new raw request object to query the Vault server
|
||||
// configured for this client. This is an advanced method and generally
|
||||
// doesn't need to be called externally.
|
||||
func (c *Client) NewRequest(method, path string) *Request {
|
||||
func (c *Client) NewRequest(method, requestPath string) *Request {
|
||||
req := &Request{
|
||||
Method: method,
|
||||
URL: &url.URL{
|
||||
User: c.addr.User,
|
||||
Scheme: c.addr.Scheme,
|
||||
Host: c.addr.Host,
|
||||
Path: path,
|
||||
Path: path.Join(c.addr.Path, requestPath),
|
||||
},
|
||||
ClientToken: c.token,
|
||||
Params: make(map[string][]string),
|
||||
@@ -344,12 +345,12 @@ func (c *Client) NewRequest(method, path string) *Request {
|
||||
|
||||
var lookupPath string
|
||||
switch {
|
||||
case strings.HasPrefix(path, "/v1/"):
|
||||
lookupPath = strings.TrimPrefix(path, "/v1/")
|
||||
case strings.HasPrefix(path, "v1/"):
|
||||
lookupPath = strings.TrimPrefix(path, "v1/")
|
||||
case strings.HasPrefix(requestPath, "/v1/"):
|
||||
lookupPath = strings.TrimPrefix(requestPath, "/v1/")
|
||||
case strings.HasPrefix(requestPath, "v1/"):
|
||||
lookupPath = strings.TrimPrefix(requestPath, "v1/")
|
||||
default:
|
||||
lookupPath = path
|
||||
lookupPath = requestPath
|
||||
}
|
||||
if c.wrappingLookupFunc != nil {
|
||||
req.WrapTTL = c.wrappingLookupFunc(method, lookupPath)
|
||||
|
||||
@@ -1895,7 +1895,7 @@ func (b *backend) handleRoleSecretIDCommon(req *logical.Request, data *framework
|
||||
}
|
||||
|
||||
// Parse the CIDR blocks into a slice
|
||||
secretIDCIDRs := strutil.ParseDedupAndSortStrings(cidrList, ",")
|
||||
secretIDCIDRs := strutil.ParseDedupLowercaseAndSortStrings(cidrList, ",")
|
||||
|
||||
// Ensure that the CIDRs on the secret ID are a subset of that of role's
|
||||
if err := verifyCIDRRoleSecretIDSubset(secretIDCIDRs, role.BoundCIDRList); err != nil {
|
||||
@@ -2086,7 +2086,7 @@ or the 'role/<role_name>/custom-secret-id' endpoints, and if those SecretIDs
|
||||
are used to perform the login operation, then the value of 'token-max-ttl'
|
||||
defines the maximum lifetime of the tokens issued, after which the tokens
|
||||
cannot be renewed. A reauthentication is required after this duration.
|
||||
This value will be croleed by the backend mount's maximum TTL value.`,
|
||||
This value will be capped by the backend mount's maximum TTL value.`,
|
||||
},
|
||||
"role-id": {
|
||||
"Returns the 'role_id' of the role.",
|
||||
|
||||
@@ -31,7 +31,7 @@ type secretIDStorageEntry struct {
|
||||
// operation
|
||||
SecretIDNumUses int `json:"secret_id_num_uses" structs:"secret_id_num_uses" mapstructure:"secret_id_num_uses"`
|
||||
|
||||
// Duration after which this SecretID should expire. This is croleed by
|
||||
// Duration after which this SecretID should expire. This is capped by
|
||||
// the backend mount's max TTL value.
|
||||
SecretIDTTL time.Duration `json:"secret_id_ttl" structs:"secret_id_ttl" mapstructure:"secret_id_ttl"`
|
||||
|
||||
@@ -273,7 +273,7 @@ func (b *backend) validateBindSecretID(req *logical.Request, roleName, secretID,
|
||||
func verifyCIDRRoleSecretIDSubset(secretIDCIDRs []string, roleBoundCIDRList string) error {
|
||||
if len(secretIDCIDRs) != 0 {
|
||||
// Parse the CIDRs on role as a slice
|
||||
roleCIDRs := strutil.ParseDedupAndSortStrings(roleBoundCIDRList, ",")
|
||||
roleCIDRs := strutil.ParseDedupLowercaseAndSortStrings(roleBoundCIDRList, ",")
|
||||
|
||||
// If there are no CIDR blocks on the role, then the subset
|
||||
// requirement would be satisfied
|
||||
|
||||
@@ -350,12 +350,14 @@ func (b *backend) pathRoleCreateUpdate(
|
||||
|
||||
// Ensure that at least one bound is set on the role
|
||||
switch {
|
||||
case roleEntry.BoundAccountID != "":
|
||||
case roleEntry.BoundAmiID != "":
|
||||
case roleEntry.BoundIamInstanceProfileARN != "":
|
||||
case roleEntry.BoundAccountID != "":
|
||||
case roleEntry.BoundRegion != "":
|
||||
case roleEntry.BoundVpcID != "":
|
||||
case roleEntry.BoundSubnetID != "":
|
||||
case roleEntry.BoundIamRoleARN != "":
|
||||
case roleEntry.BoundIamInstanceProfileARN != "":
|
||||
default:
|
||||
|
||||
return logical.ErrorResponse("at least be one bound parameter should be specified on the role"), nil
|
||||
}
|
||||
|
||||
|
||||
@@ -9,6 +9,8 @@ import (
|
||||
)
|
||||
|
||||
func TestAwsEc2_RoleCrud(t *testing.T) {
|
||||
var err error
|
||||
var resp *logical.Response
|
||||
config := logical.TestBackendConfig()
|
||||
storage := &logical.InmemStorage{}
|
||||
config.StorageView = storage
|
||||
@@ -22,6 +24,23 @@ func TestAwsEc2_RoleCrud(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
role1Data := map[string]interface{}{
|
||||
"bound_vpc_id": "testvpcid",
|
||||
"allow_instance_migration": true,
|
||||
"policies": "testpolicy1,testpolicy2",
|
||||
}
|
||||
roleReq := &logical.Request{
|
||||
Operation: logical.UpdateOperation,
|
||||
Storage: storage,
|
||||
Path: "role/role1",
|
||||
Data: role1Data,
|
||||
}
|
||||
|
||||
resp, err = b.HandleRequest(roleReq)
|
||||
if err != nil || (resp != nil && resp.IsError()) {
|
||||
t.Fatalf("resp: %#v, err: %v", resp, err)
|
||||
}
|
||||
|
||||
roleData := map[string]interface{}{
|
||||
"bound_ami_id": "testamiid",
|
||||
"bound_account_id": "testaccountid",
|
||||
@@ -40,14 +59,9 @@ func TestAwsEc2_RoleCrud(t *testing.T) {
|
||||
"period": "1m",
|
||||
}
|
||||
|
||||
roleReq := &logical.Request{
|
||||
Operation: logical.UpdateOperation,
|
||||
Storage: storage,
|
||||
Path: "role/testrole",
|
||||
Data: roleData,
|
||||
}
|
||||
|
||||
resp, err := b.HandleRequest(roleReq)
|
||||
roleReq.Path = "role/testrole"
|
||||
roleReq.Data = roleData
|
||||
resp, err = b.HandleRequest(roleReq)
|
||||
if err != nil || (resp != nil && resp.IsError()) {
|
||||
t.Fatalf("resp: %#v, err: %v", resp, err)
|
||||
}
|
||||
|
||||
@@ -104,13 +104,13 @@ func (b *backend) Login(req *logical.Request, username string, password string)
|
||||
// Clean connection
|
||||
defer c.Close()
|
||||
|
||||
bindDN, err := b.getBindDN(cfg, c, username)
|
||||
userBindDN, err := b.getUserBindDN(cfg, c, username)
|
||||
if err != nil {
|
||||
return nil, logical.ErrorResponse(err.Error()), nil
|
||||
}
|
||||
|
||||
if b.Logger().IsDebug() {
|
||||
b.Logger().Debug("auth/ldap: BindDN fetched", "username", username, "binddn", bindDN)
|
||||
b.Logger().Debug("auth/ldap: User BindDN fetched", "username", username, "binddn", userBindDN)
|
||||
}
|
||||
|
||||
if cfg.DenyNullBind && len(password) == 0 {
|
||||
@@ -118,11 +118,22 @@ func (b *backend) Login(req *logical.Request, username string, password string)
|
||||
}
|
||||
|
||||
// Try to bind as the login user. This is where the actual authentication takes place.
|
||||
if err = c.Bind(bindDN, password); err != nil {
|
||||
if err = c.Bind(userBindDN, password); err != nil {
|
||||
return nil, logical.ErrorResponse(fmt.Sprintf("LDAP bind failed: %v", err)), nil
|
||||
}
|
||||
|
||||
userDN, err := b.getUserDN(cfg, c, bindDN)
|
||||
// We re-bind to the BindDN if it's defined because we assume
|
||||
// the BindDN should be the one to search, not the user logging in.
|
||||
if cfg.BindDN != "" && cfg.BindPassword != "" {
|
||||
if err := c.Bind(cfg.BindDN, cfg.BindPassword); err != nil {
|
||||
return nil, logical.ErrorResponse(fmt.Sprintf("Encountered an error while attempting to re-bind with the BindDN User: %s", err.Error())), nil
|
||||
}
|
||||
if b.Logger().IsDebug() {
|
||||
b.Logger().Debug("auth/ldap: Re-Bound to original BindDN")
|
||||
}
|
||||
}
|
||||
|
||||
userDN, err := b.getUserDN(cfg, c, userBindDN)
|
||||
if err != nil {
|
||||
return nil, logical.ErrorResponse(err.Error()), nil
|
||||
}
|
||||
@@ -165,11 +176,11 @@ func (b *backend) Login(req *logical.Request, username string, password string)
|
||||
policies = append(policies, group.Policies...)
|
||||
}
|
||||
}
|
||||
if user !=nil && user.Policies != nil {
|
||||
if user != nil && user.Policies != nil {
|
||||
policies = append(policies, user.Policies...)
|
||||
}
|
||||
// Policies from each group may overlap
|
||||
policies = strutil.RemoveDuplicates(policies)
|
||||
policies = strutil.RemoveDuplicates(policies, true)
|
||||
|
||||
if len(policies) == 0 {
|
||||
errStr := "user is not a member of any authorized group"
|
||||
@@ -218,7 +229,7 @@ func (b *backend) getCN(dn string) string {
|
||||
* 2. If upndomain is set, the user dn is constructed as 'username@upndomain'. See https://msdn.microsoft.com/en-us/library/cc223499.aspx
|
||||
*
|
||||
*/
|
||||
func (b *backend) getBindDN(cfg *ConfigEntry, c *ldap.Conn, username string) (string, error) {
|
||||
func (b *backend) getUserBindDN(cfg *ConfigEntry, c *ldap.Conn, username string) (string, error) {
|
||||
bindDN := ""
|
||||
if cfg.DiscoverDN || (cfg.BindDN != "" && cfg.BindPassword != "") {
|
||||
if err := c.Bind(cfg.BindDN, cfg.BindPassword); err != nil {
|
||||
|
||||
@@ -101,7 +101,7 @@ func (b *backend) pathUserRead(
|
||||
func (b *backend) pathUserWrite(
|
||||
req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
|
||||
name := d.Get("name").(string)
|
||||
groups := strutil.ParseDedupAndSortStrings(d.Get("groups").(string), ",")
|
||||
groups := strutil.RemoveDuplicates(strutil.ParseStringSlice(d.Get("groups").(string), ","), false)
|
||||
policies := policyutil.ParsePolicies(d.Get("policies").(string))
|
||||
for i, g := range groups {
|
||||
groups[i] = strings.TrimSpace(g)
|
||||
|
||||
@@ -76,7 +76,7 @@ func createSession(cfg *sessionConfig, s logical.Storage) (*gocql.Session, error
|
||||
}
|
||||
|
||||
clusterConfig.SslOpts = &gocql.SslOptions{
|
||||
Config: *tlsConfig,
|
||||
Config: tlsConfig,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -78,7 +78,7 @@ func (b *backend) DB(s logical.Storage) (*sql.DB, error) {
|
||||
}
|
||||
connString := connConfig.ConnectionString
|
||||
|
||||
db, err := sql.Open("mssql", connString)
|
||||
db, err := sql.Open("sqlserver", connString)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -102,7 +102,7 @@ func (b *backend) secretCredsRevoke(
|
||||
// we need to drop the database users before we can drop the login and the role
|
||||
// This isn't done in a transaction because even if we fail along the way,
|
||||
// we want to remove as much access as possible
|
||||
stmt, err := db.Prepare(fmt.Sprintf("EXEC sp_msloginmappings '%s';", username))
|
||||
stmt, err := db.Prepare(fmt.Sprintf("EXEC master.dbo.sp_msloginmappings '%s';", username))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -225,7 +225,7 @@ func TestBackend_RSARoles_CSR(t *testing.T) {
|
||||
|
||||
stepCount = len(testCase.Steps)
|
||||
|
||||
testCase.Steps = append(testCase.Steps, generateRoleSteps(t, false)...)
|
||||
testCase.Steps = append(testCase.Steps, generateRoleSteps(t, true)...)
|
||||
if len(os.Getenv("VAULT_VERBOSE_PKITESTS")) > 0 {
|
||||
for i, v := range testCase.Steps {
|
||||
fmt.Printf("Step %d:\n%+v\n\n", i+stepCount, v)
|
||||
@@ -1471,7 +1471,7 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep {
|
||||
}
|
||||
cert := parsedCertBundle.Certificate
|
||||
|
||||
expected := strutil.ParseDedupAndSortStrings(role.OU, ",")
|
||||
expected := strutil.ParseDedupLowercaseAndSortStrings(role.OU, ",")
|
||||
if !reflect.DeepEqual(cert.Subject.OrganizationalUnit, expected) {
|
||||
return fmt.Errorf("Error: returned certificate has OU of %s but %s was specified in the role.", cert.Subject.OrganizationalUnit, expected)
|
||||
}
|
||||
@@ -1492,7 +1492,7 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep {
|
||||
}
|
||||
cert := parsedCertBundle.Certificate
|
||||
|
||||
expected := strutil.ParseDedupAndSortStrings(role.Organization, ",")
|
||||
expected := strutil.ParseDedupLowercaseAndSortStrings(role.Organization, ",")
|
||||
if !reflect.DeepEqual(cert.Subject.Organization, expected) {
|
||||
return fmt.Errorf("Error: returned certificate has Organization of %s but %s was specified in the role.", cert.Subject.Organization, expected)
|
||||
}
|
||||
@@ -1787,6 +1787,12 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep {
|
||||
}
|
||||
// IP SAN tests
|
||||
{
|
||||
roleVals.UseCSRSANs = true
|
||||
roleVals.AllowIPSANs = false
|
||||
issueTestStep.ErrorOk = false
|
||||
addTests(nil)
|
||||
|
||||
roleVals.UseCSRSANs = false
|
||||
issueVals.IPSANs = "127.0.0.1,::1"
|
||||
issueTestStep.ErrorOk = true
|
||||
addTests(nil)
|
||||
@@ -1978,6 +1984,172 @@ func TestBackend_PathFetchCertList(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackend_SignVerbatim(t *testing.T) {
|
||||
// create the backend
|
||||
config := logical.TestBackendConfig()
|
||||
storage := &logical.InmemStorage{}
|
||||
config.StorageView = storage
|
||||
|
||||
b := Backend()
|
||||
_, err := b.Setup(config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// generate root
|
||||
rootData := map[string]interface{}{
|
||||
"common_name": "test.com",
|
||||
"ttl": "172800",
|
||||
}
|
||||
|
||||
resp, err := b.HandleRequest(&logical.Request{
|
||||
Operation: logical.UpdateOperation,
|
||||
Path: "root/generate/internal",
|
||||
Storage: storage,
|
||||
Data: rootData,
|
||||
})
|
||||
if resp != nil && resp.IsError() {
|
||||
t.Fatalf("failed to generate root, %#v", *resp)
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// create a CSR and key
|
||||
key, err := rsa.GenerateKey(rand.Reader, 2048)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
csrReq := &x509.CertificateRequest{
|
||||
Subject: pkix.Name{
|
||||
CommonName: "foo.bar.com",
|
||||
},
|
||||
}
|
||||
csr, err := x509.CreateCertificateRequest(rand.Reader, csrReq, key)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(csr) == 0 {
|
||||
t.Fatal("generated csr is empty")
|
||||
}
|
||||
pemCSR := pem.EncodeToMemory(&pem.Block{
|
||||
Type: "CERTIFICATE REQUEST",
|
||||
Bytes: csr,
|
||||
})
|
||||
if len(pemCSR) == 0 {
|
||||
t.Fatal("pem csr is empty")
|
||||
}
|
||||
|
||||
resp, err = b.HandleRequest(&logical.Request{
|
||||
Operation: logical.UpdateOperation,
|
||||
Path: "sign-verbatim",
|
||||
Storage: storage,
|
||||
Data: map[string]interface{}{
|
||||
"csr": string(pemCSR),
|
||||
},
|
||||
})
|
||||
if resp != nil && resp.IsError() {
|
||||
t.Fatalf("failed to sign-verbatim basic CSR: %#v", *resp)
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if resp.Secret != nil {
|
||||
t.Fatal("secret is not nil")
|
||||
}
|
||||
|
||||
// create a role entry; we use this to check that sign-verbatim when used with a role is still honoring TTLs
|
||||
roleData := map[string]interface{}{
|
||||
"ttl": "4h",
|
||||
"max_ttl": "8h",
|
||||
}
|
||||
resp, err = b.HandleRequest(&logical.Request{
|
||||
Operation: logical.UpdateOperation,
|
||||
Path: "roles/test",
|
||||
Storage: storage,
|
||||
Data: roleData,
|
||||
})
|
||||
if resp != nil && resp.IsError() {
|
||||
t.Fatalf("failed to create a role, %#v", *resp)
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
resp, err = b.HandleRequest(&logical.Request{
|
||||
Operation: logical.UpdateOperation,
|
||||
Path: "sign-verbatim/test",
|
||||
Storage: storage,
|
||||
Data: map[string]interface{}{
|
||||
"csr": string(pemCSR),
|
||||
"ttl": "5h",
|
||||
},
|
||||
})
|
||||
if resp != nil && resp.IsError() {
|
||||
t.Fatalf("failed to sign-verbatim ttl'd CSR: %#v", *resp)
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if resp.Secret != nil {
|
||||
t.Fatal("got a lease when we should not have")
|
||||
}
|
||||
resp, err = b.HandleRequest(&logical.Request{
|
||||
Operation: logical.UpdateOperation,
|
||||
Path: "sign-verbatim/test",
|
||||
Storage: storage,
|
||||
Data: map[string]interface{}{
|
||||
"csr": string(pemCSR),
|
||||
"ttl": "12h",
|
||||
},
|
||||
})
|
||||
if resp != nil && !resp.IsError() {
|
||||
t.Fatalf("sign-verbatim signed too-large-ttl'd CSR: %#v", *resp)
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// now check that if we set generate-lease it takes it from the role and the TTLs match
|
||||
roleData = map[string]interface{}{
|
||||
"ttl": "4h",
|
||||
"max_ttl": "8h",
|
||||
"generate_lease": true,
|
||||
}
|
||||
resp, err = b.HandleRequest(&logical.Request{
|
||||
Operation: logical.UpdateOperation,
|
||||
Path: "roles/test",
|
||||
Storage: storage,
|
||||
Data: roleData,
|
||||
})
|
||||
if resp != nil && resp.IsError() {
|
||||
t.Fatalf("failed to create a role, %#v", *resp)
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
resp, err = b.HandleRequest(&logical.Request{
|
||||
Operation: logical.UpdateOperation,
|
||||
Path: "sign-verbatim/test",
|
||||
Storage: storage,
|
||||
Data: map[string]interface{}{
|
||||
"csr": string(pemCSR),
|
||||
"ttl": "5h",
|
||||
},
|
||||
})
|
||||
if resp != nil && resp.IsError() {
|
||||
t.Fatalf("failed to sign-verbatim role-leased CSR: %#v", *resp)
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if resp.Secret == nil {
|
||||
t.Fatalf("secret is nil, response is %#v", *resp)
|
||||
}
|
||||
if math.Abs(float64(resp.Secret.TTL-(5*time.Hour))) > float64(5*time.Hour) {
|
||||
t.Fatalf("ttl not default; wanted %v, got %v", b.System().DefaultLeaseTTL(), resp.Secret.TTL)
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
rsaCAKey string = `-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEogIBAAKCAQEAmPQlK7xD5p+E8iLQ8XlVmll5uU2NKMxKY3UF5tbh+0vkc+Fy
|
||||
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
|
||||
"github.com/hashicorp/vault/helper/certutil"
|
||||
"github.com/hashicorp/vault/helper/errutil"
|
||||
"github.com/hashicorp/vault/helper/parseutil"
|
||||
"github.com/hashicorp/vault/helper/strutil"
|
||||
"github.com/hashicorp/vault/logical"
|
||||
"github.com/hashicorp/vault/logical/framework"
|
||||
@@ -596,7 +597,7 @@ func generateCreationBundle(b *backend,
|
||||
if csr == nil || !role.UseCSRSANs {
|
||||
cnAltRaw, ok := data.GetOk("alt_names")
|
||||
if ok {
|
||||
cnAlt := strutil.ParseDedupAndSortStrings(cnAltRaw.(string), ",")
|
||||
cnAlt := strutil.ParseDedupLowercaseAndSortStrings(cnAltRaw.(string), ",")
|
||||
for _, v := range cnAlt {
|
||||
if strings.Contains(v, "@") {
|
||||
emailAddresses = append(emailAddresses, v)
|
||||
@@ -634,11 +635,13 @@ func generateCreationBundle(b *backend,
|
||||
var ipAltInt interface{}
|
||||
{
|
||||
if csr != nil && role.UseCSRSANs {
|
||||
if !role.AllowIPSANs {
|
||||
return nil, errutil.UserError{Err: fmt.Sprintf(
|
||||
"IP Subject Alternative Names are not allowed in this role, but was provided some via CSR")}
|
||||
if len(csr.IPAddresses) > 0 {
|
||||
if !role.AllowIPSANs {
|
||||
return nil, errutil.UserError{Err: fmt.Sprintf(
|
||||
"IP Subject Alternative Names are not allowed in this role, but was provided some via CSR")}
|
||||
}
|
||||
ipAddresses = csr.IPAddresses
|
||||
}
|
||||
ipAddresses = csr.IPAddresses
|
||||
} else {
|
||||
ipAltInt, ok = data.GetOk("ip_sans")
|
||||
if ok {
|
||||
@@ -665,7 +668,7 @@ func generateCreationBundle(b *backend,
|
||||
ou := []string{}
|
||||
{
|
||||
if role.OU != "" {
|
||||
ou = strutil.ParseDedupAndSortStrings(role.OU, ",")
|
||||
ou = strutil.RemoveDuplicates(strutil.ParseStringSlice(role.OU, ","), false)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -673,7 +676,7 @@ func generateCreationBundle(b *backend,
|
||||
organization := []string{}
|
||||
{
|
||||
if role.Organization != "" {
|
||||
organization = strutil.ParseDedupAndSortStrings(role.Organization, ",")
|
||||
organization = strutil.RemoveDuplicates(strutil.ParseStringSlice(role.Organization, ","), false)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -693,7 +696,7 @@ func generateCreationBundle(b *backend,
|
||||
if len(ttlField) == 0 {
|
||||
ttl = b.System().DefaultLeaseTTL()
|
||||
} else {
|
||||
ttl, err = time.ParseDuration(ttlField)
|
||||
ttl, err = parseutil.ParseDurationSecond(ttlField)
|
||||
if err != nil {
|
||||
return nil, errutil.UserError{Err: fmt.Sprintf(
|
||||
"invalid requested ttl: %s", err)}
|
||||
@@ -703,7 +706,7 @@ func generateCreationBundle(b *backend,
|
||||
if len(role.MaxTTL) == 0 {
|
||||
maxTTL = b.System().MaxLeaseTTL()
|
||||
} else {
|
||||
maxTTL, err = time.ParseDuration(role.MaxTTL)
|
||||
maxTTL, err = parseutil.ParseDurationSecond(role.MaxTTL)
|
||||
if err != nil {
|
||||
return nil, errutil.UserError{Err: fmt.Sprintf(
|
||||
"invalid ttl: %s", err)}
|
||||
|
||||
@@ -116,9 +116,20 @@ func (b *backend) pathSign(
|
||||
func (b *backend) pathSignVerbatim(
|
||||
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
||||
|
||||
roleName := data.Get("role").(string)
|
||||
|
||||
// Get the role if one was specified
|
||||
role, err := b.getRole(req.Storage, roleName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ttl := b.System().DefaultLeaseTTL()
|
||||
role := &roleEntry{
|
||||
maxTTL := b.System().MaxLeaseTTL()
|
||||
|
||||
entry := &roleEntry{
|
||||
TTL: ttl.String(),
|
||||
MaxTTL: maxTTL.String(),
|
||||
AllowLocalhost: true,
|
||||
AllowAnyName: true,
|
||||
AllowIPSANs: true,
|
||||
@@ -126,9 +137,25 @@ func (b *backend) pathSignVerbatim(
|
||||
KeyType: "any",
|
||||
UseCSRCommonName: true,
|
||||
UseCSRSANs: true,
|
||||
GenerateLease: new(bool),
|
||||
}
|
||||
|
||||
return b.pathIssueSignCert(req, data, role, true, true)
|
||||
if role != nil {
|
||||
if role.TTL != "" {
|
||||
entry.TTL = role.TTL
|
||||
}
|
||||
if role.MaxTTL != "" {
|
||||
entry.MaxTTL = role.MaxTTL
|
||||
}
|
||||
entry.NoStore = role.NoStore
|
||||
}
|
||||
|
||||
*entry.GenerateLease = false
|
||||
if role != nil && role.GenerateLease != nil {
|
||||
*entry.GenerateLease = *role.GenerateLease
|
||||
}
|
||||
|
||||
return b.pathIssueSignCert(req, data, entry, true, true)
|
||||
}
|
||||
|
||||
func (b *backend) pathIssueSignCert(
|
||||
@@ -240,12 +267,14 @@ func (b *backend) pathIssueSignCert(
|
||||
resp.Secret.TTL = parsedBundle.Certificate.NotAfter.Sub(time.Now())
|
||||
}
|
||||
|
||||
err = req.Storage.Put(&logical.StorageEntry{
|
||||
Key: "certs/" + cb.SerialNumber,
|
||||
Value: parsedBundle.CertificateBytes,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Unable to store certificate locally: %v", err)
|
||||
if !role.NoStore {
|
||||
err = req.Storage.Put(&logical.StorageEntry{
|
||||
Key: "certs/" + cb.SerialNumber,
|
||||
Value: parsedBundle.CertificateBytes,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Unable to store certificate locally: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
|
||||
@@ -204,6 +204,17 @@ to the CRL. When large number of certificates are generated with long
|
||||
lifetimes, it is recommended that lease generation be disabled, as large amount of
|
||||
leases adversely affect the startup time of Vault.`,
|
||||
},
|
||||
"no_store": &framework.FieldSchema{
|
||||
Type: framework.TypeBool,
|
||||
Default: false,
|
||||
Description: `
|
||||
If set, certificates issued/signed against this role will not be stored in the
|
||||
in the storage backend. This can improve performance when issuing large numbers
|
||||
of certificates. However, certificates issued in this way cannot be enumerated
|
||||
or revoked, so this option is recommended only for certificates that are
|
||||
non-sensitive, or extremely short-lived. This option implies a value of "false"
|
||||
for "generate_lease".`,
|
||||
},
|
||||
},
|
||||
|
||||
Callbacks: map[logical.Operation]framework.OperationFunc{
|
||||
@@ -384,9 +395,15 @@ func (b *backend) pathRoleCreate(
|
||||
OU: data.Get("ou").(string),
|
||||
Organization: data.Get("organization").(string),
|
||||
GenerateLease: new(bool),
|
||||
NoStore: data.Get("no_store").(bool),
|
||||
}
|
||||
|
||||
*entry.GenerateLease = data.Get("generate_lease").(bool)
|
||||
// no_store implies generate_lease := false
|
||||
if entry.NoStore {
|
||||
*entry.GenerateLease = false
|
||||
} else {
|
||||
*entry.GenerateLease = data.Get("generate_lease").(bool)
|
||||
}
|
||||
|
||||
if entry.KeyType == "rsa" && entry.KeyBits < 2048 {
|
||||
return logical.ErrorResponse("RSA keys < 2048 bits are unsafe and not supported"), nil
|
||||
@@ -504,6 +521,7 @@ type roleEntry struct {
|
||||
OU string `json:"ou" structs:"ou" mapstructure:"ou"`
|
||||
Organization string `json:"organization" structs:"organization" mapstructure:"organization"`
|
||||
GenerateLease *bool `json:"generate_lease,omitempty" structs:"generate_lease,omitempty"`
|
||||
NoStore bool `json:"no_store" structs:"no_store" mapstructure:"no_store"`
|
||||
}
|
||||
|
||||
const pathListRolesHelpSyn = `List the existing roles in this backend`
|
||||
|
||||
@@ -124,6 +124,114 @@ func TestPki_RoleGenerateLease(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestPki_RoleNoStore(t *testing.T) {
|
||||
var resp *logical.Response
|
||||
var err error
|
||||
b, storage := createBackendWithStorage(t)
|
||||
|
||||
roleData := map[string]interface{}{
|
||||
"allowed_domains": "myvault.com",
|
||||
"ttl": "5h",
|
||||
}
|
||||
|
||||
roleReq := &logical.Request{
|
||||
Operation: logical.UpdateOperation,
|
||||
Path: "roles/testrole",
|
||||
Storage: storage,
|
||||
Data: roleData,
|
||||
}
|
||||
|
||||
resp, err = b.HandleRequest(roleReq)
|
||||
if err != nil || (resp != nil && resp.IsError()) {
|
||||
t.Fatalf("bad: err: %v resp: %#v", err, resp)
|
||||
}
|
||||
|
||||
roleReq.Operation = logical.ReadOperation
|
||||
|
||||
resp, err = b.HandleRequest(roleReq)
|
||||
if err != nil || (resp != nil && resp.IsError()) {
|
||||
t.Fatalf("bad: err: %v resp: %#v", err, resp)
|
||||
}
|
||||
|
||||
// By default, no_store should be `false`
|
||||
noStore := resp.Data["no_store"].(bool)
|
||||
if noStore {
|
||||
t.Fatalf("no_store should not be set by default")
|
||||
}
|
||||
|
||||
// Make sure that setting no_store to `true` works properly
|
||||
roleReq.Operation = logical.UpdateOperation
|
||||
roleReq.Path = "roles/testrole_nostore"
|
||||
roleReq.Data["no_store"] = true
|
||||
roleReq.Data["allowed_domain"] = "myvault.com"
|
||||
roleReq.Data["allow_subdomains"] = true
|
||||
roleReq.Data["ttl"] = "5h"
|
||||
|
||||
resp, err = b.HandleRequest(roleReq)
|
||||
if err != nil || (resp != nil && resp.IsError()) {
|
||||
t.Fatalf("bad: err: %v resp: %#v", err, resp)
|
||||
}
|
||||
|
||||
roleReq.Operation = logical.ReadOperation
|
||||
resp, err = b.HandleRequest(roleReq)
|
||||
if err != nil || (resp != nil && resp.IsError()) {
|
||||
t.Fatalf("bad: err: %v resp: %#v", err, resp)
|
||||
}
|
||||
|
||||
noStore = resp.Data["no_store"].(bool)
|
||||
if !noStore {
|
||||
t.Fatalf("no_store should have been set to true")
|
||||
}
|
||||
|
||||
// issue a certificate and test that it's not stored
|
||||
caData := map[string]interface{}{
|
||||
"common_name": "myvault.com",
|
||||
"ttl": "5h",
|
||||
"ip_sans": "127.0.0.1",
|
||||
}
|
||||
caReq := &logical.Request{
|
||||
Operation: logical.UpdateOperation,
|
||||
Path: "root/generate/internal",
|
||||
Storage: storage,
|
||||
Data: caData,
|
||||
}
|
||||
resp, err = b.HandleRequest(caReq)
|
||||
if err != nil || (resp != nil && resp.IsError()) {
|
||||
t.Fatalf("bad: err: %v resp: %#v", err, resp)
|
||||
}
|
||||
|
||||
issueData := map[string]interface{}{
|
||||
"common_name": "cert.myvault.com",
|
||||
"format": "pem",
|
||||
"ip_sans": "127.0.0.1",
|
||||
"ttl": "1h",
|
||||
}
|
||||
issueReq := &logical.Request{
|
||||
Operation: logical.UpdateOperation,
|
||||
Path: "issue/testrole_nostore",
|
||||
Storage: storage,
|
||||
Data: issueData,
|
||||
}
|
||||
|
||||
resp, err = b.HandleRequest(issueReq)
|
||||
if err != nil || (resp != nil && resp.IsError()) {
|
||||
t.Fatalf("bad: err: %v resp: %#v", err, resp)
|
||||
}
|
||||
|
||||
// list certs
|
||||
resp, err = b.HandleRequest(&logical.Request{
|
||||
Operation: logical.ListOperation,
|
||||
Path: "certs",
|
||||
Storage: storage,
|
||||
})
|
||||
if err != nil || (resp != nil && resp.IsError()) {
|
||||
t.Fatalf("bad: err: %v resp: %#v", err, resp)
|
||||
}
|
||||
if len(resp.Data["keys"].([]string)) != 1 {
|
||||
t.Fatalf("Only the CA certificate should be stored: %#v", resp)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPki_CertsLease(t *testing.T) {
|
||||
var resp *logical.Response
|
||||
var err error
|
||||
|
||||
@@ -143,15 +143,17 @@ func pathRoles(b *backend) *framework.Path {
|
||||
"allowed_users": &framework.FieldSchema{
|
||||
Type: framework.TypeString,
|
||||
Description: `
|
||||
[Optional for all types]
|
||||
If this option is not specified, client can request for a
|
||||
[Optional for all types] [Works differently for CA type]
|
||||
If this option is not specified, or is '*', client can request a
|
||||
credential for any valid user at the remote host, including the
|
||||
admin user. If only certain usernames are to be allowed, then
|
||||
this list enforces it. If this field is set, then credentials
|
||||
can only be created for default_user and usernames present in
|
||||
this list. Setting this option will enable all the users with
|
||||
access this role to fetch credentials for all other usernames
|
||||
in this list. Use with caution.
|
||||
in this list. Use with caution. N.B.: with the CA type, an empty
|
||||
list means that no users are allowed; explicitly specify '*' to
|
||||
allow any user.
|
||||
`,
|
||||
},
|
||||
"allowed_domains": &framework.FieldSchema{
|
||||
|
||||
@@ -203,8 +203,8 @@ func (b *backend) calculateValidPrincipals(data *framework.FieldData, defaultPri
|
||||
validPrincipals = defaultPrincipal
|
||||
}
|
||||
|
||||
parsedPrincipals := strutil.ParseDedupAndSortStrings(validPrincipals, ",")
|
||||
allowedPrincipals := strutil.ParseDedupAndSortStrings(principalsAllowedByRole, ",")
|
||||
parsedPrincipals := strutil.RemoveDuplicates(strutil.ParseStringSlice(validPrincipals, ","), false)
|
||||
allowedPrincipals := strutil.RemoveDuplicates(strutil.ParseStringSlice(principalsAllowedByRole, ","), false)
|
||||
switch {
|
||||
case len(parsedPrincipals) == 0:
|
||||
// There is nothing to process
|
||||
|
||||
@@ -58,12 +58,12 @@ Usage: vault audit-disable [options] id
|
||||
|
||||
Disable an audit backend.
|
||||
|
||||
Once the audit backend is disabled, no more audit logs will be sent to
|
||||
Once the audit backend is disabled no more audit logs will be sent to
|
||||
it. The data associated with the audit backend isn't affected.
|
||||
|
||||
The "id" parameter should map to the id used with "audit-enable". If
|
||||
no specific ID was specified, then it is the name of the backend (the
|
||||
type of the backend).
|
||||
The "id" parameter should map to the "path" used in "audit-enable". If
|
||||
no path was provided to "audit-enable" you should use the backend
|
||||
type (e.g. "file").
|
||||
|
||||
General Options:
|
||||
` + meta.GeneralOptionsUsage()
|
||||
|
||||
@@ -312,7 +312,7 @@ func (c *AuthCommand) Help() string {
|
||||
helpText := `
|
||||
Usage: vault auth [options] [auth-information]
|
||||
|
||||
Authenticate with Vault with the given token or via any supported
|
||||
Authenticate with Vault using the given token or via any supported
|
||||
authentication backend.
|
||||
|
||||
By default, the -method is assumed to be token. If not supplied via the
|
||||
@@ -399,7 +399,7 @@ func (h *tokenAuthHandler) Help() string {
|
||||
help := `
|
||||
No method selected with the "-method" flag, so the "auth" command assumes
|
||||
you'll be using raw token authentication. For this, specify the token to
|
||||
authenticate as as the parameter to "vault auth". Example:
|
||||
authenticate as the parameter to "vault auth". Example:
|
||||
|
||||
vault auth 123456
|
||||
|
||||
|
||||
@@ -58,10 +58,10 @@ Usage: vault auth-disable [options] path
|
||||
|
||||
Disable an already-enabled auth provider.
|
||||
|
||||
Once the auth provider is disabled, that path cannot be used anymore
|
||||
Once the auth provider is disabled its path can no longer be used
|
||||
to authenticate. All access tokens generated via the disabled auth provider
|
||||
will be revoked. This command will block until all tokens are revoked.
|
||||
If the command is exited early, the tokens will still be revoked.
|
||||
If the command is exited early the tokens will still be revoked.
|
||||
|
||||
General Options:
|
||||
` + meta.GeneralOptionsUsage()
|
||||
|
||||
@@ -82,7 +82,7 @@ General Options:
|
||||
` + meta.GeneralOptionsUsage() + `
|
||||
Auth Enable Options:
|
||||
|
||||
-description=<desc> Human-friendly description of the purpose for the
|
||||
-description=<desc> Human-friendly description of the purpose of the
|
||||
auth provider. This shows up in the auth -methods command.
|
||||
|
||||
-path=<path> Mount point for the auth provider. This defaults
|
||||
|
||||
@@ -295,12 +295,12 @@ Usage: vault generate-root [options] [key]
|
||||
|
||||
'generate-root' is used to create a new root token.
|
||||
|
||||
Root generation can only be done when the Vault is already unsealed. The
|
||||
Root generation can only be done when the vault is already unsealed. The
|
||||
operation is done online, but requires that a threshold of the current unseal
|
||||
keys be provided.
|
||||
|
||||
One (and only one) of the following must be provided at attempt
|
||||
initialization time:
|
||||
One (and only one) of the following must be provided when initializing the
|
||||
root generation attempt:
|
||||
|
||||
1) A 16-byte, base64-encoded One Time Password (OTP) provided in the '-otp'
|
||||
flag; the token is XOR'd with this value before it is returned once the final
|
||||
|
||||
@@ -245,11 +245,11 @@ func (c *InitCommand) runInit(check bool, initRequest *api.InitRequest) int {
|
||||
c.Ui.Output(fmt.Sprintf(
|
||||
"\n"+
|
||||
"Vault initialized with %d keys and a key threshold of %d. Please\n"+
|
||||
"securely distribute the above keys. When the Vault is re-sealed,\n"+
|
||||
"securely distribute the above keys. When the vault is re-sealed,\n"+
|
||||
"restarted, or stopped, you must provide at least %d of these keys\n"+
|
||||
"to unseal it again.\n\n"+
|
||||
"Vault does not store the master key. Without at least %d keys,\n"+
|
||||
"your Vault will remain permanently sealed.",
|
||||
"your vault will remain permanently sealed.",
|
||||
initRequest.SecretShares,
|
||||
initRequest.SecretThreshold,
|
||||
initRequest.SecretThreshold,
|
||||
@@ -301,10 +301,10 @@ Usage: vault init [options]
|
||||
Initialize a new Vault server.
|
||||
|
||||
This command connects to a Vault server and initializes it for the
|
||||
first time. This sets up the initial set of master keys and sets up the
|
||||
first time. This sets up the initial set of master keys and the
|
||||
backend data store structure.
|
||||
|
||||
This command can't be called on an already-initialized Vault.
|
||||
This command can't be called on an already-initialized Vault server.
|
||||
|
||||
General Options:
|
||||
` + meta.GeneralOptionsUsage() + `
|
||||
|
||||
@@ -28,7 +28,7 @@ func (c *ListCommand) Run(args []string) int {
|
||||
|
||||
args = flags.Args()
|
||||
if len(args) != 1 || len(args[0]) == 0 {
|
||||
c.Ui.Error("read expects one argument")
|
||||
c.Ui.Error("list expects one argument")
|
||||
flags.Usage()
|
||||
return 1
|
||||
}
|
||||
|
||||
@@ -32,7 +32,7 @@ func (c *MountCommand) Run(args []string) int {
|
||||
if len(args) != 1 {
|
||||
flags.Usage()
|
||||
c.Ui.Error(fmt.Sprintf(
|
||||
"\nMount expects one argument: the type to mount."))
|
||||
"\nmount expects one argument: the type to mount."))
|
||||
return 1
|
||||
}
|
||||
|
||||
@@ -95,7 +95,7 @@ Mount Options:
|
||||
the mount. This shows up in the mounts command.
|
||||
|
||||
-path=<path> Mount point for the logical backend. This
|
||||
defauls to the type of the mount.
|
||||
defaults to the type of the mount.
|
||||
|
||||
-default-lease-ttl=<duration> Default lease time-to-live for this backend.
|
||||
If not specified, uses the global default, or
|
||||
|
||||
@@ -28,7 +28,7 @@ func (c *MountTuneCommand) Run(args []string) int {
|
||||
if len(args) != 1 {
|
||||
flags.Usage()
|
||||
c.Ui.Error(fmt.Sprintf(
|
||||
"\n'mount-tune' expects one arguments: the mount path"))
|
||||
"\nmount-tune expects one arguments: the mount path"))
|
||||
return 1
|
||||
}
|
||||
|
||||
|
||||
@@ -40,7 +40,7 @@ func (c *PathHelpCommand) Run(args []string) int {
|
||||
if strings.Contains(err.Error(), "Vault is sealed") {
|
||||
c.Ui.Error(`Error: Vault is sealed.
|
||||
|
||||
The path-help command requires the Vault to be unsealed so that
|
||||
The path-help command requires the vault to be unsealed so that
|
||||
mount points of secret backends are known.`)
|
||||
} else {
|
||||
c.Ui.Error(fmt.Sprintf(
|
||||
@@ -67,7 +67,7 @@ Usage: vault path-help [options] path
|
||||
providers provide built-in help. This command looks up and outputs that
|
||||
help.
|
||||
|
||||
The command requires that the Vault be unsealed, because otherwise
|
||||
The command requires that the vault be unsealed, because otherwise
|
||||
the mount points of the backends are unknown.
|
||||
|
||||
General Options:
|
||||
|
||||
@@ -194,11 +194,11 @@ func (c *RekeyCommand) Run(args []string) int {
|
||||
c.Ui.Output(fmt.Sprintf(
|
||||
"\n"+
|
||||
"Vault rekeyed with %d keys and a key threshold of %d. Please\n"+
|
||||
"securely distribute the above keys. When the Vault is re-sealed,\n"+
|
||||
"securely distribute the above keys. When the vault is re-sealed,\n"+
|
||||
"restarted, or stopped, you must provide at least %d of these keys\n"+
|
||||
"to unseal it again.\n\n"+
|
||||
"Vault does not store the master key. Without at least %d keys,\n"+
|
||||
"your Vault will remain permanently sealed.",
|
||||
"your vault will remain permanently sealed.",
|
||||
shares,
|
||||
threshold,
|
||||
threshold,
|
||||
@@ -361,7 +361,7 @@ Usage: vault rekey [options] [key]
|
||||
a new set of unseal keys or to change the number of shares and the
|
||||
required threshold.
|
||||
|
||||
Rekey can only be done when the Vault is already unsealed. The operation
|
||||
Rekey can only be done when the vault is already unsealed. The operation
|
||||
is done online, but requires that a threshold of the current unseal
|
||||
keys be provided.
|
||||
|
||||
|
||||
@@ -24,7 +24,7 @@ func (c *RemountCommand) Run(args []string) int {
|
||||
if len(args) != 2 {
|
||||
flags.Usage()
|
||||
c.Ui.Error(fmt.Sprintf(
|
||||
"\nRemount expects two arguments: the from and to path"))
|
||||
"\nremount expects two arguments: the from and to path"))
|
||||
return 1
|
||||
}
|
||||
|
||||
@@ -62,8 +62,8 @@ Usage: vault remount [options] from to
|
||||
|
||||
This command remounts a secret backend that is already mounted to
|
||||
a new path. All the secrets from the old path will be revoked, but
|
||||
the Vault data associated with the backend will be preserved (such
|
||||
as configuration data).
|
||||
the data associated with the backend (such as configuration), will
|
||||
be preserved.
|
||||
|
||||
Example: vault remount secret/ generic/
|
||||
|
||||
|
||||
@@ -26,7 +26,7 @@ func (c *RenewCommand) Run(args []string) int {
|
||||
if len(args) < 1 || len(args) >= 3 {
|
||||
flags.Usage()
|
||||
c.Ui.Error(fmt.Sprintf(
|
||||
"\nRenew expects at least one argument: the lease ID to renew"))
|
||||
"\nrenew expects at least one argument: the lease ID to renew"))
|
||||
return 1
|
||||
}
|
||||
|
||||
|
||||
@@ -26,7 +26,7 @@ func (c *RevokeCommand) Run(args []string) int {
|
||||
if len(args) != 1 {
|
||||
flags.Usage()
|
||||
c.Ui.Error(fmt.Sprintf(
|
||||
"\nRevoke expects one argument: the ID to revoke"))
|
||||
"\nrevoke expects one argument: the ID to revoke"))
|
||||
return 1
|
||||
}
|
||||
leaseId := args[0]
|
||||
|
||||
@@ -36,7 +36,7 @@ func (c *SealCommand) Run(args []string) int {
|
||||
}
|
||||
|
||||
func (c *SealCommand) Synopsis() string {
|
||||
return "Seals the vault server"
|
||||
return "Seals the Vault server"
|
||||
}
|
||||
|
||||
func (c *SealCommand) Help() string {
|
||||
@@ -47,8 +47,8 @@ Usage: vault seal [options]
|
||||
|
||||
Sealing a vault tells the Vault server to stop responding to any
|
||||
access operations until it is unsealed again. A sealed vault throws away
|
||||
its master key to unlock the data, so it physically is blocked from
|
||||
responding to operations again until the Vault is unsealed again with
|
||||
its master key to unlock the data, so it is physically blocked from
|
||||
responding to operations again until the vault is unsealed with
|
||||
the "unseal" command or via the API.
|
||||
|
||||
This command is idempotent, if the vault is already sealed it does nothing.
|
||||
|
||||
@@ -615,12 +615,12 @@ CLUSTER_SYNTHESIS_COMPLETE:
|
||||
core.SetClusterListenerAddrs(clusterAddrs)
|
||||
core.SetClusterSetupFuncs(vault.WrapHandlerForClustering(handler, c.logger))
|
||||
|
||||
// If we're in dev mode, then initialize the core
|
||||
// If we're in Dev mode, then initialize the core
|
||||
if dev {
|
||||
init, err := c.enableDev(core, devRootTokenID)
|
||||
if err != nil {
|
||||
c.Ui.Output(fmt.Sprintf(
|
||||
"Error initializing dev mode: %s", err))
|
||||
"Error initializing Dev mode: %s", err))
|
||||
return 1
|
||||
}
|
||||
|
||||
@@ -1025,7 +1025,7 @@ Usage: vault server [options]
|
||||
with "vault unseal" or the API before this server can respond to requests.
|
||||
This must be done for every server.
|
||||
|
||||
If the server is being started against a storage backend that has
|
||||
If the server is being started against a storage backend that is
|
||||
brand new (no existing Vault data in it), it must be initialized with
|
||||
"vault init" or the API first.
|
||||
|
||||
|
||||
@@ -271,7 +271,7 @@ func (c *SSHCommand) defaultRole(mountPoint, ip string) (string, error) {
|
||||
}
|
||||
|
||||
func (c *SSHCommand) Synopsis() string {
|
||||
return "Initiate a SSH session"
|
||||
return "Initiate an SSH session"
|
||||
}
|
||||
|
||||
func (c *SSHCommand) Help() string {
|
||||
@@ -282,12 +282,12 @@ Usage: vault ssh [options] username@ip
|
||||
|
||||
This command generates a key and uses it to establish an SSH
|
||||
connection with the target machine. This operation requires
|
||||
that SSH backend is mounted and at least one 'role' be registed
|
||||
with vault at priori.
|
||||
that the SSH backend is mounted and at least one 'role' is
|
||||
registered with Vault beforehand.
|
||||
|
||||
For setting up SSH backends with one-time-passwords, installation
|
||||
of agent in target machines is required.
|
||||
See [https://github.com/hashicorp/vault-ssh-agent]
|
||||
of vault-ssh-helper or a compatible agent on target machines
|
||||
is required. See [https://github.com/hashicorp/vault-ssh-agent].
|
||||
|
||||
General Options:
|
||||
` + meta.GeneralOptionsUsage() + `
|
||||
|
||||
@@ -120,7 +120,7 @@ General Options:
|
||||
Token Options:
|
||||
|
||||
-id="7699125c-d8...." The token value that clients will use to authenticate
|
||||
with vault. If not provided this defaults to a 36
|
||||
with Vault. If not provided this defaults to a 36
|
||||
character UUID. A root token is required to specify
|
||||
the ID of a token.
|
||||
|
||||
@@ -151,8 +151,8 @@ Token Options:
|
||||
up in the audit log. This can be specified multiple
|
||||
times.
|
||||
|
||||
-orphan If specified, the token will have no parent. Only
|
||||
This prevents the new token from being revoked with
|
||||
-orphan If specified, the token will have no parent. This
|
||||
prevents the new token from being revoked with
|
||||
your token. Requires a root/sudo token to use.
|
||||
|
||||
-no-default-policy If specified, the token will not have the "default"
|
||||
|
||||
@@ -15,8 +15,11 @@ type TokenRevokeCommand struct {
|
||||
func (c *TokenRevokeCommand) Run(args []string) int {
|
||||
var mode string
|
||||
var accessor bool
|
||||
var self bool
|
||||
var token string
|
||||
flags := c.Meta.FlagSet("token-revoke", meta.FlagSetDefault)
|
||||
flags.BoolVar(&accessor, "accessor", false, "")
|
||||
flags.BoolVar(&self, "self", false, "")
|
||||
flags.StringVar(&mode, "mode", "", "")
|
||||
flags.Usage = func() { c.Ui.Error(c.Help()) }
|
||||
if err := flags.Parse(args); err != nil {
|
||||
@@ -24,15 +27,21 @@ func (c *TokenRevokeCommand) Run(args []string) int {
|
||||
}
|
||||
|
||||
args = flags.Args()
|
||||
if len(args) != 1 {
|
||||
switch {
|
||||
case len(args) == 1 && !self:
|
||||
token = args[0]
|
||||
case len(args) != 0 && self:
|
||||
flags.Usage()
|
||||
c.Ui.Error(fmt.Sprintf(
|
||||
"\ntoken-revoke expects one argument"))
|
||||
"\ntoken-revoke expects no arguments when revoking self"))
|
||||
return 1
|
||||
case len(args) != 1 && !self:
|
||||
flags.Usage()
|
||||
c.Ui.Error(fmt.Sprintf(
|
||||
"\ntoken-revoke expects one argument or the 'self' flag"))
|
||||
return 1
|
||||
}
|
||||
|
||||
token := args[0]
|
||||
|
||||
client, err := c.Client()
|
||||
if err != nil {
|
||||
c.Ui.Error(fmt.Sprintf(
|
||||
@@ -43,14 +52,22 @@ func (c *TokenRevokeCommand) Run(args []string) int {
|
||||
var fn func(string) error
|
||||
// Handle all 6 possible combinations
|
||||
switch {
|
||||
case !accessor && mode == "":
|
||||
case !accessor && self && mode == "":
|
||||
fn = client.Auth().Token().RevokeSelf
|
||||
case !accessor && !self && mode == "":
|
||||
fn = client.Auth().Token().RevokeTree
|
||||
case !accessor && mode == "orphan":
|
||||
case !accessor && !self && mode == "orphan":
|
||||
fn = client.Auth().Token().RevokeOrphan
|
||||
case !accessor && mode == "path":
|
||||
case !accessor && !self && mode == "path":
|
||||
fn = client.Sys().RevokePrefix
|
||||
case accessor && mode == "":
|
||||
case accessor && !self && mode == "":
|
||||
fn = client.Auth().Token().RevokeAccessor
|
||||
case accessor && self:
|
||||
c.Ui.Error("token-revoke cannot be run on self when 'accessor' flag is set")
|
||||
return 1
|
||||
case self && mode != "":
|
||||
c.Ui.Error("token-revoke cannot be run on self when 'mode' flag is set")
|
||||
return 1
|
||||
case accessor && mode == "orphan":
|
||||
c.Ui.Error("token-revoke cannot be run for 'orphan' mode when 'accessor' flag is set")
|
||||
return 1
|
||||
@@ -99,7 +116,7 @@ Usage: vault token-revoke [options] [token|accessor]
|
||||
Token can be revoked using the token accessor. This can be done by
|
||||
setting the '-accessor' flag. Note that when '-accessor' flag is set,
|
||||
'-mode' should not be set for 'orphan' or 'path'. This is because,
|
||||
a token accessor always revokes the token along with it's child tokens.
|
||||
a token accessor always revokes the token along with its child tokens.
|
||||
|
||||
General Options:
|
||||
` + meta.GeneralOptionsUsage() + `
|
||||
@@ -110,6 +127,8 @@ Token Options:
|
||||
via '/auth/token/lookup-accessor/<accessor>' endpoint.
|
||||
Accessor is used when there is no access to token ID.
|
||||
|
||||
-self A boolean flag, if set, the operation is performed on the currently
|
||||
authenticated token i.e. lookup-self.
|
||||
|
||||
-mode=value The type of revocation to do. See the documentation
|
||||
above for more information.
|
||||
|
||||
@@ -23,7 +23,7 @@ func (c *UnmountCommand) Run(args []string) int {
|
||||
if len(args) != 1 {
|
||||
flags.Usage()
|
||||
c.Ui.Error(fmt.Sprintf(
|
||||
"\nUnmount expects one argument: the path to unmount"))
|
||||
"\nunmount expects one argument: the path to unmount"))
|
||||
return 1
|
||||
}
|
||||
|
||||
|
||||
@@ -97,7 +97,7 @@ func (c *UnsealCommand) Run(args []string) int {
|
||||
}
|
||||
|
||||
func (c *UnsealCommand) Synopsis() string {
|
||||
return "Unseals the vault server"
|
||||
return "Unseals the Vault server"
|
||||
}
|
||||
|
||||
func (c *UnsealCommand) Help() string {
|
||||
@@ -105,7 +105,7 @@ func (c *UnsealCommand) Help() string {
|
||||
Usage: vault unseal [options] [key]
|
||||
|
||||
Unseal the vault by entering a portion of the master key. Once all
|
||||
portions are entered, the Vault will be unsealed.
|
||||
portions are entered, the vault will be unsealed.
|
||||
|
||||
Every Vault server initially starts as sealed. It cannot perform any
|
||||
operation except unsealing until it is sealed. Secrets cannot be accessed
|
||||
|
||||
@@ -37,7 +37,7 @@ func (c *UnwrapCommand) Run(args []string) int {
|
||||
case 1:
|
||||
tokenID = args[0]
|
||||
default:
|
||||
c.Ui.Error("Unwrap expects zero or one argument (the ID of the wrapping token)")
|
||||
c.Ui.Error("unwrap expects zero or one argument (the ID of the wrapping token)")
|
||||
flags.Usage()
|
||||
return 1
|
||||
}
|
||||
|
||||
@@ -52,7 +52,7 @@ func IPBelongsToCIDRBlocksString(ipAddr string, cidrList, separator string) (boo
|
||||
return false, fmt.Errorf("invalid IP address")
|
||||
}
|
||||
|
||||
return IPBelongsToCIDRBlocksSlice(ipAddr, strutil.ParseDedupAndSortStrings(cidrList, separator))
|
||||
return IPBelongsToCIDRBlocksSlice(ipAddr, strutil.ParseDedupLowercaseAndSortStrings(cidrList, separator))
|
||||
}
|
||||
|
||||
// IPBelongsToCIDRBlocksSlice checks if the given IP is encompassed by any of the given
|
||||
@@ -95,7 +95,7 @@ func ValidateCIDRListString(cidrList string, separator string) (bool, error) {
|
||||
return false, fmt.Errorf("missing separator")
|
||||
}
|
||||
|
||||
return ValidateCIDRListSlice(strutil.ParseDedupAndSortStrings(cidrList, separator))
|
||||
return ValidateCIDRListSlice(strutil.ParseDedupLowercaseAndSortStrings(cidrList, separator))
|
||||
}
|
||||
|
||||
// ValidateCIDRListSlice checks if the given list of CIDR blocks are valid
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/vault/helper/jsonutil"
|
||||
"github.com/mitchellh/mapstructure"
|
||||
)
|
||||
|
||||
// Builder is a struct to build a key/value mapping based on a list
|
||||
@@ -107,6 +108,17 @@ func (b *Builder) add(raw string) error {
|
||||
}
|
||||
}
|
||||
|
||||
// Repeated keys will be converted into a slice
|
||||
if existingValue, ok := b.result[key]; ok {
|
||||
var sliceValue []interface{}
|
||||
if err := mapstructure.WeakDecode(existingValue, &sliceValue); err != nil {
|
||||
return err
|
||||
}
|
||||
sliceValue = append(sliceValue, value)
|
||||
b.result[key] = sliceValue
|
||||
return nil
|
||||
}
|
||||
|
||||
b.result[key] = value
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -85,3 +85,36 @@ func TestBuilder_stdinTwice(t *testing.T) {
|
||||
t.Fatal("should error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuilder_sameKeyTwice(t *testing.T) {
|
||||
var b Builder
|
||||
err := b.Add("foo=bar", "foo=baz")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
expected := map[string]interface{}{
|
||||
"foo": []interface{}{"bar", "baz"},
|
||||
}
|
||||
actual := b.Map()
|
||||
if !reflect.DeepEqual(actual, expected) {
|
||||
t.Fatalf("bad: %#v", actual)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuilder_sameKeyMultipleTimes(t *testing.T) {
|
||||
var b Builder
|
||||
err := b.Add("foo=bar", "foo=baz", "foo=bay", "foo=bax", "bar=baz")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
expected := map[string]interface{}{
|
||||
"foo": []interface{}{"bar", "baz", "bay", "bax"},
|
||||
"bar": "baz",
|
||||
}
|
||||
actual := b.Map()
|
||||
if !reflect.DeepEqual(actual, expected) {
|
||||
t.Fatalf("bad: %#v", actual)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -61,7 +61,7 @@ func SanitizePolicies(policies []string, addDefault bool) []string {
|
||||
policies = append(policies, "default")
|
||||
}
|
||||
|
||||
return strutil.RemoveDuplicates(policies)
|
||||
return strutil.RemoveDuplicates(policies, true)
|
||||
}
|
||||
|
||||
// EquivalentPolicies checks whether the given policy sets are equivalent, as in,
|
||||
|
||||
@@ -32,14 +32,14 @@ func StrListSubset(super, sub []string) bool {
|
||||
// Parses a comma separated list of strings into a slice of strings.
|
||||
// The return slice will be sorted and will not contain duplicate or
|
||||
// empty items. The values will be converted to lower case.
|
||||
func ParseDedupAndSortStrings(input string, sep string) []string {
|
||||
func ParseDedupLowercaseAndSortStrings(input string, sep string) []string {
|
||||
input = strings.TrimSpace(input)
|
||||
parsed := []string{}
|
||||
if input == "" {
|
||||
// Don't return nil
|
||||
return parsed
|
||||
}
|
||||
return RemoveDuplicates(strings.Split(input, sep))
|
||||
return RemoveDuplicates(strings.Split(input, sep), true)
|
||||
}
|
||||
|
||||
// Parses a comma separated list of `<key>=<value>` tuples into a
|
||||
@@ -49,7 +49,7 @@ func ParseKeyValues(input string, out map[string]string, sep string) error {
|
||||
return fmt.Errorf("'out is nil")
|
||||
}
|
||||
|
||||
keyValues := ParseDedupAndSortStrings(input, sep)
|
||||
keyValues := ParseDedupLowercaseAndSortStrings(input, sep)
|
||||
if len(keyValues) == 0 {
|
||||
return nil
|
||||
}
|
||||
@@ -174,19 +174,31 @@ func ParseArbitraryStringSlice(input string, sep string) []string {
	return ret
}

// Removes duplicate and empty elements from a slice of strings.
// This also converts the items in the slice to lower case and
// returns a sorted slice.
func RemoveDuplicates(items []string) []string {
// TrimStrings takes a slice of strings and returns a slice of strings
// with trimmed spaces
func TrimStrings(items []string) []string {
	ret := make([]string, len(items))
	for i, item := range items {
		ret[i] = strings.TrimSpace(item)
	}
	return ret
}

// Removes duplicate and empty elements from a slice of strings. This also may
// convert the items in the slice to lower case and returns a sorted slice.
func RemoveDuplicates(items []string, lowercase bool) []string {
	itemsMap := map[string]bool{}
	for _, item := range items {
		item = strings.ToLower(strings.TrimSpace(item))
		item = strings.TrimSpace(item)
		if lowercase {
			item = strings.ToLower(item)
		}
		if item == "" {
			continue
		}
		itemsMap[item] = true
	}
	items = []string{}
	items = make([]string, 0, len(itemsMap))
	for item, _ := range itemsMap {
		items = append(items, item)
	}

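To make the new flag concrete, here is a minimal sketch of how callers would use the reworked helpers; the inputs and expected outputs are illustrative and follow the documented trim/dedup/sort behavior rather than coming from this diff.

package main

import (
	"fmt"

	"github.com/hashicorp/vault/helper/strutil"
)

func main() {
	// Lowercasing is now opt-in via the second argument.
	fmt.Println(strutil.RemoveDuplicates([]string{"Admin", "admin", "", "dev"}, true))
	// expected: [admin dev]
	fmt.Println(strutil.RemoveDuplicates([]string{"Admin", "admin", "", "dev"}, false))
	// expected: [Admin admin dev]

	// The renamed parser keeps the old always-lowercase behavior for
	// existing callers such as the Consul backend's service tags.
	fmt.Println(strutil.ParseDedupLowercaseAndSortStrings("Foo, bar ,foo", ","))
	// expected: [bar foo]
}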
@@ -315,3 +315,12 @@ func TestGlobbedStringsMatch(t *testing.T) {
		}
	}
}

func TestTrimStrings(t *testing.T) {
	input := []string{"abc", "123", "abcd ", "123 "}
	expected := []string{"abc", "123", "abcd", "123"}
	actual := TrimStrings(input)
	if !reflect.DeepEqual(expected, actual) {
		t.Fatalf("Bad TrimStrings: expected:%#v, got:%#v", expected, actual)
	}
}

@@ -13,9 +13,9 @@ import (
|
||||
log "github.com/mgutz/logxi/v1"
|
||||
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"github.com/hashicorp/vault/helper/parseutil"
|
||||
"github.com/hashicorp/vault/helper/errutil"
|
||||
"github.com/hashicorp/vault/helper/logformat"
|
||||
"github.com/hashicorp/vault/helper/parseutil"
|
||||
"github.com/hashicorp/vault/logical"
|
||||
)
|
||||
|
||||
@@ -587,6 +587,10 @@ func (t FieldType) Zero() interface{} {
|
||||
return map[string]interface{}{}
|
||||
case TypeDurationSecond:
|
||||
return 0
|
||||
case TypeSlice:
|
||||
return []interface{}{}
|
||||
case TypeStringSlice, TypeCommaStringSlice:
|
||||
return []string{}
|
||||
default:
|
||||
panic("unknown type: " + t.String())
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/vault/helper/parseutil"
|
||||
"github.com/hashicorp/vault/helper/strutil"
|
||||
"github.com/mitchellh/mapstructure"
|
||||
)
|
||||
|
||||
@@ -30,7 +31,8 @@ func (d *FieldData) Validate() error {
|
||||
}
|
||||
|
||||
switch schema.Type {
|
||||
case TypeBool, TypeInt, TypeMap, TypeDurationSecond, TypeString:
|
||||
case TypeBool, TypeInt, TypeMap, TypeDurationSecond, TypeString, TypeSlice,
|
||||
TypeStringSlice, TypeCommaStringSlice:
|
||||
_, _, err := d.getPrimitive(field, schema)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error converting input %v for field %s: %s", value, field, err)
|
||||
@@ -105,7 +107,8 @@ func (d *FieldData) GetOkErr(k string) (interface{}, bool, error) {
|
||||
}
|
||||
|
||||
switch schema.Type {
|
||||
case TypeBool, TypeInt, TypeMap, TypeDurationSecond, TypeString:
|
||||
case TypeBool, TypeInt, TypeMap, TypeDurationSecond, TypeString,
|
||||
TypeSlice, TypeStringSlice, TypeCommaStringSlice:
|
||||
return d.getPrimitive(k, schema)
|
||||
default:
|
||||
return nil, false,
|
||||
@@ -177,6 +180,36 @@ func (d *FieldData) getPrimitive(
		}
		return result, true, nil

	case TypeSlice:
		var result []interface{}
		if err := mapstructure.WeakDecode(raw, &result); err != nil {
			return nil, true, err
		}
		return result, true, nil

	case TypeStringSlice:
		var result []string
		if err := mapstructure.WeakDecode(raw, &result); err != nil {
			return nil, true, err
		}
		return strutil.TrimStrings(result), true, nil

	case TypeCommaStringSlice:
		var result []string
		config := &mapstructure.DecoderConfig{
			Result: &result,
			WeaklyTypedInput: true,
			DecodeHook: mapstructure.StringToSliceHookFunc(","),
		}
		decoder, err := mapstructure.NewDecoder(config)
		if err != nil {
			return nil, false, err
		}
		if err := decoder.Decode(raw); err != nil {
			return nil, false, err
		}
		return strutil.TrimStrings(result), true, nil

	default:
		panic(fmt.Sprintf("Unknown type: %s", schema.Type))
	}

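The slice handling above leans on mapstructure's weak decoding plus a comma-split hook; a standalone sketch of the same conversions (not Vault code, shown only to illustrate what the framework now accepts):

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

func main() {
	// TypeStringSlice path: weak decoding stringifies non-string elements.
	var ss []string
	if err := mapstructure.WeakDecode([]interface{}{123, "abc"}, &ss); err != nil {
		panic(err)
	}
	fmt.Println(ss) // expected: [123 abc]

	// TypeCommaStringSlice path: a comma-split hook turns a plain string into a slice.
	var cs []string
	decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Result:           &cs,
		WeaklyTypedInput: true,
		DecodeHook:       mapstructure.StringToSliceHookFunc(","),
	})
	if err != nil {
		panic(err)
	}
	if err := decoder.Decode("value1,value2,value3"); err != nil {
		panic(err)
	}
	fmt.Println(cs) // expected: [value1 value2 value3]
}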
@@ -146,6 +146,105 @@ func TestFieldDataGet(t *testing.T) {
|
||||
"foo",
|
||||
0,
|
||||
},
|
||||
|
||||
"slice type, empty slice": {
|
||||
map[string]*FieldSchema{
|
||||
"foo": &FieldSchema{Type: TypeSlice},
|
||||
},
|
||||
map[string]interface{}{
|
||||
"foo": []interface{}{},
|
||||
},
|
||||
"foo",
|
||||
[]interface{}{},
|
||||
},
|
||||
|
||||
"slice type, filled, mixed slice": {
|
||||
map[string]*FieldSchema{
|
||||
"foo": &FieldSchema{Type: TypeSlice},
|
||||
},
|
||||
map[string]interface{}{
|
||||
"foo": []interface{}{123, "abc"},
|
||||
},
|
||||
"foo",
|
||||
[]interface{}{123, "abc"},
|
||||
},
|
||||
|
||||
"string slice type, filled slice": {
|
||||
map[string]*FieldSchema{
|
||||
"foo": &FieldSchema{Type: TypeStringSlice},
|
||||
},
|
||||
map[string]interface{}{
|
||||
"foo": []interface{}{123, "abc"},
|
||||
},
|
||||
"foo",
|
||||
[]string{"123", "abc"},
|
||||
},
|
||||
|
||||
"comma string slice type, comma string with one value": {
|
||||
map[string]*FieldSchema{
|
||||
"foo": &FieldSchema{Type: TypeCommaStringSlice},
|
||||
},
|
||||
map[string]interface{}{
|
||||
"foo": "value1",
|
||||
},
|
||||
"foo",
|
||||
[]string{"value1"},
|
||||
},
|
||||
|
||||
"comma string slice type, comma string with multi value": {
|
||||
map[string]*FieldSchema{
|
||||
"foo": &FieldSchema{Type: TypeCommaStringSlice},
|
||||
},
|
||||
map[string]interface{}{
|
||||
"foo": "value1,value2,value3",
|
||||
},
|
||||
"foo",
|
||||
[]string{"value1", "value2", "value3"},
|
||||
},
|
||||
|
||||
"comma string slice type, nil string slice value": {
|
||||
map[string]*FieldSchema{
|
||||
"foo": &FieldSchema{Type: TypeCommaStringSlice},
|
||||
},
|
||||
map[string]interface{}{
|
||||
"foo": "",
|
||||
},
|
||||
"foo",
|
||||
[]string{},
|
||||
},
|
||||
|
||||
"commma string slice type, string slice with one value": {
|
||||
map[string]*FieldSchema{
|
||||
"foo": &FieldSchema{Type: TypeCommaStringSlice},
|
||||
},
|
||||
map[string]interface{}{
|
||||
"foo": []interface{}{"value1"},
|
||||
},
|
||||
"foo",
|
||||
[]string{"value1"},
|
||||
},
|
||||
|
||||
"comma string slice type, string slice with multi value": {
|
||||
map[string]*FieldSchema{
|
||||
"foo": &FieldSchema{Type: TypeCommaStringSlice},
|
||||
},
|
||||
map[string]interface{}{
|
||||
"foo": []interface{}{"value1", "value2", "value3"},
|
||||
},
|
||||
"foo",
|
||||
[]string{"value1", "value2", "value3"},
|
||||
},
|
||||
|
||||
"comma string slice type, empty string slice value": {
|
||||
map[string]*FieldSchema{
|
||||
"foo": &FieldSchema{Type: TypeCommaStringSlice},
|
||||
},
|
||||
map[string]interface{}{
|
||||
"foo": []interface{}{},
|
||||
},
|
||||
"foo",
|
||||
[]string{},
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range cases {
|
||||
|
||||
@@ -13,6 +13,16 @@ const (
	// TypeDurationSecond represent as seconds, this can be either an
	// integer or go duration format string (e.g. 24h)
	TypeDurationSecond

	// TypeSlice represents a slice of any type
	TypeSlice
	// TypeStringSlice is a helper for TypeSlice that returns a sanitized
	// slice of strings
	TypeStringSlice
	// TypeCommaStringSlice is a helper for TypeSlice that returns a sanitized
	// slice of strings and also supports parsing a comma-separated list in
	// a string field
	TypeCommaStringSlice
)

func (t FieldType) String() string {
@@ -27,6 +37,8 @@ func (t FieldType) String() string {
		return "map"
	case TypeDurationSecond:
		return "duration (sec)"
	case TypeSlice, TypeStringSlice, TypeCommaStringSlice:
		return "slice"
	default:
		return "unknown type"
	}

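A rough sketch of how a backend might declare fields with the new types; the package name, path pattern, and field names below are invented for illustration and are not part of this change.

package rolebackend // hypothetical package, for illustration only

import (
	"github.com/hashicorp/vault/logical/framework"
)

// rolePath sketches a path definition using the new slice field types.
func rolePath() *framework.Path {
	return &framework.Path{
		Pattern: "roles/example",
		Fields: map[string]*framework.FieldSchema{
			"tags": &framework.FieldSchema{
				Type:        framework.TypeStringSlice,
				Description: "List of tags; JSON arrays are coerced to []string.",
			},
			"policies": &framework.FieldSchema{
				Type:        framework.TypeCommaStringSlice,
				Description: "Either a JSON array or a comma-separated string.",
			},
		},
	}
}

In a handler, data.Get("policies").([]string) would then return the sanitized slice whether the caller sent a JSON array or a comma-separated string.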
@@ -12,7 +12,7 @@ import (
|
||||
|
||||
log "github.com/mgutz/logxi/v1"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/storage"
|
||||
"github.com/Azure/azure-storage-go"
|
||||
"github.com/armon/go-metrics"
|
||||
"github.com/hashicorp/errwrap"
|
||||
)
|
||||
@@ -59,12 +59,23 @@ func newAzureBackend(conf map[string]string, logger log.Logger) (Backend, error)
|
||||
}
|
||||
|
||||
client, err := storage.NewBasicClient(accountName, accountKey)
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to create Azure client: %v", err)
|
||||
return nil, fmt.Errorf("failed to create Azure client: %v", err)
|
||||
}
|
||||
|
||||
client.GetBlobService().CreateContainerIfNotExists(container, storage.ContainerAccessTypePrivate)
|
||||
contObj := client.GetBlobService().GetContainerReference(container)
|
||||
created, err := contObj.CreateIfNotExists()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to upsert container: %v", err)
|
||||
}
|
||||
if created {
|
||||
err = contObj.SetPermissions(storage.ContainerPermissions{
|
||||
AccessType: storage.ContainerAccessTypePrivate,
|
||||
}, 0, "")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to set permissions on newly-created container: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
maxParStr, ok := conf["max_parallel"]
|
||||
var maxParInt int
|
||||
@@ -156,7 +167,8 @@ func (a *AzureBackend) List(prefix string) ([]string, error) {
|
||||
a.permitPool.Acquire()
|
||||
defer a.permitPool.Release()
|
||||
|
||||
list, err := a.client.ListBlobs(a.container, storage.ListBlobsParameters{Prefix: prefix})
|
||||
contObj := a.client.GetContainerReference(a.container)
|
||||
list, err := contObj.ListBlobs(storage.ListBlobsParameters{Prefix: prefix})
|
||||
|
||||
if err != nil {
|
||||
// Break early.
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
"github.com/hashicorp/vault/helper/logformat"
|
||||
log "github.com/mgutz/logxi/v1"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/storage"
|
||||
"github.com/Azure/azure-storage-go"
|
||||
)
|
||||
|
||||
func TestAzureBackend(t *testing.T) {
|
||||
@@ -35,7 +35,8 @@ func TestAzureBackend(t *testing.T) {
|
||||
})
|
||||
|
||||
defer func() {
|
||||
cleanupClient.GetBlobService().DeleteContainerIfExists(container)
|
||||
contObj := cleanupClient.GetBlobService().GetContainerReference(container)
|
||||
contObj.DeleteIfExists()
|
||||
}()
|
||||
|
||||
if err != nil {
|
||||
|
||||
@@ -233,10 +233,12 @@ func newConsulBackend(conf map[string]string, logger log.Logger) (Backend, error
|
||||
kv: client.KV(),
|
||||
permitPool: NewPermitPool(maxParInt),
|
||||
serviceName: service,
|
||||
serviceTags: strutil.ParseDedupAndSortStrings(tags, ","),
|
||||
serviceTags: strutil.ParseDedupLowercaseAndSortStrings(tags, ","),
|
||||
checkTimeout: checkTimeout,
|
||||
disableRegistration: disableRegistration,
|
||||
consistencyMode: consistencyMode,
|
||||
notifyActiveCh: make(chan notifyEvent),
|
||||
notifySealedCh: make(chan notifyEvent),
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
@@ -3,7 +3,10 @@ package physical
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/coreos/etcd/client"
|
||||
"github.com/coreos/go-semver/semver"
|
||||
@@ -13,6 +16,7 @@ import (
|
||||
var (
|
||||
EtcdSyncConfigError = errors.New("client setup failed: unable to parse etcd sync field in config")
|
||||
EtcdSyncClusterError = errors.New("client setup failed: unable to sync etcd cluster")
|
||||
EtcdMultipleBootstrapError = errors.New("client setup failed: multiple discovery or bootstrap flags specified, use either \"address\" or \"discovery_srv\"")
|
||||
EtcdAddressError = errors.New("client setup failed: address must be valid URL (ex. 'scheme://host:port')")
|
||||
EtcdSemaphoreKeysEmptyError = errors.New("lock queue is empty")
|
||||
EtcdLockHeldError = errors.New("lock already held")
|
||||
@@ -95,3 +99,47 @@ func getEtcdAPIVersion(c client.Client) (string, error) {
|
||||
|
||||
return "3", nil
|
||||
}
|
||||
|
||||
// Retrieves the config option in order of priority:
|
||||
// 1. The named environment variable if it exists
|
||||
// 2. The key in the config map
|
||||
func getEtcdOption(conf map[string]string, confKey, envVar string) (string, bool) {
|
||||
confVal, inConf := conf[confKey]
|
||||
envVal, inEnv := os.LookupEnv(envVar)
|
||||
if inEnv {
|
||||
return envVal, true
|
||||
}
|
||||
return confVal, inConf
|
||||
}
|
||||
|
||||
func getEtcdEndpoints(conf map[string]string) ([]string, error) {
|
||||
address, staticBootstrap := getEtcdOption(conf, "address", "ETCD_ADDR")
|
||||
domain, useSrv := getEtcdOption(conf, "discovery_srv", "ETCD_DISCOVERY_SRV")
|
||||
if useSrv && staticBootstrap {
|
||||
return nil, EtcdMultipleBootstrapError
|
||||
}
|
||||
|
||||
if staticBootstrap {
|
||||
endpoints := strings.Split(address, Etcd2MachineDelimiter)
|
||||
// Verify that the machines are valid URLs
|
||||
for _, e := range endpoints {
|
||||
u, urlErr := url.Parse(e)
|
||||
if urlErr != nil || u.Scheme == "" {
|
||||
return nil, EtcdAddressError
|
||||
}
|
||||
}
|
||||
return endpoints, nil
|
||||
}
|
||||
|
||||
if useSrv {
|
||||
discoverer := client.NewSRVDiscover()
|
||||
endpoints, err := discoverer.Discover(domain)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to discover etcd endpoints through SRV discovery: %v", err)
|
||||
}
|
||||
return endpoints, nil
|
||||
}
|
||||
|
||||
// Set a default endpoints list if no option was set
|
||||
return []string{"http://127.0.0.1:2379"}, nil
|
||||
}
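A sketch of the two mutually exclusive configuration shapes the helper accepts; the URLs and domain are placeholders, and the ETCD_ADDR / ETCD_DISCOVERY_SRV environment variables would take precedence over the map keys.

// Illustrative only; getEtcdEndpoints is unexported, so this would live
// inside package physical (fmt import assumed).
func exampleEtcdEndpoints() {
	// Static bootstrap: comma-separated URLs, each validated to carry a scheme.
	eps, err := getEtcdEndpoints(map[string]string{
		"address": "https://etcd-1.example.com:2379,https://etcd-2.example.com:2379",
	})
	fmt.Println(eps, err)

	// SRV discovery: cannot be combined with a static address.
	_, err = getEtcdEndpoints(map[string]string{
		"address":       "https://etcd-1.example.com:2379",
		"discovery_srv": "etcd.service.example.com",
	})
	fmt.Println(err) // EtcdMultipleBootstrapError
}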
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
@@ -118,23 +117,9 @@ func newEtcd2Backend(conf map[string]string, logger log.Logger) (Backend, error)
|
||||
}
|
||||
|
||||
func newEtcdV2Client(conf map[string]string) (client.Client, error) {
|
||||
// Set a default machines list and check for an overriding address value.
|
||||
machines := "http://127.0.0.1:2379"
|
||||
if address, ok := conf["address"]; ok {
|
||||
machines = address
|
||||
}
|
||||
machinesEnv := os.Getenv("ETCD_ADDR")
|
||||
if machinesEnv != "" {
|
||||
machines = machinesEnv
|
||||
}
|
||||
machinesParsed := strings.Split(machines, Etcd2MachineDelimiter)
|
||||
|
||||
// Verify that the machines are valid URLs
|
||||
for _, machine := range machinesParsed {
|
||||
u, urlErr := url.Parse(machine)
|
||||
if urlErr != nil || u.Scheme == "" {
|
||||
return nil, EtcdAddressError
|
||||
}
|
||||
endpoints, err := getEtcdEndpoints(conf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create a new client from the supplied address and attempt to sync with the
|
||||
@@ -160,7 +145,7 @@ func newEtcdV2Client(conf map[string]string) (client.Client, error) {
|
||||
}
|
||||
|
||||
cfg := client.Config{
|
||||
Endpoints: machinesParsed,
|
||||
Endpoints: endpoints,
|
||||
Transport: cTransport,
|
||||
}
|
||||
|
||||
|
||||
@@ -48,10 +48,9 @@ func newEtcd3Backend(conf map[string]string, logger log.Logger) (Backend, error)
|
||||
path = "/" + path
|
||||
}
|
||||
|
||||
// Set a default machines list and check for an overriding address value.
|
||||
endpoints := []string{"http://127.0.0.1:2379"}
|
||||
if address, ok := conf["address"]; ok {
|
||||
endpoints = strings.Split(address, ",")
|
||||
endpoints, err := getEtcdEndpoints(conf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cfg := clientv3.Config{
|
||||
|
||||
216
physical/mssql.go
Normal file
@@ -0,0 +1,216 @@
|
||||
package physical
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/armon/go-metrics"
|
||||
_ "github.com/denisenkom/go-mssqldb"
|
||||
log "github.com/mgutz/logxi/v1"
|
||||
)
|
||||
|
||||
type MsSQLBackend struct {
|
||||
dbTable string
|
||||
client *sql.DB
|
||||
statements map[string]*sql.Stmt
|
||||
logger log.Logger
|
||||
}
|
||||
|
||||
func newMsSQLBackend(conf map[string]string, logger log.Logger) (Backend, error) {
|
||||
username, ok := conf["username"]
|
||||
if !ok {
|
||||
username = ""
|
||||
}
|
||||
|
||||
password, ok := conf["password"]
|
||||
if !ok {
|
||||
password = ""
|
||||
}
|
||||
|
||||
server, ok := conf["server"]
|
||||
if !ok || server == "" {
|
||||
return nil, fmt.Errorf("missing server")
|
||||
}
|
||||
|
||||
database, ok := conf["database"]
|
||||
if !ok {
|
||||
database = "Vault"
|
||||
}
|
||||
|
||||
table, ok := conf["table"]
|
||||
if !ok {
|
||||
table = "Vault"
|
||||
}
|
||||
|
||||
appname, ok := conf["appname"]
|
||||
if !ok {
|
||||
appname = "Vault"
|
||||
}
|
||||
|
||||
connectionTimeout, ok := conf["connectiontimeout"]
|
||||
if !ok {
|
||||
connectionTimeout = "30"
|
||||
}
|
||||
|
||||
logLevel, ok := conf["loglevel"]
|
||||
if !ok {
|
||||
logLevel = "0"
|
||||
}
|
||||
|
||||
schema, ok := conf["schema"]
|
||||
if !ok || schema == "" {
|
||||
schema = "dbo"
|
||||
}
|
||||
|
||||
connectionString := fmt.Sprintf("server=%s;app name=%s;connection timeout=%s;log=%s", server, appname, connectionTimeout, logLevel)
|
||||
if username != "" {
|
||||
connectionString += ";user id=" + username
|
||||
}
|
||||
|
||||
if password != "" {
|
||||
connectionString += ";password=" + password
|
||||
}
|
||||
|
||||
db, err := sql.Open("mssql", connectionString)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to connect to mssql: %v", err)
|
||||
}
|
||||
|
||||
if _, err := db.Exec("IF NOT EXISTS(SELECT * FROM sys.databases WHERE name = '" + database + "') CREATE DATABASE " + database); err != nil {
|
||||
return nil, fmt.Errorf("failed to create mssql database: %v", err)
|
||||
}
|
||||
|
||||
dbTable := database + "." + schema + "." + table
|
||||
createQuery := "IF NOT EXISTS(SELECT 1 FROM " + database + ".INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE='BASE TABLE' AND TABLE_NAME='" + table + "' AND TABLE_SCHEMA='" + schema +
|
||||
"') CREATE TABLE " + dbTable + " (Path VARCHAR(512) PRIMARY KEY, Value VARBINARY(MAX))"
|
||||
|
||||
if schema != "dbo" {
|
||||
if _, err := db.Exec("USE " + database); err != nil {
|
||||
return nil, fmt.Errorf("failed to switch mssql database: %v", err)
|
||||
}
|
||||
|
||||
var num int
|
||||
err = db.QueryRow("SELECT 1 FROM sys.schemas WHERE name = '" + schema + "'").Scan(&num)
|
||||
|
||||
switch {
|
||||
case err == sql.ErrNoRows:
|
||||
if _, err := db.Exec("CREATE SCHEMA " + schema); err != nil {
|
||||
return nil, fmt.Errorf("failed to create mssql schema: %v", err)
|
||||
}
|
||||
|
||||
case err != nil:
|
||||
return nil, fmt.Errorf("failed to check if mssql schema exists: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := db.Exec(createQuery); err != nil {
|
||||
return nil, fmt.Errorf("failed to create mssql table: %v", err)
|
||||
}
|
||||
|
||||
m := &MsSQLBackend{
|
||||
dbTable: dbTable,
|
||||
client: db,
|
||||
statements: make(map[string]*sql.Stmt),
|
||||
logger: logger,
|
||||
}
|
||||
|
||||
statements := map[string]string{
|
||||
"put": "IF EXISTS(SELECT 1 FROM " + dbTable + " WHERE Path = ?) UPDATE " + dbTable + " SET Value = ? WHERE Path = ?" +
|
||||
" ELSE INSERT INTO " + dbTable + " VALUES(?, ?)",
|
||||
"get": "SELECT Value FROM " + dbTable + " WHERE Path = ?",
|
||||
"delete": "DELETE FROM " + dbTable + " WHERE Path = ?",
|
||||
"list": "SELECT Path FROM " + dbTable + " WHERE Path LIKE ?",
|
||||
}
|
||||
|
||||
for name, query := range statements {
|
||||
if err := m.prepare(name, query); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (m *MsSQLBackend) prepare(name, query string) error {
|
||||
stmt, err := m.client.Prepare(query)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to prepare '%s': %v", name, err)
|
||||
}
|
||||
|
||||
m.statements[name] = stmt
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MsSQLBackend) Put(entry *Entry) error {
|
||||
defer metrics.MeasureSince([]string{"mssql", "put"}, time.Now())
|
||||
|
||||
_, err := m.statements["put"].Exec(entry.Key, entry.Value, entry.Key, entry.Key, entry.Value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MsSQLBackend) Get(key string) (*Entry, error) {
|
||||
defer metrics.MeasureSince([]string{"mssql", "get"}, time.Now())
|
||||
|
||||
var result []byte
|
||||
err := m.statements["get"].QueryRow(key).Scan(&result)
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ent := &Entry{
|
||||
Key: key,
|
||||
Value: result,
|
||||
}
|
||||
|
||||
return ent, nil
|
||||
}
|
||||
|
||||
func (m *MsSQLBackend) Delete(key string) error {
|
||||
defer metrics.MeasureSince([]string{"mssql", "delete"}, time.Now())
|
||||
|
||||
_, err := m.statements["delete"].Exec(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MsSQLBackend) List(prefix string) ([]string, error) {
|
||||
defer metrics.MeasureSince([]string{"mssql", "list"}, time.Now())
|
||||
|
||||
likePrefix := prefix + "%"
|
||||
rows, err := m.statements["list"].Query(likePrefix)
|
||||
|
||||
var keys []string
|
||||
for rows.Next() {
|
||||
var key string
|
||||
err = rows.Scan(&key)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to scan rows: %v", err)
|
||||
}
|
||||
|
||||
key = strings.TrimPrefix(key, prefix)
|
||||
if i := strings.Index(key, "/"); i == -1 {
|
||||
keys = append(keys, key)
|
||||
} else if i != -1 {
|
||||
keys = appendIfMissing(keys, string(key[:i+1]))
|
||||
}
|
||||
}
|
||||
|
||||
sort.Strings(keys)
|
||||
|
||||
return keys, nil
|
||||
}
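Wiring the new backend up through the factory registered below might look roughly like this; the server, credentials, and log level are placeholder values, and omitted keys (schema, appname, connectiontimeout, loglevel) fall back to the defaults shown above.

package main

import (
	"github.com/hashicorp/vault/helper/logformat"
	"github.com/hashicorp/vault/physical"
	log "github.com/mgutz/logxi/v1"
)

func main() {
	logger := logformat.NewVaultLogger(log.LevelInfo)

	// Illustrative configuration only.
	b, err := physical.NewBackend("mssql", logger, map[string]string{
		"server":   "sql.example.com",
		"database": "Vault",
		"table":    "Vault",
		"username": "vault",
		"password": "example-password",
	})
	if err != nil {
		panic(err)
	}
	_ = b
}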
|
||||
58
physical/mssql_test.go
Normal file
@@ -0,0 +1,58 @@
|
||||
package physical
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/vault/helper/logformat"
|
||||
log "github.com/mgutz/logxi/v1"
|
||||
|
||||
_ "github.com/denisenkom/go-mssqldb"
|
||||
)
|
||||
|
||||
func TestMsSQLBackend(t *testing.T) {
|
||||
server := os.Getenv("MSSQL_SERVER")
|
||||
if server == "" {
|
||||
t.SkipNow()
|
||||
}
|
||||
|
||||
database := os.Getenv("MSSQL_DB")
|
||||
if database == "" {
|
||||
database = "test"
|
||||
}
|
||||
|
||||
table := os.Getenv("MSSQL_TABLE")
|
||||
if table == "" {
|
||||
table = "test"
|
||||
}
|
||||
|
||||
username := os.Getenv("MSSQL_USERNAME")
|
||||
password := os.Getenv("MSSQL_PASSWORD")
|
||||
|
||||
// Run vault tests
|
||||
logger := logformat.NewVaultLogger(log.LevelTrace)
|
||||
|
||||
b, err := NewBackend("mssql", logger, map[string]string{
|
||||
"server": server,
|
||||
"database": database,
|
||||
"table": table,
|
||||
"username": username,
|
||||
"password": password,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create new backend: %v", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
mssql := b.(*MsSQLBackend)
|
||||
_, err := mssql.client.Exec("DROP TABLE " + mssql.dbTable)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to drop table: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
testBackend(t, b)
|
||||
testBackend_ListPrefix(t, b)
|
||||
|
||||
}
|
||||
@@ -148,6 +148,7 @@ var builtinBackends = map[string]Factory{
|
||||
"azure": newAzureBackend,
|
||||
"dynamodb": newDynamoDBBackend,
|
||||
"etcd": newEtcdBackend,
|
||||
"mssql": newMsSQLBackend,
|
||||
"mysql": newMySQLBackend,
|
||||
"postgresql": newPostgreSQLBackend,
|
||||
"swift": newSwiftBackend,
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
@@ -18,15 +19,17 @@ import (
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
"github.com/hashicorp/errwrap"
|
||||
cleanhttp "github.com/hashicorp/go-cleanhttp"
|
||||
"github.com/hashicorp/vault/helper/awsutil"
|
||||
"github.com/hashicorp/vault/helper/consts"
|
||||
)
|
||||
|
||||
// S3Backend is a physical backend that stores data
|
||||
// within an S3 bucket.
|
||||
type S3Backend struct {
|
||||
bucket string
|
||||
client *s3.S3
|
||||
logger log.Logger
|
||||
bucket string
|
||||
client *s3.S3
|
||||
logger log.Logger
|
||||
permitPool *PermitPool
|
||||
}
|
||||
|
||||
@@ -77,10 +80,16 @@ func newS3Backend(conf map[string]string, logger log.Logger) (Backend, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pooledTransport := cleanhttp.DefaultPooledTransport()
|
||||
pooledTransport.MaxIdleConnsPerHost = consts.ExpirationRestoreWorkerCount
|
||||
|
||||
s3conn := s3.New(session.New(&aws.Config{
|
||||
Credentials: creds,
|
||||
Endpoint: aws.String(endpoint),
|
||||
Region: aws.String(region),
|
||||
HTTPClient: &http.Client{
|
||||
Transport: pooledTransport,
|
||||
},
|
||||
Endpoint: aws.String(endpoint),
|
||||
Region: aws.String(region),
|
||||
}))
|
||||
|
||||
_, err = s3conn.HeadBucket(&s3.HeadBucketInput{Bucket: &bucket})
|
||||
@@ -101,9 +110,9 @@ func newS3Backend(conf map[string]string, logger log.Logger) (Backend, error) {
|
||||
}
|
||||
|
||||
s := &S3Backend{
|
||||
client: s3conn,
|
||||
bucket: bucket,
|
||||
logger: logger,
|
||||
client: s3conn,
|
||||
bucket: bucket,
|
||||
logger: logger,
|
||||
permitPool: NewPermitPool(maxParInt),
|
||||
}
|
||||
return s, nil
|
||||
|
||||
@@ -58,17 +58,36 @@ func newSwiftBackend(conf map[string]string, logger log.Logger) (Backend, error)
|
||||
return nil, fmt.Errorf("missing container")
|
||||
}
|
||||
}
|
||||
tenant := os.Getenv("OS_TENANT_NAME")
|
||||
if tenant == "" {
|
||||
tenant = conf["tenant"]
|
||||
project := os.Getenv("OS_PROJECT_NAME")
|
||||
if project == "" {
|
||||
project = conf["project"]
|
||||
|
||||
if project == "" {
|
||||
// Check for KeyStone naming prior to V3
|
||||
project := os.Getenv("OS_TENANT_NAME")
|
||||
if project == "" {
|
||||
project = conf["tenant"]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
domain := os.Getenv("OS_USER_DOMAIN_NAME")
|
||||
if domain == "" {
|
||||
domain = conf["domain"]
|
||||
}
|
||||
projectDomain := os.Getenv("OS_PROJECT_DOMAIN_NAME")
|
||||
if projectDomain == "" {
|
||||
projectDomain = conf["project-domain"]
|
||||
}
|
||||
|
||||
c := swift.Connection{
|
||||
UserName: username,
|
||||
ApiKey: password,
|
||||
AuthUrl: authUrl,
|
||||
Tenant: tenant,
|
||||
Transport: cleanhttp.DefaultPooledTransport(),
|
||||
Domain: domain,
|
||||
UserName: username,
|
||||
ApiKey: password,
|
||||
AuthUrl: authUrl,
|
||||
Tenant: project,
|
||||
TenantDomain: projectDomain,
|
||||
Transport: cleanhttp.DefaultPooledTransport(),
|
||||
}
|
||||
|
||||
err := c.Authenticate()
|
||||
|
||||
@@ -21,17 +21,21 @@ func TestSwiftBackend(t *testing.T) {
|
||||
username := os.Getenv("OS_USERNAME")
|
||||
password := os.Getenv("OS_PASSWORD")
|
||||
authUrl := os.Getenv("OS_AUTH_URL")
|
||||
tenant := os.Getenv("OS_TENANT_NAME")
|
||||
project := os.Getenv("OS_PROJECT_NAME")
|
||||
domain := os.Getenv("OS_USER_DOMAIN_NAME")
|
||||
projectDomain := os.Getenv("OS_PROJECT_DOMAIN_NAME")
|
||||
|
||||
ts := time.Now().UnixNano()
|
||||
container := fmt.Sprintf("vault-test-%d", ts)
|
||||
|
||||
cleaner := swift.Connection{
|
||||
UserName: username,
|
||||
ApiKey: password,
|
||||
AuthUrl: authUrl,
|
||||
Tenant: tenant,
|
||||
Transport: cleanhttp.DefaultPooledTransport(),
|
||||
Domain: domain,
|
||||
UserName: username,
|
||||
ApiKey: password,
|
||||
AuthUrl: authUrl,
|
||||
Tenant: project,
|
||||
TenantDomain: projectDomain,
|
||||
Transport: cleanhttp.DefaultPooledTransport(),
|
||||
}
|
||||
|
||||
err := cleaner.Authenticate()
|
||||
@@ -63,11 +67,13 @@ func TestSwiftBackend(t *testing.T) {
|
||||
logger := logformat.NewVaultLogger(log.LevelTrace)
|
||||
|
||||
b, err := NewBackend("swift", logger, map[string]string{
|
||||
"username": username,
|
||||
"password": password,
|
||||
"container": container,
|
||||
"auth_url": authUrl,
|
||||
"tenant": tenant,
|
||||
"username": username,
|
||||
"password": password,
|
||||
"container": container,
|
||||
"auth_url": authUrl,
|
||||
"project": project,
|
||||
"domain": domain,
|
||||
"project-domain": projectDomain,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
|
||||
@@ -10,7 +10,7 @@ RUN apt-get update -y && apt-get install --no-install-recommends -y -q \
|
||||
git mercurial bzr \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
ENV GOVERSION 1.8
|
||||
ENV GOVERSION 1.8.1
|
||||
RUN mkdir /goroot && mkdir /gopath
|
||||
RUN curl https://storage.googleapis.com/golang/go${GOVERSION}.linux-amd64.tar.gz \
|
||||
| tar xvzf - -C /goroot --strip-components=1
|
||||
|
||||
@@ -10,9 +10,9 @@ import (
|
||||
"github.com/armon/go-metrics"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"github.com/hashicorp/go-uuid"
|
||||
"github.com/hashicorp/vault/helper/parseutil"
|
||||
"github.com/hashicorp/vault/helper/jsonutil"
|
||||
"github.com/hashicorp/vault/helper/locksutil"
|
||||
"github.com/hashicorp/vault/helper/parseutil"
|
||||
"github.com/hashicorp/vault/helper/policyutil"
|
||||
"github.com/hashicorp/vault/helper/salt"
|
||||
"github.com/hashicorp/vault/helper/strutil"
|
||||
@@ -1468,7 +1468,7 @@ func (ts *TokenStore) handleCreateCommon(
|
||||
|
||||
if len(role.DisallowedPolicies) > 0 {
|
||||
// We don't add the default here because we only want to disallow it if it's explicitly set
|
||||
sanitizedRolePolicies = strutil.RemoveDuplicates(role.DisallowedPolicies)
|
||||
sanitizedRolePolicies = strutil.RemoveDuplicates(role.DisallowedPolicies, true)
|
||||
|
||||
for _, finalPolicy := range finalPolicies {
|
||||
if strutil.StrListContains(sanitizedRolePolicies, finalPolicy) {
|
||||
@@ -2202,9 +2202,9 @@ func (ts *TokenStore) tokenStoreRoleCreateUpdate(
|
||||
|
||||
disallowedPoliciesStr, ok := data.GetOk("disallowed_policies")
|
||||
if ok {
|
||||
entry.DisallowedPolicies = strutil.ParseDedupAndSortStrings(disallowedPoliciesStr.(string), ",")
|
||||
entry.DisallowedPolicies = strutil.ParseDedupLowercaseAndSortStrings(disallowedPoliciesStr.(string), ",")
|
||||
} else if req.Operation == logical.CreateOperation {
|
||||
entry.DisallowedPolicies = strutil.ParseDedupAndSortStrings(data.Get("disallowed_policies").(string), ",")
|
||||
entry.DisallowedPolicies = strutil.ParseDedupLowercaseAndSortStrings(data.Get("disallowed_policies").(string), ",")
|
||||
}
|
||||
|
||||
// Store it
|
||||
|
||||
35
vendor/cloud.google.com/go/compute/metadata/metadata.go
generated
vendored
@@ -34,8 +34,6 @@ import (
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/net/context/ctxhttp"
|
||||
|
||||
"cloud.google.com/go/internal"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -48,6 +46,8 @@ const (
|
||||
// This is variable name is not defined by any spec, as far as
|
||||
// I know; it was made up for the Go package.
|
||||
metadataHostEnv = "GCE_METADATA_HOST"
|
||||
|
||||
userAgent = "gcloud-golang/0.1"
|
||||
)
|
||||
|
||||
type cachedValue struct {
|
||||
@@ -65,24 +65,20 @@ var (
|
||||
|
||||
var (
|
||||
metaClient = &http.Client{
|
||||
Transport: &internal.Transport{
|
||||
Base: &http.Transport{
|
||||
Dial: (&net.Dialer{
|
||||
Timeout: 2 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
}).Dial,
|
||||
ResponseHeaderTimeout: 2 * time.Second,
|
||||
},
|
||||
Transport: &http.Transport{
|
||||
Dial: (&net.Dialer{
|
||||
Timeout: 2 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
}).Dial,
|
||||
ResponseHeaderTimeout: 2 * time.Second,
|
||||
},
|
||||
}
|
||||
subscribeClient = &http.Client{
|
||||
Transport: &internal.Transport{
|
||||
Base: &http.Transport{
|
||||
Dial: (&net.Dialer{
|
||||
Timeout: 2 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
}).Dial,
|
||||
},
|
||||
Transport: &http.Transport{
|
||||
Dial: (&net.Dialer{
|
||||
Timeout: 2 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
}).Dial,
|
||||
},
|
||||
}
|
||||
)
|
||||
@@ -132,6 +128,7 @@ func getETag(client *http.Client, suffix string) (value, etag string, err error)
|
||||
url := "http://" + host + "/computeMetadata/v1/" + suffix
|
||||
req, _ := http.NewRequest("GET", url, nil)
|
||||
req.Header.Set("Metadata-Flavor", "Google")
|
||||
req.Header.Set("User-Agent", userAgent)
|
||||
res, err := client.Do(req)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
@@ -202,7 +199,9 @@ func testOnGCE() bool {
|
||||
// Try two strategies in parallel.
|
||||
// See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194
|
||||
go func() {
|
||||
res, err := ctxhttp.Get(ctx, metaClient, "http://"+metadataIP)
|
||||
req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
|
||||
req.Header.Set("User-Agent", userAgent)
|
||||
res, err := ctxhttp.Do(ctx, metaClient, req)
|
||||
if err != nil {
|
||||
resc <- false
|
||||
return
|
||||
|
||||
256
vendor/cloud.google.com/go/iam/iam.go
generated
vendored
Normal file
@@ -0,0 +1,256 @@
|
||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package iam supports the resource-specific operations of Google Cloud
|
||||
// IAM (Identity and Access Management) for the Google Cloud Libraries.
|
||||
// See https://cloud.google.com/iam for more about IAM.
|
||||
//
|
||||
// Users of the Google Cloud Libraries will typically not use this package
|
||||
// directly. Instead they will begin with some resource that supports IAM, like
|
||||
// a pubsub topic, and call its IAM method to get a Handle for that resource.
|
||||
package iam
|
||||
|
||||
import (
|
||||
"golang.org/x/net/context"
|
||||
pb "google.golang.org/genproto/googleapis/iam/v1"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// client abstracts the IAMPolicy API to allow multiple implementations.
|
||||
type client interface {
|
||||
Get(ctx context.Context, resource string) (*pb.Policy, error)
|
||||
Set(ctx context.Context, resource string, p *pb.Policy) error
|
||||
Test(ctx context.Context, resource string, perms []string) ([]string, error)
|
||||
}
|
||||
|
||||
// grpcClient implements client for the standard gRPC-based IAMPolicy service.
|
||||
type grpcClient struct {
|
||||
c pb.IAMPolicyClient
|
||||
}
|
||||
|
||||
func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, error) {
|
||||
proto, err := g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{Resource: resource})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return proto, nil
|
||||
}
|
||||
func (g *grpcClient) Set(ctx context.Context, resource string, p *pb.Policy) error {
|
||||
_, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{
|
||||
Resource: resource,
|
||||
Policy: p,
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func (g *grpcClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) {
|
||||
res, err := g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{
|
||||
Resource: resource,
|
||||
Permissions: perms,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.Permissions, nil
|
||||
}
|
||||
|
||||
// A Handle provides IAM operations for a resource.
|
||||
type Handle struct {
|
||||
c client
|
||||
resource string
|
||||
}
|
||||
|
||||
// InternalNewHandle is for use by the Google Cloud Libraries only.
|
||||
//
|
||||
// InternalNewHandle returns a Handle for resource.
|
||||
// The conn parameter refers to a server that must support the IAMPolicy service.
|
||||
func InternalNewHandle(conn *grpc.ClientConn, resource string) *Handle {
|
||||
return InternalNewHandleClient(&grpcClient{c: pb.NewIAMPolicyClient(conn)}, resource)
|
||||
}
|
||||
|
||||
// InternalNewHandleClient is for use by the Google Cloud Libraries only.
|
||||
//
|
||||
// InternalNewHandleClient returns a Handle for resource using the given
|
||||
// client implementation.
|
||||
func InternalNewHandleClient(c client, resource string) *Handle {
|
||||
return &Handle{
|
||||
c: c,
|
||||
resource: resource,
|
||||
}
|
||||
}
|
||||
|
||||
// Policy retrieves the IAM policy for the resource.
|
||||
func (h *Handle) Policy(ctx context.Context) (*Policy, error) {
|
||||
proto, err := h.c.Get(ctx, h.resource)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Policy{InternalProto: proto}, nil
|
||||
}
|
||||
|
||||
// SetPolicy replaces the resource's current policy with the supplied Policy.
|
||||
//
|
||||
// If policy was created from a prior call to Get, then the modification will
|
||||
// only succeed if the policy has not changed since the Get.
|
||||
func (h *Handle) SetPolicy(ctx context.Context, policy *Policy) error {
|
||||
return h.c.Set(ctx, h.resource, policy.InternalProto)
|
||||
}
|
||||
|
||||
// TestPermissions returns the subset of permissions that the caller has on the resource.
|
||||
func (h *Handle) TestPermissions(ctx context.Context, permissions []string) ([]string, error) {
|
||||
return h.c.Test(ctx, h.resource, permissions)
|
||||
}
|
||||
|
||||
// A RoleName is a name representing a collection of permissions.
|
||||
type RoleName string
|
||||
|
||||
// Common role names.
|
||||
const (
|
||||
Owner RoleName = "roles/owner"
|
||||
Editor RoleName = "roles/editor"
|
||||
Viewer RoleName = "roles/viewer"
|
||||
)
|
||||
|
||||
const (
|
||||
// AllUsers is a special member that denotes all users, even unauthenticated ones.
|
||||
AllUsers = "allUsers"
|
||||
|
||||
// AllAuthenticatedUsers is a special member that denotes all authenticated users.
|
||||
AllAuthenticatedUsers = "allAuthenticatedUsers"
|
||||
)
|
||||
|
||||
// A Policy is a list of Bindings representing roles
|
||||
// granted to members.
|
||||
//
|
||||
// The zero Policy is a valid policy with no bindings.
|
||||
type Policy struct {
|
||||
// TODO(jba): when type aliases are available, put Policy into an internal package
|
||||
// and provide an exported alias here.
|
||||
|
||||
// This field is exported for use by the Google Cloud Libraries only.
|
||||
// It may become unexported in a future release.
|
||||
InternalProto *pb.Policy
|
||||
}
|
||||
|
||||
// Members returns the list of members with the supplied role.
|
||||
// The return value should not be modified. Use Add and Remove
|
||||
// to modify the members of a role.
|
||||
func (p *Policy) Members(r RoleName) []string {
|
||||
b := p.binding(r)
|
||||
if b == nil {
|
||||
return nil
|
||||
}
|
||||
return b.Members
|
||||
}
|
||||
|
||||
// HasRole reports whether member has role r.
|
||||
func (p *Policy) HasRole(member string, r RoleName) bool {
|
||||
return memberIndex(member, p.binding(r)) >= 0
|
||||
}
|
||||
|
||||
// Add adds member member to role r if it is not already present.
|
||||
// A new binding is created if there is no binding for the role.
|
||||
func (p *Policy) Add(member string, r RoleName) {
|
||||
b := p.binding(r)
|
||||
if b == nil {
|
||||
if p.InternalProto == nil {
|
||||
p.InternalProto = &pb.Policy{}
|
||||
}
|
||||
p.InternalProto.Bindings = append(p.InternalProto.Bindings, &pb.Binding{
|
||||
Role: string(r),
|
||||
Members: []string{member},
|
||||
})
|
||||
return
|
||||
}
|
||||
if memberIndex(member, b) < 0 {
|
||||
b.Members = append(b.Members, member)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Remove removes member from role r if it is present.
|
||||
func (p *Policy) Remove(member string, r RoleName) {
|
||||
bi := p.bindingIndex(r)
|
||||
if bi < 0 {
|
||||
return
|
||||
}
|
||||
bindings := p.InternalProto.Bindings
|
||||
b := bindings[bi]
|
||||
mi := memberIndex(member, b)
|
||||
if mi < 0 {
|
||||
return
|
||||
}
|
||||
// Order doesn't matter for bindings or members, so to remove, move the last item
|
||||
// into the removed spot and shrink the slice.
|
||||
if len(b.Members) == 1 {
|
||||
// Remove binding.
|
||||
last := len(bindings) - 1
|
||||
bindings[bi] = bindings[last]
|
||||
bindings[last] = nil
|
||||
p.InternalProto.Bindings = bindings[:last]
|
||||
return
|
||||
}
|
||||
// Remove member.
|
||||
// TODO(jba): worry about multiple copies of m?
|
||||
last := len(b.Members) - 1
|
||||
b.Members[mi] = b.Members[last]
|
||||
b.Members[last] = ""
|
||||
b.Members = b.Members[:last]
|
||||
}
|
||||
|
||||
// Roles returns the names of all the roles that appear in the Policy.
|
||||
func (p *Policy) Roles() []RoleName {
|
||||
if p.InternalProto == nil {
|
||||
return nil
|
||||
}
|
||||
var rns []RoleName
|
||||
for _, b := range p.InternalProto.Bindings {
|
||||
rns = append(rns, RoleName(b.Role))
|
||||
}
|
||||
return rns
|
||||
}
|
||||
|
||||
// binding returns the Binding for the supplied role, or nil if there isn't one.
|
||||
func (p *Policy) binding(r RoleName) *pb.Binding {
|
||||
i := p.bindingIndex(r)
|
||||
if i < 0 {
|
||||
return nil
|
||||
}
|
||||
return p.InternalProto.Bindings[i]
|
||||
}
|
||||
|
||||
func (p *Policy) bindingIndex(r RoleName) int {
|
||||
if p.InternalProto == nil {
|
||||
return -1
|
||||
}
|
||||
for i, b := range p.InternalProto.Bindings {
|
||||
if b.Role == string(r) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// memberIndex returns the index of m in b's Members, or -1 if not found.
|
||||
func memberIndex(m string, b *pb.Binding) int {
|
||||
if b == nil {
|
||||
return -1
|
||||
}
|
||||
for i, mm := range b.Members {
|
||||
if mm == m {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
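Vendored here to support GCS IAM operations; a brief, illustrative use of the Policy helpers (the member strings are placeholders):

package main

import (
	"fmt"

	"cloud.google.com/go/iam"
)

func main() {
	var p iam.Policy // the zero Policy is valid and has no bindings

	p.Add("user:alice@example.com", iam.Viewer)
	p.Add("user:bob@example.com", iam.Editor)
	p.Remove("user:bob@example.com", iam.Editor) // drops the now-empty editor binding

	fmt.Println(p.HasRole("user:alice@example.com", iam.Viewer)) // true
	fmt.Println(p.Roles())                                       // [roles/viewer]
}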
|
||||
64
vendor/cloud.google.com/go/internal/cloud.go
generated
vendored
@@ -1,64 +0,0 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package internal provides support for the cloud packages.
|
||||
//
|
||||
// Users should not import this package directly.
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
const userAgent = "gcloud-golang/0.1"
|
||||
|
||||
// Transport is an http.RoundTripper that appends Google Cloud client's
|
||||
// user-agent to the original request's user-agent header.
|
||||
type Transport struct {
|
||||
// TODO(bradfitz): delete internal.Transport. It's too wrappy for what it does.
|
||||
// Do User-Agent some other way.
|
||||
|
||||
// Base is the actual http.RoundTripper
|
||||
// requests will use. It must not be nil.
|
||||
Base http.RoundTripper
|
||||
}
|
||||
|
||||
// RoundTrip appends a user-agent to the existing user-agent
|
||||
// header and delegates the request to the base http.RoundTripper.
|
||||
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
req = cloneRequest(req)
|
||||
ua := req.Header.Get("User-Agent")
|
||||
if ua == "" {
|
||||
ua = userAgent
|
||||
} else {
|
||||
ua = fmt.Sprintf("%s %s", ua, userAgent)
|
||||
}
|
||||
req.Header.Set("User-Agent", ua)
|
||||
return t.Base.RoundTrip(req)
|
||||
}
|
||||
|
||||
// cloneRequest returns a clone of the provided *http.Request.
|
||||
// The clone is a shallow copy of the struct and its Header map.
|
||||
func cloneRequest(r *http.Request) *http.Request {
|
||||
// shallow copy of the struct
|
||||
r2 := new(http.Request)
|
||||
*r2 = *r
|
||||
// deep copy of the Header
|
||||
r2.Header = make(http.Header)
|
||||
for k, s := range r.Header {
|
||||
r2.Header[k] = s
|
||||
}
|
||||
return r2
|
||||
}
|
||||
3
vendor/cloud.google.com/go/internal/retry.go
generated
vendored
@@ -27,7 +27,8 @@ import (
|
||||
// backoff parameters. It returns when one of the following occurs:
|
||||
// When f's first return value is true, Retry immediately returns with f's second
|
||||
// return value.
|
||||
// When the provided context is done, Retry returns with ctx.Err().
|
||||
// When the provided context is done, Retry returns with an error that
|
||||
// includes both ctx.Error() and the last error returned by f.
|
||||
func Retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error)) error {
|
||||
return retry(ctx, bo, f, gax.Sleep)
|
||||
}
|
||||
|
||||
6
vendor/cloud.google.com/go/internal/version/update_version.sh
generated
vendored
Executable file
@@ -0,0 +1,6 @@
|
||||
#!/bin/bash
|
||||
|
||||
today=$(date +%Y%m%d)
|
||||
|
||||
sed -i -r -e 's/const Repo = "([0-9]{8})"/const Repo = "'$today'"/' $GOFILE
|
||||
|
||||
71
vendor/cloud.google.com/go/internal/version/version.go
generated
vendored
Normal file
@@ -0,0 +1,71 @@
|
||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:generate ./update_version.sh
|
||||
|
||||
// Package version contains version information for Google Cloud Client
|
||||
// Libraries for Go, as reported in request headers.
|
||||
package version
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"strings"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
// Repo is the current version of the client libraries in this
|
||||
// repo. It should be a date in YYYYMMDD format.
|
||||
const Repo = "20170404"
|
||||
|
||||
// Go returns the Go runtime version. The returned string
|
||||
// has no whitespace.
|
||||
func Go() string {
|
||||
return goVersion
|
||||
}
|
||||
|
||||
var goVersion = goVer(runtime.Version())
|
||||
|
||||
const develPrefix = "devel +"
|
||||
|
||||
func goVer(s string) string {
|
||||
if strings.HasPrefix(s, develPrefix) {
|
||||
s = s[len(develPrefix):]
|
||||
if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
|
||||
s = s[:p]
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
if strings.HasPrefix(s, "go1") {
|
||||
s = s[2:]
|
||||
var prerelease string
|
||||
if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
|
||||
s, prerelease = s[:p], s[p:]
|
||||
}
|
||||
if strings.HasSuffix(s, ".") {
|
||||
s += "0"
|
||||
} else if strings.Count(s, ".") < 2 {
|
||||
s += ".0"
|
||||
}
|
||||
if prerelease != "" {
|
||||
s += "-" + prerelease
|
||||
}
|
||||
return s
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func notSemverRune(r rune) bool {
|
||||
return strings.IndexRune("0123456789.", r) < 0
|
||||
}
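For reference, the normalization goVer performs on runtime.Version() strings, worked out by hand from the logic above (goVer is unexported, so these are not runnable from outside the package):

// goVer("go1.8.1")           -> "1.8.1"
// goVer("go1.8")             -> "1.8.0"      // missing patch level is padded with ".0"
// goVer("go1.8rc2")          -> "1.8.0-rc2"  // non-semver tail becomes a prerelease suffix
// goVer("devel +abc123 ...") -> "abc123"     // devel builds keep the commit token
// goVer("weird-version")     -> ""           // unrecognized prefixes normalize to empty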
|
||||
36
vendor/cloud.google.com/go/storage/acl.go
generated
vendored
@@ -96,7 +96,9 @@ func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) {
|
||||
var acls *raw.ObjectAccessControls
|
||||
var err error
|
||||
err = runWithRetry(ctx, func() error {
|
||||
acls, err = a.c.raw.DefaultObjectAccessControls.List(a.bucket).Context(ctx).Do()
|
||||
req := a.c.raw.DefaultObjectAccessControls.List(a.bucket).Context(ctx)
|
||||
setClientHeader(req.Header())
|
||||
acls, err = req.Do()
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
@@ -112,7 +114,9 @@ func (a *ACLHandle) bucketDefaultSet(ctx context.Context, entity ACLEntity, role
|
||||
Role: string(role),
|
||||
}
|
||||
err := runWithRetry(ctx, func() error {
|
||||
_, err := a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl).Context(ctx).Do()
|
||||
req := a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl).Context(ctx)
|
||||
setClientHeader(req.Header())
|
||||
_, err := req.Do()
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
@@ -123,7 +127,9 @@ func (a *ACLHandle) bucketDefaultSet(ctx context.Context, entity ACLEntity, role
|
||||
|
||||
func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error {
|
||||
err := runWithRetry(ctx, func() error {
|
||||
return a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity)).Context(ctx).Do()
|
||||
req := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity)).Context(ctx)
|
||||
setClientHeader(req.Header())
|
||||
return req.Do()
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("storage: error deleting default ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
|
||||
@@ -135,7 +141,9 @@ func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) {
|
||||
var acls *raw.BucketAccessControls
|
||||
var err error
|
||||
err = runWithRetry(ctx, func() error {
|
||||
acls, err = a.c.raw.BucketAccessControls.List(a.bucket).Context(ctx).Do()
|
||||
req := a.c.raw.BucketAccessControls.List(a.bucket).Context(ctx)
|
||||
setClientHeader(req.Header())
|
||||
acls, err = req.Do()
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
@@ -156,7 +164,9 @@ func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRol
|
||||
Role: string(role),
|
||||
}
|
||||
err := runWithRetry(ctx, func() error {
|
||||
_, err := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl).Context(ctx).Do()
|
||||
req := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl).Context(ctx)
|
||||
setClientHeader(req.Header())
|
||||
_, err := req.Do()
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
@@ -167,7 +177,9 @@ func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRol
|
||||
|
||||
func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error {
|
||||
err := runWithRetry(ctx, func() error {
|
||||
return a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity)).Context(ctx).Do()
|
||||
req := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity)).Context(ctx)
|
||||
setClientHeader(req.Header())
|
||||
return req.Do()
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("storage: error deleting bucket ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
|
||||
@@ -179,7 +191,9 @@ func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) {
|
||||
var acls *raw.ObjectAccessControls
|
||||
var err error
|
||||
err = runWithRetry(ctx, func() error {
|
||||
acls, err = a.c.raw.ObjectAccessControls.List(a.bucket, a.object).Context(ctx).Do()
|
||||
req := a.c.raw.ObjectAccessControls.List(a.bucket, a.object).Context(ctx)
|
||||
setClientHeader(req.Header())
|
||||
acls, err = req.Do()
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
@@ -195,7 +209,9 @@ func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRol
|
||||
Role: string(role),
|
||||
}
|
||||
err := runWithRetry(ctx, func() error {
|
||||
_, err := a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl).Context(ctx).Do()
|
||||
req := a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl).Context(ctx)
|
||||
setClientHeader(req.Header())
|
||||
_, err := req.Do()
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
@@ -206,7 +222,9 @@ func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRol
|
||||
|
||||
func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error {
|
||||
err := runWithRetry(ctx, func() error {
|
||||
return a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity)).Context(ctx).Do()
|
||||
req := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity)).Context(ctx)
|
||||
setClientHeader(req.Header())
|
||||
return req.Do()
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("storage: error deleting object ACL entry for bucket %q, file %q, entity %q: %v", a.bucket, a.object, entity, err)
|
||||
|
||||
10
vendor/cloud.google.com/go/storage/bucket.go
generated
vendored
@@ -35,12 +35,14 @@ func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *Buck
|
||||
}
|
||||
bkt.Name = b.name
|
||||
req := b.c.raw.Buckets.Insert(projectID, bkt)
|
||||
setClientHeader(req.Header())
|
||||
return runWithRetry(ctx, func() error { _, err := req.Context(ctx).Do(); return err })
|
||||
}
|
||||
|
||||
// Delete deletes the Bucket.
|
||||
func (b *BucketHandle) Delete(ctx context.Context) error {
|
||||
req := b.c.raw.Buckets.Delete(b.name)
|
||||
setClientHeader(req.Header())
|
||||
return runWithRetry(ctx, func() error { return req.Context(ctx).Do() })
|
||||
}
|
||||
|
||||
@@ -80,10 +82,12 @@ func (b *BucketHandle) Object(name string) *ObjectHandle {
|
||||
|
||||
// Attrs returns the metadata for the bucket.
|
||||
func (b *BucketHandle) Attrs(ctx context.Context) (*BucketAttrs, error) {
|
||||
req := b.c.raw.Buckets.Get(b.name).Projection("full")
|
||||
setClientHeader(req.Header())
|
||||
var resp *raw.Bucket
|
||||
var err error
|
||||
err = runWithRetry(ctx, func() error {
|
||||
resp, err = b.c.raw.Buckets.Get(b.name).Projection("full").Context(ctx).Do()
|
||||
resp, err = req.Context(ctx).Do()
|
||||
return err
|
||||
})
|
||||
if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
|
||||
@@ -113,7 +117,7 @@ type BucketAttrs struct {
|
||||
// MetaGeneration is the metadata generation of the bucket.
|
||||
MetaGeneration int64
|
||||
|
||||
// StorageClass is the storage class of the bucket. This defines
|
||||
// StorageClass is the default storage class of the bucket. This defines
|
||||
// how objects in the bucket are stored and determines the SLA
|
||||
// and the cost of storage. Typical values are "MULTI_REGIONAL",
|
||||
// "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD" and
|
||||
@@ -231,6 +235,7 @@ func (it *ObjectIterator) Next() (*ObjectAttrs, error) {
|
||||
|
||||
func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error) {
|
||||
req := it.bucket.c.raw.Objects.List(it.bucket.name)
|
||||
setClientHeader(req.Header())
|
||||
req.Projection("full")
|
||||
req.Delimiter(it.query.Delimiter)
|
||||
req.Prefix(it.query.Prefix)
|
||||
@@ -309,6 +314,7 @@ func (it *BucketIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
|
||||
|
||||
func (it *BucketIterator) fetch(pageSize int, pageToken string) (string, error) {
|
||||
req := it.client.raw.Buckets.List(it.projectID)
|
||||
setClientHeader(req.Header())
|
||||
req.Projection("full")
|
||||
req.Prefix(it.Prefix)
|
||||
req.PageToken(pageToken)
|
||||
|
||||
22
vendor/cloud.google.com/go/storage/copy.go
generated
vendored
@@ -12,15 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.

// Package storage contains a Google Cloud Storage client.
//
// This package is experimental and may make backwards-incompatible changes.
package storage

import (
"errors"
"fmt"
"reflect"

"golang.org/x/net/context"
raw "google.golang.org/api/storage/v1"

@@ -71,17 +67,11 @@ func (c *Copier) Run(ctx context.Context) (*ObjectAttrs, error) {
if err := c.dst.validate(); err != nil {
return nil, err
}
var rawObject *raw.Object
// If any attribute was set, then we make sure the name matches the destination
// name, and we check that ContentType is non-empty so we can provide a better
// error message than the service.
if !reflect.DeepEqual(c.ObjectAttrs, ObjectAttrs{}) {
c.ObjectAttrs.Name = c.dst.object
if c.ObjectAttrs.ContentType == "" {
return nil, errors.New("storage: Copier.ContentType must be non-empty")
}
rawObject = c.ObjectAttrs.toRawObject(c.dst.bucket)
}
// Convert destination attributes to raw form, omitting the bucket.
// If the bucket is included but name or content-type aren't, the service
// returns a 400 with "Required" as the only message. Omitting the bucket
// does not cause any problems.
rawObject := c.ObjectAttrs.toRawObject("")
for {
res, err := c.callRewrite(ctx, c.src, rawObject)
if err != nil {

@@ -118,6 +108,7 @@ func (c *Copier) callRewrite(ctx context.Context, src *ObjectHandle, rawObj *raw
}
var res *raw.RewriteResponse
var err error
setClientHeader(call.Header())
err = runWithRetry(ctx, func() error { res, err = call.Do(); return err })
if err != nil {
return nil, err

@@ -189,6 +180,7 @@ func (c *Composer) Run(ctx context.Context) (*ObjectAttrs, error) {
}
var obj *raw.Object
var err error
setClientHeader(call.Header())
err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
if err != nil {
return nil, err
5 vendor/cloud.google.com/go/storage/doc.go generated vendored
@@ -152,5 +152,10 @@ SignedURL for details.
// TODO: Handle error.
}
fmt.Println(url)

Authentication

See examples of authorization and authentication at
https://godoc.org/cloud.google.com/go#pkg-examples.
*/
package storage // import "cloud.google.com/go/storage"
108 vendor/cloud.google.com/go/storage/iam.go generated vendored Normal file
@@ -0,0 +1,108 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package storage

import (
"cloud.google.com/go/iam"
"golang.org/x/net/context"
raw "google.golang.org/api/storage/v1"
iampb "google.golang.org/genproto/googleapis/iam/v1"
)

// IAM provides access to IAM access control for the bucket.
func (b *BucketHandle) IAM() *iam.Handle {
return iam.InternalNewHandleClient(&iamClient{raw: b.c.raw}, b.name)
}

// iamClient implements the iam.client interface.
type iamClient struct {
raw *raw.Service
}

func (c *iamClient) Get(ctx context.Context, resource string) (*iampb.Policy, error) {
req := c.raw.Buckets.GetIamPolicy(resource)
setClientHeader(req.Header())
var rp *raw.Policy
var err error
err = runWithRetry(ctx, func() error {
rp, err = req.Context(ctx).Do()
return err
})
if err != nil {
return nil, err
}
return iamFromStoragePolicy(rp), nil
}

func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) error {
rp := iamToStoragePolicy(p)
req := c.raw.Buckets.SetIamPolicy(resource, rp)
setClientHeader(req.Header())
return runWithRetry(ctx, func() error {
_, err := req.Context(ctx).Do()
return err
})
}

func (c *iamClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) {
req := c.raw.Buckets.TestIamPermissions(resource, perms)
setClientHeader(req.Header())
var res *raw.TestIamPermissionsResponse
var err error
err = runWithRetry(ctx, func() error {
res, err = req.Context(ctx).Do()
return err
})
if err != nil {
return nil, err
}
return res.Permissions, nil
}

func iamToStoragePolicy(ip *iampb.Policy) *raw.Policy {
return &raw.Policy{
Bindings: iamToStorageBindings(ip.Bindings),
Etag: string(ip.Etag),
}
}

func iamToStorageBindings(ibs []*iampb.Binding) []*raw.PolicyBindings {
var rbs []*raw.PolicyBindings
for _, ib := range ibs {
rbs = append(rbs, &raw.PolicyBindings{
Role: ib.Role,
Members: ib.Members,
})
}
return rbs
}

func iamFromStoragePolicy(rp *raw.Policy) *iampb.Policy {
return &iampb.Policy{
Bindings: iamFromStorageBindings(rp.Bindings),
Etag: []byte(rp.Etag),
}
}

func iamFromStorageBindings(rbs []*raw.PolicyBindings) []*iampb.Binding {
var ibs []*iampb.Binding
for _, rb := range rbs {
ibs = append(ibs, &iampb.Binding{
Role: rb.Role,
Members: rb.Members,
})
}
return ibs
}
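The new iam.go wires the bucket into the generic `cloud.google.com/go/iam` handle. A hedged sketch of what this enables for callers, assuming the `Policy`, `Roles`, and `TestPermissions` surface exposed by that package's `Handle` and `Policy` types; the bucket name and permission string are placeholders.

```go
package main

import (
	"log"

	"cloud.google.com/go/storage"
	"golang.org/x/net/context"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	handle := client.Bucket("example-bucket").IAM() // new in this revision

	// Read the current policy (requires permission to get the bucket's IAM policy).
	policy, err := handle.Policy(ctx)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("roles bound on bucket: %v", policy.Roles())

	// Check which of the listed permissions the caller actually holds.
	perms, err := handle.TestPermissions(ctx, []string{"storage.buckets.get"})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("granted: %v", perms)
}
```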
17 vendor/cloud.google.com/go/storage/reader.go generated vendored
@@ -15,15 +15,22 @@
package storage

import (
"fmt"
"hash/crc32"
"io"
)

var crc32cTable = crc32.MakeTable(crc32.Castagnoli)

// Reader reads a Cloud Storage object.
// It implements io.Reader.
type Reader struct {
body io.ReadCloser
remain, size int64
contentType string
checkCRC bool // should we check the CRC?
wantCRC uint32 // the CRC32c value the server sent in the header
gotCRC uint32 // running crc
}

// Close closes the Reader. It must be called when done reading.

@@ -36,6 +43,16 @@ func (r *Reader) Read(p []byte) (int, error) {
if r.remain != -1 {
r.remain -= int64(n)
}
if r.checkCRC {
r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, p[:n])
// Check CRC here. It would be natural to check it in Close, but
// everybody defers Close on the assumption that it doesn't return
// anything worth looking at.
if r.remain == 0 && r.gotCRC != r.wantCRC {
return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d",
r.gotCRC, r.wantCRC)
}
}
return n, err
}
84 vendor/cloud.google.com/go/storage/storage.go generated vendored
@@ -39,6 +39,7 @@ import (
"google.golang.org/api/transport"

"cloud.google.com/go/internal/optional"
"cloud.google.com/go/internal/version"
"golang.org/x/net/context"
"google.golang.org/api/googleapi"
raw "google.golang.org/api/storage/v1"

@@ -65,6 +66,12 @@ const (
ScopeReadWrite = raw.DevstorageReadWriteScope
)

var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), version.Repo)

func setClientHeader(headers http.Header) {
headers.Set("x-goog-api-client", xGoogHeader)
}

// Client is a client for interacting with Google Cloud Storage.
//
// Clients should be reused instead of created as needed.

@@ -202,7 +209,7 @@ type SignedURLOptions struct {
// If provided, the client should provide the exact value on the request
// header in order to use the signed URL.
// Optional.
MD5 []byte
MD5 string
}

// SignedURL returns a URL for the specified object. Signed URLs allow

@@ -225,6 +232,12 @@ func SignedURL(bucket, name string, opts *SignedURLOptions) (string, error) {
if opts.Expires.IsZero() {
return "", errors.New("storage: missing required expires option")
}
if opts.MD5 != "" {
md5, err := base64.StdEncoding.DecodeString(opts.MD5)
if err != nil || len(md5) != 16 {
return "", errors.New("storage: invalid MD5 checksum")
}
}

signBytes := opts.SignBytes
if opts.PrivateKey != nil {

@@ -241,8 +254,6 @@ func SignedURL(bucket, name string, opts *SignedURLOptions) (string, error) {
sum[:],
)
}
} else {
signBytes = opts.SignBytes
}

u := &url.URL{

@@ -254,7 +265,9 @@ func SignedURL(bucket, name string, opts *SignedURLOptions) (string, error) {
fmt.Fprintf(buf, "%s\n", opts.MD5)
fmt.Fprintf(buf, "%s\n", opts.ContentType)
fmt.Fprintf(buf, "%d\n", opts.Expires.Unix())
fmt.Fprintf(buf, "%s", strings.Join(opts.Headers, "\n"))
if len(opts.Headers) > 0 {
fmt.Fprintf(buf, "%s\n", strings.Join(opts.Headers, "\n"))
}
fmt.Fprintf(buf, "%s", u.String())

b, err := signBytes(buf.Bytes())

@@ -339,6 +352,7 @@ func (o *ObjectHandle) Attrs(ctx context.Context) (*ObjectAttrs, error) {
}
var obj *raw.Object
var err error
setClientHeader(call.Header())
err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
return nil, ErrObjectNotExist

@@ -412,6 +426,7 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (
}
var obj *raw.Object
var err error
setClientHeader(call.Header())
err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
return nil, ErrObjectNotExist

@@ -452,6 +467,7 @@ func (o *ObjectHandle) Delete(ctx context.Context) error {
if err := applyConds("Delete", o.gen, o.conds, call); err != nil {
return err
}
setClientHeader(call.Header())
err := runWithRetry(ctx, func() error { return call.Do() })
switch e := err.(type) {
case nil:

@@ -560,15 +576,37 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
body.Close()
body = emptyBody
}

var (
checkCRC bool
crc uint32
)
// Even if there is a CRC header, we can't compute the hash on partial data.
if remain == size {
crc, checkCRC = parseCRC32c(res)
}
return &Reader{
body: body,
size: size,
remain: remain,
contentType: res.Header.Get("Content-Type"),
wantCRC: crc,
checkCRC: checkCRC,
}, nil
}

func parseCRC32c(res *http.Response) (uint32, bool) {
const prefix = "crc32c="
for _, spec := range res.Header["X-Goog-Hash"] {
if strings.HasPrefix(spec, prefix) {
c, err := decodeUint32(spec[len(prefix):])
if err == nil {
return c, true
}
}
}
return 0, false
}

var emptyBody = ioutil.NopCloser(strings.NewReader(""))

// NewWriter returns a storage Writer that writes to the GCS object

@@ -656,6 +694,7 @@ func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object {
ContentLanguage: o.ContentLanguage,
CacheControl: o.CacheControl,
ContentDisposition: o.ContentDisposition,
StorageClass: o.StorageClass,
Acl: acl,
Metadata: o.Metadata,
}

@@ -717,21 +756,20 @@ type ObjectAttrs struct {
// This field is read-only.
Generation int64

// MetaGeneration is the version of the metadata for this
// Metageneration is the version of the metadata for this
// object at this generation. This field is used for preconditions
// and for detecting changes in metadata. A metageneration number
// is only meaningful in the context of a particular generation
// of a particular object. This field is read-only.
MetaGeneration int64
Metageneration int64

// StorageClass is the storage class of the bucket.
// StorageClass is the storage class of the object.
// This value defines how objects in the bucket are stored and
// determines the SLA and the cost of storage. Typical values are
// "MULTI_REGIONAL", "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD"
// and "DURABLE_REDUCED_AVAILABILITY".
// It defaults to "STANDARD", which is equivalent to "MULTI_REGIONAL"
// or "REGIONAL" depending on the bucket's location settings. This
// field is read-only.
// or "REGIONAL" depending on the bucket's location settings.
StorageClass string

// Created is the time the object was created. This field is read-only.

@@ -786,11 +824,7 @@ func newObject(o *raw.Object) *ObjectAttrs {
owner = o.Owner.Entity
}
md5, _ := base64.StdEncoding.DecodeString(o.Md5Hash)
var crc32c uint32
d, err := base64.StdEncoding.DecodeString(o.Crc32c)
if err == nil && len(d) == 4 {
crc32c = uint32(d[0])<<24 + uint32(d[1])<<16 + uint32(d[2])<<8 + uint32(d[3])
}
crc32c, _ := decodeUint32(o.Crc32c)
var sha256 string
if o.CustomerEncryption != nil {
sha256 = o.CustomerEncryption.KeySha256

@@ -810,7 +844,7 @@ func newObject(o *raw.Object) *ObjectAttrs {
MediaLink: o.MediaLink,
Metadata: o.Metadata,
Generation: o.Generation,
MetaGeneration: o.Metageneration,
Metageneration: o.Metageneration,
StorageClass: o.StorageClass,
CustomerKeySHA256: sha256,
Created: convertTime(o.TimeCreated),

@@ -819,6 +853,24 @@ func newObject(o *raw.Object) *ObjectAttrs {
}
}

// Decode a uint32 encoded in Base64 in big-endian byte order.
func decodeUint32(b64 string) (uint32, error) {
d, err := base64.StdEncoding.DecodeString(b64)
if err != nil {
return 0, err
}
if len(d) != 4 {
return 0, fmt.Errorf("storage: %q does not encode a 32-bit value", d)
}
return uint32(d[0])<<24 + uint32(d[1])<<16 + uint32(d[2])<<8 + uint32(d[3]), nil
}

// Encode a uint32 as Base64 in big-endian byte order.
func encodeUint32(u uint32) string {
b := []byte{byte(u >> 24), byte(u >> 16), byte(u >> 8), byte(u)}
return base64.StdEncoding.EncodeToString(b)
}
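A worked example of the two helpers just added (kept in-package, since they are unexported): a CRC32C value of 0x01020304 travels over the wire as the big-endian base64 string "AQIDBA==".

```go
// Round-trip sketch for encodeUint32/decodeUint32 above.
func exampleUint32RoundTrip() {
	s := encodeUint32(0x01020304) // yields "AQIDBA=="
	v, err := decodeUint32(s)
	if err != nil || v != 0x01020304 {
		panic("storage: uint32 base64 round trip failed")
	}
}
```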
// Query represents a query to filter objects from a bucket.
type Query struct {
// Delimiter returns results in a directory-like fashion.
18 vendor/cloud.google.com/go/storage/writer.go generated vendored
@@ -15,6 +15,7 @@
package storage

import (
"encoding/base64"
"errors"
"fmt"
"io"

@@ -32,6 +33,11 @@ type Writer struct {
// attributes are ignored.
ObjectAttrs

// SendCRC specifies whether to transmit a CRC32C field. It should be set
// to true in addition to setting the Writer's CRC32C field, because zero
// is a valid CRC and normally a zero would not be transmitted.
SendCRC32C bool

// ChunkSize controls the maximum number of bytes of the object that the
// Writer will attempt to send to the server in a single request. Objects
// smaller than the size will be sent in a single request, while larger

@@ -81,7 +87,14 @@ func (w *Writer) open() error {
go func() {
defer close(w.donec)

call := w.o.c.raw.Objects.Insert(w.o.bucket, attrs.toRawObject(w.o.bucket)).
rawObj := attrs.toRawObject(w.o.bucket)
if w.SendCRC32C {
rawObj.Crc32c = encodeUint32(attrs.CRC32C)
}
if w.MD5 != nil {
rawObj.Md5Hash = base64.StdEncoding.EncodeToString(w.MD5)
}
call := w.o.c.raw.Objects.Insert(w.o.bucket, rawObj).
Media(pr, mediaOpts...).
Projection("full").
Context(w.ctx)

@@ -93,6 +106,7 @@ func (w *Writer) open() error {
var resp *raw.Object
err := applyConds("NewWriter", w.o.gen, w.o.conds, call)
if err == nil {
setClientHeader(call.Header())
resp, err = call.Do()
}
if err != nil {

@@ -120,7 +134,7 @@ func (w *Writer) Write(p []byte) (n int, err error) {

// Close completes the write operation and flushes any buffered data.
// If Close doesn't return an error, metadata about the written object
// can be retrieved by calling Object.
// can be retrieved by calling Attrs.
func (w *Writer) Close() error {
if !w.opened {
if err := w.open(); err != nil {
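Taken together with the Writer changes above, integrity checking on upload looks roughly like the sketch below. It assumes the `CRC32C` and `MD5` fields arrive via the embedded `ObjectAttrs` alongside the new `SendCRC32C` flag; bucket and object names are placeholders.

```go
package main

import (
	"hash/crc32"
	"log"

	"cloud.google.com/go/storage"
	"golang.org/x/net/context"
)

func uploadWithCRC(ctx context.Context, client *storage.Client, data []byte) error {
	w := client.Bucket("example-bucket").Object("example-object").NewWriter(ctx)
	w.CRC32C = crc32.Checksum(data, crc32.MakeTable(crc32.Castagnoli))
	w.SendCRC32C = true // must be set explicitly; a zero checksum is valid and would otherwise be dropped
	if _, err := w.Write(data); err != nil {
		w.Close()
		return err
	}
	// After a successful Close, metadata for the written object is available via w.Attrs().
	return w.Close()
}

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	if err := uploadWithCRC(ctx, client, []byte("hello")); err != nil {
		log.Fatal(err)
	}
}
```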
5 vendor/github.com/Azure/azure-sdk-for-go/storage/README.md generated vendored
@@ -1,5 +0,0 @@
# Azure Storage SDK for Go

The `github.com/Azure/azure-sdk-for-go/storage` package is used to perform operations in Azure Storage Service. To manage your storage accounts (Azure Resource Manager / ARM), use the [github.com/Azure/azure-sdk-for-go/arm/storage](../arm/storage) package. For your classic storage accounts (Azure Service Management / ASM), use [github.com/Azure/azure-sdk-for-go/management/storageservice](../management/storageservice) package.

This package includes support for [Azure Storage Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/)
5 vendor/github.com/Azure/azure-sdk-for-go/storage/version.go generated vendored
@@ -1,5 +0,0 @@
package storage

var (
sdkVersion = "8.0.0-beta"
)
21 vendor/github.com/Azure/azure-storage-go/LICENSE generated vendored Normal file
@@ -0,0 +1,21 @@
MIT License

Copyright (c) Microsoft Corporation. All rights reserved.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE
10 vendor/github.com/Azure/azure-storage-go/README.md generated vendored Normal file
@@ -0,0 +1,10 @@
# Azure Storage SDK for Go
[](https://godoc.org/github.com/Azure/azure-storage-go) [](https://travis-ci.org/Azure/azure-storage-go) [](https://goreportcard.com/report/github.com/Azure/azure-storage-go)

The `github.com/Azure/azure-sdk-for-go/storage` package is used to perform operations in Azure Storage Service. To manage your storage accounts (Azure Resource Manager / ARM), use the [github.com/Azure/azure-sdk-for-go/arm/storage](../arm/storage) package. For your classic storage accounts (Azure Service Management / ASM), use [github.com/Azure/azure-sdk-for-go/management/storageservice](../management/storageservice) package.

This package includes support for [Azure Storage Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/)

# Contributing

This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
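The remainder of this diff moves the vendored Azure blob client over to github.com/Azure/azure-storage-go. A minimal sketch of constructing that client, assuming the import path above and that `Client.GetBlobService` returns the `BlobStorageClient` shown later in this diff; the account name, key, and container name are placeholders.

```go
package main

import (
	"log"

	storage "github.com/Azure/azure-storage-go"
)

func main() {
	// Placeholder credentials; the key must be the account's base64-encoded access key.
	client, err := storage.NewBasicClient("exampleaccount", "ZmFrZS1rZXk=")
	if err != nil {
		log.Fatal(err)
	}
	blobCli := client.GetBlobService() // assumed accessor returning BlobStorageClient
	exists, err := blobCli.ContainerExists("example-container")
	if err != nil {
		log.Fatal(err)
	}
	log.Println("container exists:", exists)
}
```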
@@ -13,45 +13,6 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// BlobStorageClient contains operations for Microsoft Azure Blob Storage
|
||||
// Service.
|
||||
type BlobStorageClient struct {
|
||||
client Client
|
||||
auth authentication
|
||||
}
|
||||
|
||||
// A Container is an entry in ContainerListResponse.
|
||||
type Container struct {
|
||||
Name string `xml:"Name"`
|
||||
Properties ContainerProperties `xml:"Properties"`
|
||||
// TODO (ahmetalpbalkan) Metadata
|
||||
}
|
||||
|
||||
// ContainerProperties contains various properties of a container returned from
|
||||
// various endpoints like ListContainers.
|
||||
type ContainerProperties struct {
|
||||
LastModified string `xml:"Last-Modified"`
|
||||
Etag string `xml:"Etag"`
|
||||
LeaseStatus string `xml:"LeaseStatus"`
|
||||
LeaseState string `xml:"LeaseState"`
|
||||
LeaseDuration string `xml:"LeaseDuration"`
|
||||
// TODO (ahmetalpbalkan) remaining fields
|
||||
}
|
||||
|
||||
// ContainerListResponse contains the response fields from
|
||||
// ListContainers call.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
|
||||
type ContainerListResponse struct {
|
||||
XMLName xml.Name `xml:"EnumerationResults"`
|
||||
Xmlns string `xml:"xmlns,attr"`
|
||||
Prefix string `xml:"Prefix"`
|
||||
Marker string `xml:"Marker"`
|
||||
NextMarker string `xml:"NextMarker"`
|
||||
MaxResults int64 `xml:"MaxResults"`
|
||||
Containers []Container `xml:"Containers>Container"`
|
||||
}
|
||||
|
||||
// A Blob is an entry in BlobListResponse.
|
||||
type Blob struct {
|
||||
Name string `xml:"Name"`
|
||||
@@ -136,101 +97,6 @@ type BlobHeaders struct {
|
||||
CacheControl string `header:"x-ms-blob-cache-control"`
|
||||
}
|
||||
|
||||
// BlobListResponse contains the response fields from ListBlobs call.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
|
||||
type BlobListResponse struct {
|
||||
XMLName xml.Name `xml:"EnumerationResults"`
|
||||
Xmlns string `xml:"xmlns,attr"`
|
||||
Prefix string `xml:"Prefix"`
|
||||
Marker string `xml:"Marker"`
|
||||
NextMarker string `xml:"NextMarker"`
|
||||
MaxResults int64 `xml:"MaxResults"`
|
||||
Blobs []Blob `xml:"Blobs>Blob"`
|
||||
|
||||
// BlobPrefix is used to traverse blobs as if it were a file system.
|
||||
// It is returned if ListBlobsParameters.Delimiter is specified.
|
||||
// The list here can be thought of as "folders" that may contain
|
||||
// other folders or blobs.
|
||||
BlobPrefixes []string `xml:"Blobs>BlobPrefix>Name"`
|
||||
|
||||
// Delimiter is used to traverse blobs as if it were a file system.
|
||||
// It is returned if ListBlobsParameters.Delimiter is specified.
|
||||
Delimiter string `xml:"Delimiter"`
|
||||
}
|
||||
|
||||
// ListContainersParameters defines the set of customizable parameters to make a
|
||||
// List Containers call.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
|
||||
type ListContainersParameters struct {
|
||||
Prefix string
|
||||
Marker string
|
||||
Include string
|
||||
MaxResults uint
|
||||
Timeout uint
|
||||
}
|
||||
|
||||
func (p ListContainersParameters) getParameters() url.Values {
|
||||
out := url.Values{}
|
||||
|
||||
if p.Prefix != "" {
|
||||
out.Set("prefix", p.Prefix)
|
||||
}
|
||||
if p.Marker != "" {
|
||||
out.Set("marker", p.Marker)
|
||||
}
|
||||
if p.Include != "" {
|
||||
out.Set("include", p.Include)
|
||||
}
|
||||
if p.MaxResults != 0 {
|
||||
out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
|
||||
}
|
||||
if p.Timeout != 0 {
|
||||
out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
// ListBlobsParameters defines the set of customizable
|
||||
// parameters to make a List Blobs call.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
|
||||
type ListBlobsParameters struct {
|
||||
Prefix string
|
||||
Delimiter string
|
||||
Marker string
|
||||
Include string
|
||||
MaxResults uint
|
||||
Timeout uint
|
||||
}
|
||||
|
||||
func (p ListBlobsParameters) getParameters() url.Values {
|
||||
out := url.Values{}
|
||||
|
||||
if p.Prefix != "" {
|
||||
out.Set("prefix", p.Prefix)
|
||||
}
|
||||
if p.Delimiter != "" {
|
||||
out.Set("delimiter", p.Delimiter)
|
||||
}
|
||||
if p.Marker != "" {
|
||||
out.Set("marker", p.Marker)
|
||||
}
|
||||
if p.Include != "" {
|
||||
out.Set("include", p.Include)
|
||||
}
|
||||
if p.MaxResults != 0 {
|
||||
out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
|
||||
}
|
||||
if p.Timeout != 0 {
|
||||
out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
// BlobType defines the type of the Azure Blob.
|
||||
type BlobType string
|
||||
|
||||
@@ -289,44 +155,9 @@ const (
|
||||
BlockListTypeUncommitted BlockListType = "uncommitted"
|
||||
)
|
||||
|
||||
// ContainerAccessType defines the access level to the container from a public
|
||||
// request.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx and "x-ms-
|
||||
// blob-public-access" header.
|
||||
type ContainerAccessType string
|
||||
|
||||
// Access options for containers
|
||||
const (
|
||||
ContainerAccessTypePrivate ContainerAccessType = ""
|
||||
ContainerAccessTypeBlob ContainerAccessType = "blob"
|
||||
ContainerAccessTypeContainer ContainerAccessType = "container"
|
||||
)
|
||||
|
||||
// ContainerAccessPolicyDetails are used for SETTING container policies
|
||||
type ContainerAccessPolicyDetails struct {
|
||||
ID string
|
||||
StartTime time.Time
|
||||
ExpiryTime time.Time
|
||||
CanRead bool
|
||||
CanWrite bool
|
||||
CanDelete bool
|
||||
}
|
||||
|
||||
// ContainerPermissions is used when setting permissions and Access Policies for containers.
|
||||
type ContainerPermissions struct {
|
||||
AccessType ContainerAccessType
|
||||
AccessPolicies []ContainerAccessPolicyDetails
|
||||
}
|
||||
|
||||
// ContainerAccessHeader references header used when setting/getting container ACL
|
||||
const (
|
||||
ContainerAccessHeader string = "x-ms-blob-public-access"
|
||||
)
|
||||
|
||||
// Maximum sizes (per REST API) for various concepts
|
||||
const (
|
||||
MaxBlobBlockSize = 4 * 1024 * 1024
|
||||
MaxBlobBlockSize = 100 * 1024 * 1024
|
||||
MaxBlobPageSize = 4 * 1024 * 1024
|
||||
)
|
||||
|
||||
@@ -387,233 +218,6 @@ var (
|
||||
errBlobCopyIDMismatch = errors.New("storage: blob copy id is a mismatch")
|
||||
)
|
||||
|
||||
// ListContainers returns the list of containers in a storage account along with
|
||||
// pagination token and other response details.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
|
||||
func (b BlobStorageClient) ListContainers(params ListContainersParameters) (ContainerListResponse, error) {
|
||||
q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}})
|
||||
uri := b.client.getEndpoint(blobServiceName, "", q)
|
||||
headers := b.client.getStandardHeaders()
|
||||
|
||||
var out ContainerListResponse
|
||||
resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
|
||||
err = xmlUnmarshal(resp.body, &out)
|
||||
return out, err
|
||||
}
|
||||
|
||||
// CreateContainer creates a blob container within the storage account
|
||||
// with given name and access level. Returns error if container already exists.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx
|
||||
func (b BlobStorageClient) CreateContainer(name string, access ContainerAccessType) error {
|
||||
resp, err := b.createContainer(name, access)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||
}
|
||||
|
||||
// CreateContainerIfNotExists creates a blob container if it does not exist. Returns
|
||||
// true if container is newly created or false if container already exists.
|
||||
func (b BlobStorageClient) CreateContainerIfNotExists(name string, access ContainerAccessType) (bool, error) {
|
||||
resp, err := b.createContainer(name, access)
|
||||
if resp != nil {
|
||||
defer resp.body.Close()
|
||||
if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict {
|
||||
return resp.statusCode == http.StatusCreated, nil
|
||||
}
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
func (b BlobStorageClient) createContainer(name string, access ContainerAccessType) (*storageResponse, error) {
|
||||
uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}})
|
||||
|
||||
headers := b.client.getStandardHeaders()
|
||||
if access != "" {
|
||||
headers[ContainerAccessHeader] = string(access)
|
||||
}
|
||||
return b.client.exec(http.MethodPut, uri, headers, nil, b.auth)
|
||||
}
|
||||
|
||||
// ContainerExists returns true if a container with given name exists
|
||||
// on the storage account, otherwise returns false.
|
||||
func (b BlobStorageClient) ContainerExists(name string) (bool, error) {
|
||||
uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}})
|
||||
headers := b.client.getStandardHeaders()
|
||||
|
||||
resp, err := b.client.exec(http.MethodHead, uri, headers, nil, b.auth)
|
||||
if resp != nil {
|
||||
defer resp.body.Close()
|
||||
if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
|
||||
return resp.statusCode == http.StatusOK, nil
|
||||
}
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
// SetContainerPermissions sets up container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179391.aspx
|
||||
func (b BlobStorageClient) SetContainerPermissions(container string, containerPermissions ContainerPermissions, timeout int, leaseID string) (err error) {
|
||||
params := url.Values{
|
||||
"restype": {"container"},
|
||||
"comp": {"acl"},
|
||||
}
|
||||
|
||||
if timeout > 0 {
|
||||
params.Add("timeout", strconv.Itoa(timeout))
|
||||
}
|
||||
|
||||
uri := b.client.getEndpoint(blobServiceName, pathForContainer(container), params)
|
||||
headers := b.client.getStandardHeaders()
|
||||
if containerPermissions.AccessType != "" {
|
||||
headers[ContainerAccessHeader] = string(containerPermissions.AccessType)
|
||||
}
|
||||
|
||||
if leaseID != "" {
|
||||
headers[headerLeaseID] = leaseID
|
||||
}
|
||||
|
||||
body, length, err := generateContainerACLpayload(containerPermissions.AccessPolicies)
|
||||
headers["Content-Length"] = strconv.Itoa(length)
|
||||
resp, err := b.client.exec(http.MethodPut, uri, headers, body, b.auth)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resp != nil {
|
||||
defer resp.body.Close()
|
||||
|
||||
if resp.statusCode != http.StatusOK {
|
||||
return errors.New("Unable to set permissions")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetContainerPermissions gets the container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179469.aspx
|
||||
// If timeout is 0 then it will not be passed to Azure
|
||||
// leaseID will only be passed to Azure if populated
|
||||
// Returns permissionResponse which is combined permissions and AccessPolicy
|
||||
func (b BlobStorageClient) GetContainerPermissions(container string, timeout int, leaseID string) (*ContainerPermissions, error) {
|
||||
params := url.Values{"restype": {"container"},
|
||||
"comp": {"acl"}}
|
||||
|
||||
if timeout > 0 {
|
||||
params.Add("timeout", strconv.Itoa(timeout))
|
||||
}
|
||||
|
||||
uri := b.client.getEndpoint(blobServiceName, pathForContainer(container), params)
|
||||
headers := b.client.getStandardHeaders()
|
||||
|
||||
if leaseID != "" {
|
||||
headers[headerLeaseID] = leaseID
|
||||
}
|
||||
|
||||
resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
|
||||
var out AccessPolicy
|
||||
err = xmlUnmarshal(resp.body, &out.SignedIdentifiersList)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
permissionResponse := updateContainerAccessPolicy(out, &resp.headers)
|
||||
return &permissionResponse, nil
|
||||
}
|
||||
|
||||
func updateContainerAccessPolicy(ap AccessPolicy, headers *http.Header) ContainerPermissions {
|
||||
// containerAccess. Blob, Container, empty
|
||||
containerAccess := headers.Get(http.CanonicalHeaderKey(ContainerAccessHeader))
|
||||
|
||||
var cp ContainerPermissions
|
||||
cp.AccessType = ContainerAccessType(containerAccess)
|
||||
for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers {
|
||||
capd := ContainerAccessPolicyDetails{
|
||||
ID: policy.ID,
|
||||
StartTime: policy.AccessPolicy.StartTime,
|
||||
ExpiryTime: policy.AccessPolicy.ExpiryTime,
|
||||
}
|
||||
capd.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r")
|
||||
capd.CanWrite = updatePermissions(policy.AccessPolicy.Permission, "w")
|
||||
capd.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d")
|
||||
|
||||
cp.AccessPolicies = append(cp.AccessPolicies, capd)
|
||||
}
|
||||
|
||||
return cp
|
||||
}
|
||||
|
||||
// DeleteContainer deletes the container with given name on the storage
|
||||
// account. If the container does not exist returns error.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx
|
||||
func (b BlobStorageClient) DeleteContainer(name string) error {
|
||||
resp, err := b.deleteContainer(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
|
||||
}
|
||||
|
||||
// DeleteContainerIfExists deletes the container with given name on the storage
|
||||
// account if it exists. Returns true if container is deleted with this call, or
|
||||
// false if the container did not exist at the time of the Delete Container
|
||||
// operation.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx
|
||||
func (b BlobStorageClient) DeleteContainerIfExists(name string) (bool, error) {
|
||||
resp, err := b.deleteContainer(name)
|
||||
if resp != nil {
|
||||
defer resp.body.Close()
|
||||
if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
|
||||
return resp.statusCode == http.StatusAccepted, nil
|
||||
}
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
func (b BlobStorageClient) deleteContainer(name string) (*storageResponse, error) {
|
||||
uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}})
|
||||
|
||||
headers := b.client.getStandardHeaders()
|
||||
return b.client.exec(http.MethodDelete, uri, headers, nil, b.auth)
|
||||
}
|
||||
|
||||
// ListBlobs returns an object that contains list of blobs in the container,
|
||||
// pagination token and other information in the response of List Blobs call.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
|
||||
func (b BlobStorageClient) ListBlobs(container string, params ListBlobsParameters) (BlobListResponse, error) {
|
||||
q := mergeParams(params.getParameters(), url.Values{
|
||||
"restype": {"container"},
|
||||
"comp": {"list"}})
|
||||
uri := b.client.getEndpoint(blobServiceName, pathForContainer(container), q)
|
||||
headers := b.client.getStandardHeaders()
|
||||
|
||||
var out BlobListResponse
|
||||
resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
|
||||
err = xmlUnmarshal(resp.body, &out)
|
||||
return out, err
|
||||
}
|
||||
|
||||
// BlobExists returns true if a blob with given name exists on the specified
|
||||
// container of the storage account.
|
||||
func (b BlobStorageClient) BlobExists(container, name string) (bool, error) {
|
||||
@@ -621,7 +225,7 @@ func (b BlobStorageClient) BlobExists(container, name string) (bool, error) {
|
||||
headers := b.client.getStandardHeaders()
|
||||
resp, err := b.client.exec(http.MethodHead, uri, headers, nil, b.auth)
|
||||
if resp != nil {
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
|
||||
return resp.statusCode == http.StatusOK, nil
|
||||
}
|
||||
@@ -630,14 +234,15 @@ func (b BlobStorageClient) BlobExists(container, name string) (bool, error) {
|
||||
}
|
||||
|
||||
// GetBlobURL gets the canonical URL to the blob with the specified name in the
|
||||
// specified container. This method does not create a publicly accessible URL if
|
||||
// the blob or container is private and this method does not check if the blob
|
||||
// exists.
|
||||
// specified container. If name is not specified, the canonical URL for the entire
|
||||
// container is obtained.
|
||||
// This method does not create a publicly accessible URL if the blob or container
|
||||
// is private and this method does not check if the blob exists.
|
||||
func (b BlobStorageClient) GetBlobURL(container, name string) string {
|
||||
if container == "" {
|
||||
container = "$root"
|
||||
}
|
||||
return b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
|
||||
return b.client.getEndpoint(blobServiceName, pathForResource(container, name), url.Values{})
|
||||
}
|
||||
|
||||
// GetBlob returns a stream to read the blob. Caller must call Close() the
|
||||
@@ -701,7 +306,7 @@ func (b BlobStorageClient) leaseCommonPut(container string, name string, headers
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
if err := checkRespCode(resp.statusCode, []int{expectedStatus}); err != nil {
|
||||
return nil, err
|
||||
@@ -726,10 +331,12 @@ func (b BlobStorageClient) SnapshotBlob(container string, name string, timeout i
|
||||
|
||||
uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params)
|
||||
resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth)
|
||||
if err != nil {
|
||||
if err != nil || resp == nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -749,14 +356,22 @@ func (b BlobStorageClient) SnapshotBlob(container string, name string, timeout i
|
||||
|
||||
// AcquireLease creates a lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx
|
||||
// returns leaseID acquired
|
||||
// In API Versions starting on 2012-02-12, the minimum leaseTimeInSeconds is 15, the maximum
|
||||
// non-infinite leaseTimeInSeconds is 60. To specify an infinite lease, provide the value -1.
|
||||
func (b BlobStorageClient) AcquireLease(container string, name string, leaseTimeInSeconds int, proposedLeaseID string) (returnedLeaseID string, err error) {
|
||||
headers := b.client.getStandardHeaders()
|
||||
headers[leaseAction] = acquireLease
|
||||
|
||||
if leaseTimeInSeconds > 0 {
|
||||
headers[leaseDuration] = strconv.Itoa(leaseTimeInSeconds)
|
||||
if leaseTimeInSeconds == -1 {
|
||||
// Do nothing, but don't trigger the following clauses.
|
||||
} else if leaseTimeInSeconds > 60 || b.client.apiVersion < "2012-02-12" {
|
||||
leaseTimeInSeconds = 60
|
||||
} else if leaseTimeInSeconds < 15 {
|
||||
leaseTimeInSeconds = 15
|
||||
}
|
||||
|
||||
headers[leaseDuration] = strconv.Itoa(leaseTimeInSeconds)
|
||||
|
||||
if proposedLeaseID != "" {
|
||||
headers[leaseProposedID] = proposedLeaseID
|
||||
}
|
||||
@@ -871,7 +486,7 @@ func (b BlobStorageClient) GetBlobProperties(container, name string) (*BlobPrope
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
||||
return nil, err
|
||||
@@ -940,7 +555,7 @@ func (b BlobStorageClient) SetBlobProperties(container, name string, blobHeaders
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusOK})
|
||||
}
|
||||
@@ -971,7 +586,7 @@ func (b BlobStorageClient) SetBlobMetadata(container, name string, metadata map[
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusOK})
|
||||
}
|
||||
@@ -991,7 +606,7 @@ func (b BlobStorageClient) GetBlobMetadata(container, name string) (map[string]s
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
||||
return nil, err
|
||||
@@ -1030,7 +645,7 @@ func (b BlobStorageClient) CreateBlockBlob(container, name string) error {
|
||||
// reader. Size must be the number of bytes read from reader. To
|
||||
// create an empty blob, use size==0 and reader==nil.
|
||||
//
|
||||
// The API rejects requests with size > 64 MiB (but this limit is not
|
||||
// The API rejects requests with size > 256 MiB (but this limit is not
|
||||
// checked by the SDK). To write a larger blob, use CreateBlockBlob,
|
||||
// PutBlock, and PutBlockList.
|
||||
//
|
||||
@@ -1051,14 +666,14 @@ func (b BlobStorageClient) CreateBlockBlobFromReader(container, name string, siz
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||
}
|
||||
|
||||
// PutBlock saves the given data chunk to the specified block blob with
|
||||
// given ID.
|
||||
//
|
||||
// The API rejects chunks larger than 4 MiB (but this limit is not
|
||||
// The API rejects chunks larger than 100 MB (but this limit is not
|
||||
// checked by the SDK).
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx
|
||||
@@ -1070,7 +685,7 @@ func (b BlobStorageClient) PutBlock(container, name, blockID string, chunk []byt
|
||||
// the block blob with given ID. It is an alternative to PutBlocks where data
|
||||
// comes as stream but the length is known in advance.
|
||||
//
|
||||
// The API rejects requests with size > 4 MiB (but this limit is not
|
||||
// The API rejects requests with size > 100 MB (but this limit is not
|
||||
// checked by the SDK).
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx
|
||||
@@ -1090,7 +705,7 @@ func (b BlobStorageClient) PutBlockWithLength(container, name, blockID string, s
|
||||
return err
|
||||
}
|
||||
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||
}
|
||||
|
||||
@@ -1108,7 +723,7 @@ func (b BlobStorageClient) PutBlockList(container, name string, blocks []Block)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||
}
|
||||
|
||||
@@ -1152,7 +767,7 @@ func (b BlobStorageClient) PutPageBlob(container, name string, size int64, extra
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||
}
|
||||
@@ -1188,7 +803,7 @@ func (b BlobStorageClient) PutPage(container, name string, startByte, endByte in
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||
}
|
||||
@@ -1234,7 +849,7 @@ func (b BlobStorageClient) PutAppendBlob(container, name string, extraHeaders ma
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||
}
|
||||
@@ -1258,7 +873,7 @@ func (b BlobStorageClient) AppendBlock(container, name string, chunk []byte, ext
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||
}
|
||||
@@ -1293,7 +908,7 @@ func (b BlobStorageClient) StartBlobCopy(container, name, sourceBlob string) (st
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
if err := checkRespCode(resp.statusCode, []int{http.StatusAccepted, http.StatusCreated}); err != nil {
|
||||
return "", err
|
||||
@@ -1328,7 +943,7 @@ func (b BlobStorageClient) AbortBlobCopy(container, name, copyID, currentLeaseID
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
|
||||
return err
|
||||
@@ -1372,7 +987,7 @@ func (b BlobStorageClient) DeleteBlob(container, name string, extraHeaders map[s
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
|
||||
}
|
||||
|
||||
@@ -1383,7 +998,7 @@ func (b BlobStorageClient) DeleteBlob(container, name string, extraHeaders map[s
|
||||
func (b BlobStorageClient) DeleteBlobIfExists(container, name string, extraHeaders map[string]string) (bool, error) {
|
||||
resp, err := b.deleteBlob(container, name, extraHeaders)
|
||||
if resp != nil {
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
|
||||
return resp.statusCode == http.StatusAccepted, nil
|
||||
}
|
||||
@@ -1402,17 +1017,20 @@ func (b BlobStorageClient) deleteBlob(container, name string, extraHeaders map[s
|
||||
return b.client.exec(http.MethodDelete, uri, headers, nil, b.auth)
|
||||
}
|
||||
|
||||
// helper method to construct the path to a container given its name
|
||||
func pathForContainer(name string) string {
|
||||
return fmt.Sprintf("/%s", name)
|
||||
}
|
||||
|
||||
// helper method to construct the path to a blob given its container and blob
|
||||
// name
|
||||
func pathForBlob(container, name string) string {
|
||||
return fmt.Sprintf("/%s/%s", container, name)
|
||||
}
|
||||
|
||||
// helper method to construct the path to either a blob or container
|
||||
func pathForResource(container, name string) string {
|
||||
if len(name) > 0 {
|
||||
return fmt.Sprintf("/%s/%s", container, name)
|
||||
}
|
||||
return fmt.Sprintf("/%s", container)
|
||||
}
|
||||
|
||||
// GetBlobSASURIWithSignedIPAndProtocol creates an URL to the specified blob which contains the Shared
|
||||
// Access Signature with specified permissions and expiration time. Also includes signedIPRange and allowed protocols.
|
||||
// If old API version is used but no signedIP is passed (ie empty string) then this should still work.
|
||||
@@ -1442,7 +1060,12 @@ func (b BlobStorageClient) GetBlobSASURIWithSignedIPAndProtocol(container, name
|
||||
}
|
||||
|
||||
signedExpiry := expiry.UTC().Format(time.RFC3339)
|
||||
signedResource := "b"
|
||||
|
||||
//If blob name is missing, resource is a container
|
||||
signedResource := "c"
|
||||
if len(name) > 0 {
|
||||
signedResource = "b"
|
||||
}
|
||||
|
||||
protocols := "https,http"
|
||||
if HTTPSOnly {
|
||||
@@ -1505,35 +1128,3 @@ func blobSASStringToSign(signedVersion, canonicalizedResource, signedExpiry, sig
|
||||
|
||||
return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15")
|
||||
}
|
||||
|
||||
func generateContainerACLpayload(policies []ContainerAccessPolicyDetails) (io.Reader, int, error) {
|
||||
sil := SignedIdentifiers{
|
||||
SignedIdentifiers: []SignedIdentifier{},
|
||||
}
|
||||
for _, capd := range policies {
|
||||
permission := capd.generateContainerPermissions()
|
||||
signedIdentifier := convertAccessPolicyToXMLStructs(capd.ID, capd.StartTime, capd.ExpiryTime, permission)
|
||||
sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier)
|
||||
}
|
||||
return xmlMarshal(sil)
|
||||
}
|
||||
|
||||
func (capd *ContainerAccessPolicyDetails) generateContainerPermissions() (permissions string) {
|
||||
// generate the permissions string (rwd).
|
||||
// still want the end user API to have bool flags.
|
||||
permissions = ""
|
||||
|
||||
if capd.CanRead {
|
||||
permissions += "r"
|
||||
}
|
||||
|
||||
if capd.CanWrite {
|
||||
permissions += "w"
|
||||
}
|
||||
|
||||
if capd.CanDelete {
|
||||
permissions += "d"
|
||||
}
|
||||
|
||||
return permissions
|
||||
}
|
||||
92 vendor/github.com/Azure/azure-storage-go/blobserviceclient.go generated vendored Normal file
@@ -0,0 +1,92 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
// BlobStorageClient contains operations for Microsoft Azure Blob Storage
|
||||
// Service.
|
||||
type BlobStorageClient struct {
|
||||
client Client
|
||||
auth authentication
|
||||
}
|
||||
|
||||
// GetServiceProperties gets the properties of your storage account's blob service.
|
||||
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-blob-service-properties
|
||||
func (b *BlobStorageClient) GetServiceProperties() (*ServiceProperties, error) {
|
||||
return b.client.getServiceProperties(blobServiceName, b.auth)
|
||||
}
|
||||
|
||||
// SetServiceProperties sets the properties of your storage account's blob service.
|
||||
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-blob-service-properties
|
||||
func (b *BlobStorageClient) SetServiceProperties(props ServiceProperties) error {
|
||||
return b.client.setServiceProperties(props, blobServiceName, b.auth)
|
||||
}
|
||||
|
||||
// ListContainersParameters defines the set of customizable parameters to make a
|
||||
// List Containers call.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
|
||||
type ListContainersParameters struct {
|
||||
Prefix string
|
||||
Marker string
|
||||
Include string
|
||||
MaxResults uint
|
||||
Timeout uint
|
||||
}
|
||||
|
||||
// GetContainerReference returns a Container object for the specified container name.
|
||||
func (b BlobStorageClient) GetContainerReference(name string) Container {
|
||||
return Container{
|
||||
bsc: &b,
|
||||
Name: name,
|
||||
}
|
||||
}
|
||||
|
||||
// ListContainers returns the list of containers in a storage account along with
|
||||
// pagination token and other response details.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
|
||||
func (b BlobStorageClient) ListContainers(params ListContainersParameters) (*ContainerListResponse, error) {
|
||||
q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}})
|
||||
uri := b.client.getEndpoint(blobServiceName, "", q)
|
||||
headers := b.client.getStandardHeaders()
|
||||
|
||||
var out ContainerListResponse
|
||||
resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
err = xmlUnmarshal(resp.body, &out)
|
||||
|
||||
// assign our client to the newly created Container objects
|
||||
for i := range out.Containers {
|
||||
out.Containers[i].bsc = &b
|
||||
}
|
||||
return &out, err
|
||||
}
|
||||
|
||||
func (p ListContainersParameters) getParameters() url.Values {
|
||||
out := url.Values{}
|
||||
|
||||
if p.Prefix != "" {
|
||||
out.Set("prefix", p.Prefix)
|
||||
}
|
||||
if p.Marker != "" {
|
||||
out.Set("marker", p.Marker)
|
||||
}
|
||||
if p.Include != "" {
|
||||
out.Set("include", p.Include)
|
||||
}
|
||||
if p.MaxResults != 0 {
|
||||
out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
|
||||
}
|
||||
if p.Timeout != 0 {
|
||||
out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
@@ -15,16 +15,18 @@ import (
"runtime"
"strconv"
"strings"

"github.com/Azure/go-autorest/autorest/azure"
)

const (
// DefaultBaseURL is the domain name used for storage requests when a
// default client is created.
// DefaultBaseURL is the domain name used for storage requests in the
// public cloud when a default client is created.
DefaultBaseURL = "core.windows.net"

// DefaultAPIVersion is the Azure Storage API version string used when a
// DefaultAPIVersion is the Azure Storage API version string used when a
// basic client is created.
DefaultAPIVersion = "2015-02-21"
DefaultAPIVersion = "2016-05-31"

defaultUseHTTPS = true

@@ -131,7 +133,15 @@ func NewBasicClient(accountName, accountKey string) (Client, error) {
return NewEmulatorClient()
}
return NewClient(accountName, accountKey, DefaultBaseURL, DefaultAPIVersion, defaultUseHTTPS)
}

// NewBasicClientOnSovereignCloud constructs a Client with given storage service name and
// key in the referenced cloud.
func NewBasicClientOnSovereignCloud(accountName, accountKey string, env azure.Environment) (Client, error) {
if accountName == StorageEmulatorAccountName {
return NewEmulatorClient()
}
return NewClient(accountName, accountKey, env.StorageEndpointSuffix, DefaultAPIVersion, defaultUseHTTPS)
}

//NewEmulatorClient contructs a Client intended to only work with Azure

@@ -347,7 +357,7 @@ func (c Client) exec(verb, url string, headers map[string]string, body io.Reader
statusCode := resp.StatusCode
if statusCode >= 400 && statusCode <= 505 {
var respBody []byte
respBody, err = readResponseBody(resp)
respBody, err = readAndCloseBody(resp.Body)
if err != nil {
return nil, err
}

@@ -406,7 +416,7 @@ func (c Client) execInternalJSON(verb, url string, headers map[string]string, bo
statusCode := resp.StatusCode
if statusCode >= 400 && statusCode <= 505 {
var respBody []byte
respBody, err = readResponseBody(resp)
respBody, err = readAndCloseBody(resp.Body)
if err != nil {
return nil, err
}

@@ -424,9 +434,9 @@ func (c Client) execInternalJSON(verb, url string, headers map[string]string, bo
return respToRet, nil
}

func readResponseBody(resp *http.Response) ([]byte, error) {
defer resp.Body.Close()
out, err := ioutil.ReadAll(resp.Body)
func readAndCloseBody(body io.ReadCloser) ([]byte, error) {
defer body.Close()
out, err := ioutil.ReadAll(body)
if err == io.EOF {
err = nil
}
376 vendor/github.com/Azure/azure-storage-go/container.go generated vendored Normal file
@@ -0,0 +1,376 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Container represents an Azure container.
|
||||
type Container struct {
|
||||
bsc *BlobStorageClient
|
||||
Name string `xml:"Name"`
|
||||
Properties ContainerProperties `xml:"Properties"`
|
||||
}
|
||||
|
||||
func (c *Container) buildPath() string {
|
||||
return fmt.Sprintf("/%s", c.Name)
|
||||
}
|
||||
|
||||
// ContainerProperties contains various properties of a container returned from
|
||||
// various endpoints like ListContainers.
|
||||
type ContainerProperties struct {
|
||||
LastModified string `xml:"Last-Modified"`
|
||||
Etag string `xml:"Etag"`
|
||||
LeaseStatus string `xml:"LeaseStatus"`
|
||||
LeaseState string `xml:"LeaseState"`
|
||||
LeaseDuration string `xml:"LeaseDuration"`
|
||||
}
|
||||
|
||||
// ContainerListResponse contains the response fields from
|
||||
// ListContainers call.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
|
||||
type ContainerListResponse struct {
|
||||
XMLName xml.Name `xml:"EnumerationResults"`
|
||||
Xmlns string `xml:"xmlns,attr"`
|
||||
Prefix string `xml:"Prefix"`
|
||||
Marker string `xml:"Marker"`
|
||||
NextMarker string `xml:"NextMarker"`
|
||||
MaxResults int64 `xml:"MaxResults"`
|
||||
Containers []Container `xml:"Containers>Container"`
|
||||
}
|
||||
|
||||
// BlobListResponse contains the response fields from ListBlobs call.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
|
||||
type BlobListResponse struct {
|
||||
XMLName xml.Name `xml:"EnumerationResults"`
|
||||
Xmlns string `xml:"xmlns,attr"`
|
||||
Prefix string `xml:"Prefix"`
|
||||
Marker string `xml:"Marker"`
|
||||
NextMarker string `xml:"NextMarker"`
|
||||
MaxResults int64 `xml:"MaxResults"`
|
||||
Blobs []Blob `xml:"Blobs>Blob"`
|
||||
|
||||
// BlobPrefix is used to traverse blobs as if it were a file system.
|
||||
// It is returned if ListBlobsParameters.Delimiter is specified.
|
||||
// The list here can be thought of as "folders" that may contain
|
||||
// other folders or blobs.
|
||||
BlobPrefixes []string `xml:"Blobs>BlobPrefix>Name"`
|
||||
|
||||
// Delimiter is used to traverse blobs as if it were a file system.
|
||||
// It is returned if ListBlobsParameters.Delimiter is specified.
|
||||
Delimiter string `xml:"Delimiter"`
|
||||
}
|
||||
|
||||
// ListBlobsParameters defines the set of customizable
|
||||
// parameters to make a List Blobs call.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
|
||||
type ListBlobsParameters struct {
|
||||
Prefix string
|
||||
Delimiter string
|
||||
Marker string
|
||||
Include string
|
||||
MaxResults uint
|
||||
Timeout uint
|
||||
}
|
||||
|
||||
func (p ListBlobsParameters) getParameters() url.Values {
|
||||
out := url.Values{}
|
||||
|
||||
if p.Prefix != "" {
|
||||
out.Set("prefix", p.Prefix)
|
||||
}
|
||||
if p.Delimiter != "" {
|
||||
out.Set("delimiter", p.Delimiter)
|
||||
}
|
||||
if p.Marker != "" {
|
||||
out.Set("marker", p.Marker)
|
||||
}
|
||||
if p.Include != "" {
|
||||
out.Set("include", p.Include)
|
||||
}
|
||||
if p.MaxResults != 0 {
|
||||
out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
|
||||
}
|
||||
if p.Timeout != 0 {
|
||||
out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
// ContainerAccessType defines the access level to the container from a public
|
||||
// request.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx and "x-ms-
|
||||
// blob-public-access" header.
|
||||
type ContainerAccessType string
|
||||
|
||||
// Access options for containers
|
||||
const (
|
||||
ContainerAccessTypePrivate ContainerAccessType = ""
|
||||
ContainerAccessTypeBlob ContainerAccessType = "blob"
|
||||
ContainerAccessTypeContainer ContainerAccessType = "container"
|
||||
)
|
||||
|
||||
// ContainerAccessPolicy represents each access policy in the container ACL.
|
||||
type ContainerAccessPolicy struct {
|
||||
ID string
|
||||
StartTime time.Time
|
||||
ExpiryTime time.Time
|
||||
CanRead bool
|
||||
CanWrite bool
|
||||
CanDelete bool
|
||||
}
|
||||
|
||||
// ContainerPermissions represents the container ACLs.
|
||||
type ContainerPermissions struct {
|
||||
AccessType ContainerAccessType
|
||||
AccessPolicies []ContainerAccessPolicy
|
||||
}
|
||||
|
||||
// ContainerAccessHeader references header used when setting/getting container ACL
|
||||
const (
|
||||
ContainerAccessHeader string = "x-ms-blob-public-access"
|
||||
)
|
||||
|
||||
// Create creates a blob container within the storage account
|
||||
// with given name and access level. Returns error if container already exists.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx
|
||||
func (c *Container) Create() error {
|
||||
resp, err := c.create()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer readAndCloseBody(resp.body)
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||
}
|
||||
|
||||
// CreateIfNotExists creates a blob container if it does not exist. Returns
|
||||
// true if container is newly created or false if container already exists.
|
||||
func (c *Container) CreateIfNotExists() (bool, error) {
|
||||
resp, err := c.create()
|
||||
if resp != nil {
|
||||
defer readAndCloseBody(resp.body)
|
||||
if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict {
|
||||
return resp.statusCode == http.StatusCreated, nil
|
||||
}
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
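// create issues the underlying Put Container request; callers are responsible for closing or draining the response body.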
func (c *Container) create() (*storageResponse, error) {
|
||||
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), url.Values{"restype": {"container"}})
|
||||
headers := c.bsc.client.getStandardHeaders()
|
||||
return c.bsc.client.exec(http.MethodPut, uri, headers, nil, c.bsc.auth)
|
||||
}
|
||||
|
||||
// Exists returns true if a container with given name exists
|
||||
// on the storage account, otherwise returns false.
|
||||
func (c *Container) Exists() (bool, error) {
|
||||
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), url.Values{"restype": {"container"}})
|
||||
headers := c.bsc.client.getStandardHeaders()
|
||||
|
||||
resp, err := c.bsc.client.exec(http.MethodHead, uri, headers, nil, c.bsc.auth)
|
||||
if resp != nil {
|
||||
defer readAndCloseBody(resp.body)
|
||||
if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
|
||||
return resp.statusCode == http.StatusOK, nil
|
||||
}
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
// SetPermissions sets up container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179391.aspx
|
||||
func (c *Container) SetPermissions(permissions ContainerPermissions, timeout int, leaseID string) error {
|
||||
params := url.Values{
|
||||
"restype": {"container"},
|
||||
"comp": {"acl"},
|
||||
}
|
||||
|
||||
if timeout > 0 {
|
||||
params.Add("timeout", strconv.Itoa(timeout))
|
||||
}
|
||||
|
||||
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
|
||||
headers := c.bsc.client.getStandardHeaders()
|
||||
if permissions.AccessType != "" {
|
||||
headers[ContainerAccessHeader] = string(permissions.AccessType)
|
||||
}
|
||||
|
||||
if leaseID != "" {
|
||||
headers[headerLeaseID] = leaseID
|
||||
}
|
||||
|
||||
body, length, err := generateContainerACLpayload(permissions.AccessPolicies)
if err != nil {
return err
}
headers["Content-Length"] = strconv.Itoa(length)

resp, err := c.bsc.client.exec(http.MethodPut, uri, headers, body, c.bsc.auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
||||
return errors.New("Unable to set permissions")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetPermissions gets the container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179469.aspx
|
||||
// If timeout is 0 then it will not be passed to Azure
|
||||
// leaseID will only be passed to Azure if populated
|
||||
func (c *Container) GetPermissions(timeout int, leaseID string) (*ContainerPermissions, error) {
|
||||
params := url.Values{
|
||||
"restype": {"container"},
|
||||
"comp": {"acl"},
|
||||
}
|
||||
|
||||
if timeout > 0 {
|
||||
params.Add("timeout", strconv.Itoa(timeout))
|
||||
}
|
||||
|
||||
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
|
||||
headers := c.bsc.client.getStandardHeaders()
|
||||
|
||||
if leaseID != "" {
|
||||
headers[headerLeaseID] = leaseID
|
||||
}
|
||||
|
||||
resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
|
||||
var ap AccessPolicy
|
||||
err = xmlUnmarshal(resp.body, &ap.SignedIdentifiersList)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buildAccessPolicy(ap, &resp.headers), nil
|
||||
}
|
||||
|
||||
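// buildAccessPolicy maps the signed identifiers returned by Azure and the x-ms-blob-public-access response header onto a ContainerPermissions value.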
func buildAccessPolicy(ap AccessPolicy, headers *http.Header) *ContainerPermissions {
|
||||
// containerAccess is one of: blob, container, or empty (private)
|
||||
containerAccess := headers.Get(http.CanonicalHeaderKey(ContainerAccessHeader))
|
||||
permissions := ContainerPermissions{
|
||||
AccessType: ContainerAccessType(containerAccess),
|
||||
AccessPolicies: []ContainerAccessPolicy{},
|
||||
}
|
||||
|
||||
for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers {
|
||||
capd := ContainerAccessPolicy{
|
||||
ID: policy.ID,
|
||||
StartTime: policy.AccessPolicy.StartTime,
|
||||
ExpiryTime: policy.AccessPolicy.ExpiryTime,
|
||||
}
|
||||
capd.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r")
|
||||
capd.CanWrite = updatePermissions(policy.AccessPolicy.Permission, "w")
|
||||
capd.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d")
|
||||
|
||||
permissions.AccessPolicies = append(permissions.AccessPolicies, capd)
|
||||
}
|
||||
return &permissions
|
||||
}
|
||||
|
||||
// Delete deletes the container with given name on the storage
|
||||
// account. If the container does not exist, an error is returned.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx
|
||||
func (c *Container) Delete() error {
|
||||
resp, err := c.delete()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer readAndCloseBody(resp.body)
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
|
||||
}
|
||||
|
||||
// DeleteIfExists deletes the container with given name on the storage
|
||||
// account if it exists. Returns true if container is deleted with this call, or
|
||||
// false if the container did not exist at the time of the Delete Container
|
||||
// operation.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx
|
||||
func (c *Container) DeleteIfExists() (bool, error) {
|
||||
resp, err := c.delete()
|
||||
if resp != nil {
|
||||
defer readAndCloseBody(resp.body)
|
||||
if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
|
||||
return resp.statusCode == http.StatusAccepted, nil
|
||||
}
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
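// delete issues the underlying Delete Container request; callers are responsible for closing or draining the response body.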
func (c *Container) delete() (*storageResponse, error) {
|
||||
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), url.Values{"restype": {"container"}})
|
||||
headers := c.bsc.client.getStandardHeaders()
|
||||
return c.bsc.client.exec(http.MethodDelete, uri, headers, nil, c.bsc.auth)
|
||||
}
|
||||
|
||||
// ListBlobs returns an object that contains list of blobs in the container,
|
||||
// pagination token and other information in the response of List Blobs call.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
|
||||
func (c *Container) ListBlobs(params ListBlobsParameters) (BlobListResponse, error) {
|
||||
q := mergeParams(params.getParameters(), url.Values{
|
||||
"restype": {"container"},
|
||||
"comp": {"list"}},
|
||||
)
|
||||
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q)
|
||||
headers := c.bsc.client.getStandardHeaders()
|
||||
|
||||
var out BlobListResponse
|
||||
resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
|
||||
err = xmlUnmarshal(resp.body, &out)
|
||||
return out, err
|
||||
}
|
||||
|
||||
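// generateContainerACLpayload marshals the given access policies into the XML body expected by the Set Container ACL operation, returning the reader and its length.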
func generateContainerACLpayload(policies []ContainerAccessPolicy) (io.Reader, int, error) {
|
||||
sil := SignedIdentifiers{
|
||||
SignedIdentifiers: []SignedIdentifier{},
|
||||
}
|
||||
for _, capd := range policies {
|
||||
permission := capd.generateContainerPermissions()
|
||||
signedIdentifier := convertAccessPolicyToXMLStructs(capd.ID, capd.StartTime, capd.ExpiryTime, permission)
|
||||
sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier)
|
||||
}
|
||||
return xmlMarshal(sil)
|
||||
}
|
||||
|
||||
func (capd *ContainerAccessPolicy) generateContainerPermissions() (permissions string) {
|
||||
// generate the permissions string (rwd).
|
||||
// The end-user API still exposes bool flags.
|
||||
permissions = ""
|
||||
|
||||
if capd.CanRead {
|
||||
permissions += "r"
|
||||
}
|
||||
|
||||
if capd.CanWrite {
|
||||
permissions += "w"
|
||||
}
|
||||
|
||||
if capd.CanDelete {
|
||||
permissions += "d"
|
||||
}
|
||||
|
||||
return permissions
|
||||
}
|
||||
@@ -67,7 +67,7 @@ func (d *Directory) Create() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
headers, err := d.fsc.createResource(d.buildPath(), resourceDirectory, mergeMDIntoExtraHeaders(d.Metadata, nil))
|
||||
headers, err := d.fsc.createResource(d.buildPath(), resourceDirectory, nil, mergeMDIntoExtraHeaders(d.Metadata, nil), []int{http.StatusCreated})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -87,9 +87,9 @@ func (d *Directory) CreateIfNotExists() (bool, error) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
resp, err := d.fsc.createResourceNoClose(d.buildPath(), resourceDirectory, nil)
|
||||
resp, err := d.fsc.createResourceNoClose(d.buildPath(), resourceDirectory, nil, nil)
|
||||
if resp != nil {
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict {
|
||||
if resp.statusCode == http.StatusCreated {
|
||||
d.updateEtagAndLastModified(resp.headers)
|
||||
@@ -117,7 +117,7 @@ func (d *Directory) Delete() error {
|
||||
func (d *Directory) DeleteIfExists() (bool, error) {
|
||||
resp, err := d.fsc.deleteResourceNoClose(d.buildPath(), resourceDirectory)
|
||||
if resp != nil {
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
|
||||
return resp.statusCode == http.StatusAccepted, nil
|
||||
}
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
@@ -14,12 +15,13 @@ const oneTB = uint64(1099511627776)
|
||||
|
||||
// File represents a file on a share.
|
||||
type File struct {
|
||||
fsc *FileServiceClient
|
||||
Metadata map[string]string
|
||||
Name string `xml:"Name"`
|
||||
parent *Directory
|
||||
Properties FileProperties `xml:"Properties"`
|
||||
share *Share
|
||||
fsc *FileServiceClient
|
||||
Metadata map[string]string
|
||||
Name string `xml:"Name"`
|
||||
parent *Directory
|
||||
Properties FileProperties `xml:"Properties"`
|
||||
share *Share
|
||||
FileCopyProperties FileCopyState
|
||||
}
|
||||
|
||||
// FileProperties contains various properties of a file.
|
||||
@@ -38,10 +40,10 @@ type FileProperties struct {
|
||||
// FileCopyState contains various properties of a file copy operation.
|
||||
type FileCopyState struct {
|
||||
CompletionTime string
|
||||
ID string
|
||||
ID string `header:"x-ms-copy-id"`
|
||||
Progress string
|
||||
Source string
|
||||
Status string
|
||||
Status string `header:"x-ms-copy-status"`
|
||||
StatusDesc string
|
||||
}
|
||||
|
||||
@@ -51,6 +53,24 @@ type FileStream struct {
|
||||
ContentMD5 string
|
||||
}
|
||||
|
||||
// FileRequestOptions will be passed to misc file operations.
|
||||
// Currently just Timeout (in seconds) but will expand.
|
||||
type FileRequestOptions struct {
|
||||
Timeout uint // timeout duration in seconds.
|
||||
}
|
||||
|
||||
// getParameters constructs URL parameters from FileRequestOptions.
|
||||
// Currently only timeout is supported, but this is expected to grow as functionality fills out.
|
||||
func (p FileRequestOptions) getParameters() url.Values {
|
||||
out := url.Values{}
|
||||
|
||||
if p.Timeout != 0 {
|
||||
out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
// FileRanges contains a list of file range information for a file.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dn166984.aspx
|
||||
@@ -104,7 +124,7 @@ func (f *File) Create(maxSize uint64) error {
|
||||
"x-ms-type": "file",
|
||||
}
|
||||
|
||||
headers, err := f.fsc.createResource(f.buildPath(), resourceFile, mergeMDIntoExtraHeaders(f.Metadata, extraHeaders))
|
||||
headers, err := f.fsc.createResource(f.buildPath(), resourceFile, nil, mergeMDIntoExtraHeaders(f.Metadata, extraHeaders), []int{http.StatusCreated})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -114,6 +134,29 @@ func (f *File) Create(maxSize uint64) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// CopyFile operation copies a file/blob from the sourceURL to the path provided.
|
||||
//
|
||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/copy-file
|
||||
func (f *File) CopyFile(sourceURL string, options *FileRequestOptions) error {
|
||||
extraHeaders := map[string]string{
|
||||
"x-ms-type": "file",
|
||||
"x-ms-copy-source": sourceURL,
|
||||
}
|
||||
|
||||
var parameters url.Values
|
||||
if options != nil {
|
||||
parameters = options.getParameters()
|
||||
}
|
||||
|
||||
headers, err := f.fsc.createResource(f.buildPath(), resourceFile, parameters, mergeMDIntoExtraHeaders(f.Metadata, extraHeaders), []int{http.StatusAccepted})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f.updateEtagLastModifiedAndCopyHeaders(headers)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete immediately removes this file from the storage account.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dn689085.aspx
|
||||
@@ -127,7 +170,7 @@ func (f *File) Delete() error {
|
||||
func (f *File) DeleteIfExists() (bool, error) {
|
||||
resp, err := f.fsc.deleteResourceNoClose(f.buildPath(), resourceFile)
|
||||
if resp != nil {
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
|
||||
return resp.statusCode == http.StatusAccepted, nil
|
||||
}
|
||||
@@ -221,6 +264,7 @@ func (f *File) ListRanges(listRange *FileRange) (*FileRanges, error) {
|
||||
var cl uint64
|
||||
cl, err = strconv.ParseUint(resp.headers.Get("x-ms-content-length"), 10, 64)
|
||||
if err != nil {
|
||||
ioutil.ReadAll(resp.body)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -272,7 +316,7 @@ func (f *File) modifyRange(bytes io.Reader, fileRange FileRange, contentMD5 *str
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
return resp.headers, checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||
}
|
||||
|
||||
@@ -318,6 +362,14 @@ func (f *File) updateEtagAndLastModified(headers http.Header) {
|
||||
f.Properties.LastModified = headers.Get("Last-Modified")
|
||||
}
|
||||
|
||||
// updates Etag, last modified date, x-ms-copy-id and x-ms-copy-status
|
||||
func (f *File) updateEtagLastModifiedAndCopyHeaders(headers http.Header) {
|
||||
f.Properties.Etag = headers.Get("Etag")
|
||||
f.Properties.LastModified = headers.Get("Last-Modified")
|
||||
f.FileCopyProperties.ID = headers.Get("X-Ms-Copy-Id")
|
||||
f.FileCopyProperties.Status = headers.Get("X-Ms-Copy-Status")
|
||||
}
|
||||
|
||||
// updates file properties from the specified HTTP header
|
||||
func (f *File) updateProperties(header http.Header) {
|
||||
size, err := strconv.ParseUint(header.Get("Content-Length"), 10, 64)
|
||||
@@ -149,6 +149,20 @@ func (f FileServiceClient) ListShares(params ListSharesParameters) (*ShareListRe
|
||||
return &out, err
|
||||
}
|
||||
|
||||
// GetServiceProperties gets the properties of your storage account's file service.
|
||||
// File service does not support logging
|
||||
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file-service-properties
|
||||
func (f *FileServiceClient) GetServiceProperties() (*ServiceProperties, error) {
|
||||
return f.client.getServiceProperties(fileServiceName, f.auth)
|
||||
}
|
||||
|
||||
// SetServiceProperties sets the properties of your storage account's file service.
|
||||
// File service does not support logging
|
||||
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-file-service-properties
|
||||
func (f *FileServiceClient) SetServiceProperties(props ServiceProperties) error {
|
||||
return f.client.setServiceProperties(props, fileServiceName, f.auth)
|
||||
}
|
||||
|
||||
// retrieves directory or share content
|
||||
func (f FileServiceClient) listContent(path string, params url.Values, extraHeaders map[string]string) (*storageResponse, error) {
|
||||
if err := f.checkForStorageEmulator(); err != nil {
|
||||
@@ -165,7 +179,7 @@ func (f FileServiceClient) listContent(path string, params url.Values, extraHead
|
||||
}
|
||||
|
||||
if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
||||
resp.body.Close()
|
||||
readAndCloseBody(resp.body)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -183,7 +197,7 @@ func (f FileServiceClient) resourceExists(path string, res resourceType) (bool,
|
||||
|
||||
resp, err := f.client.exec(http.MethodHead, uri, headers, nil, f.auth)
|
||||
if resp != nil {
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
|
||||
return resp.statusCode == http.StatusOK, resp.headers, nil
|
||||
}
|
||||
@@ -192,23 +206,24 @@ func (f FileServiceClient) resourceExists(path string, res resourceType) (bool,
|
||||
}
|
||||
|
||||
// creates a resource depending on the specified resource type
|
||||
func (f FileServiceClient) createResource(path string, res resourceType, extraHeaders map[string]string) (http.Header, error) {
|
||||
resp, err := f.createResourceNoClose(path, res, extraHeaders)
|
||||
func (f FileServiceClient) createResource(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string, expectedResponseCodes []int) (http.Header, error) {
|
||||
resp, err := f.createResourceNoClose(path, res, urlParams, extraHeaders)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
return resp.headers, checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||
defer readAndCloseBody(resp.body)
|
||||
return resp.headers, checkRespCode(resp.statusCode, expectedResponseCodes)
|
||||
}
|
||||
|
||||
// creates a resource depending on the specified resource type, doesn't close the response body
|
||||
func (f FileServiceClient) createResourceNoClose(path string, res resourceType, extraHeaders map[string]string) (*storageResponse, error) {
|
||||
func (f FileServiceClient) createResourceNoClose(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string) (*storageResponse, error) {
|
||||
if err := f.checkForStorageEmulator(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
values := getURLInitValues(compNone, res)
|
||||
uri := f.client.getEndpoint(fileServiceName, path, values)
|
||||
combinedParams := mergeParams(values, urlParams)
|
||||
uri := f.client.getEndpoint(fileServiceName, path, combinedParams)
|
||||
extraHeaders = f.client.protectUserAgent(extraHeaders)
|
||||
headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)
|
||||
|
||||
@@ -221,7 +236,7 @@ func (f FileServiceClient) getResourceHeaders(path string, comp compType, res re
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
||||
return nil, err
|
||||
@@ -250,7 +265,7 @@ func (f FileServiceClient) deleteResource(path string, res resourceType) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
|
||||
}
|
||||
|
||||
@@ -302,7 +317,7 @@ func (f FileServiceClient) setResourceHeaders(path string, comp compType, res re
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
return resp.headers, checkRespCode(resp.statusCode, []int{http.StatusOK})
|
||||
}
|
||||
14 vendor/github.com/Azure/azure-storage-go/glide.lock generated vendored Normal file
@@ -0,0 +1,14 @@
|
||||
hash: a97c0c90fe4d23bbd8e5745431f633e75530bb611131b786d76b8e1763bce85e
|
||||
updated: 2017-02-23T09:58:57.3701584-08:00
|
||||
imports:
|
||||
- name: github.com/Azure/go-autorest
|
||||
version: ec5f4903f77ed9927ac95b19ab8e44ada64c1356
|
||||
subpackages:
|
||||
- autorest/azure
|
||||
- autorest
|
||||
- autorest/date
|
||||
- name: github.com/dgrijalva/jwt-go
|
||||
version: 2268707a8f0843315e2004ee4f1d021dc08baedf
|
||||
testImports:
|
||||
- name: gopkg.in/check.v1
|
||||
version: 20d25e2804050c1cd24a7eea1e7a6447dd0e74ec
|
||||
4 vendor/github.com/Azure/azure-storage-go/glide.yaml generated vendored Normal file
@@ -0,0 +1,4 @@
|
||||
package: github.com/Azure/azure-sdk-for-go-storage
|
||||
import: []
|
||||
testImport:
|
||||
- package: gopkg.in/check.v1
|
||||
@@ -15,13 +15,6 @@ const (
|
||||
userDefinedMetadataHeaderPrefix = "X-Ms-Meta-"
|
||||
)
|
||||
|
||||
// QueueServiceClient contains operations for Microsoft Azure Queue Storage
|
||||
// Service.
|
||||
type QueueServiceClient struct {
|
||||
client Client
|
||||
auth authentication
|
||||
}
|
||||
|
||||
func pathForQueue(queue string) string { return fmt.Sprintf("/%s", queue) }
|
||||
func pathForQueueMessages(queue string) string { return fmt.Sprintf("/%s/messages", queue) }
|
||||
func pathForMessage(queue, name string) string { return fmt.Sprintf("/%s/messages/%s", queue, name) }
|
||||
@@ -162,7 +155,7 @@ func (c QueueServiceClient) SetMetadata(name string, metadata map[string]string)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
|
||||
}
|
||||
@@ -185,7 +178,7 @@ func (c QueueServiceClient) GetMetadata(name string) (QueueMetadataResponse, err
|
||||
if err != nil {
|
||||
return qm, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
for k, v := range resp.headers {
|
||||
if len(v) != 1 {
|
||||
@@ -218,7 +211,7 @@ func (c QueueServiceClient) CreateQueue(name string) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||
}
|
||||
|
||||
@@ -231,7 +224,7 @@ func (c QueueServiceClient) DeleteQueue(name string) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
|
||||
}
|
||||
|
||||
@@ -262,7 +255,7 @@ func (c QueueServiceClient) PutMessage(queue string, message string, params PutM
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||
}
|
||||
|
||||
@@ -275,7 +268,7 @@ func (c QueueServiceClient) ClearMessages(queue string) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
|
||||
}
|
||||
|
||||
@@ -321,7 +314,7 @@ func (c QueueServiceClient) DeleteMessage(queue, messageID, popReceipt string) e
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
|
||||
}
|
||||
|
||||
@@ -341,6 +334,6 @@ func (c QueueServiceClient) UpdateMessage(queue string, messageID string, messag
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
|
||||
}
|
||||
20 vendor/github.com/Azure/azure-storage-go/queueserviceclient.go generated vendored Normal file
@@ -0,0 +1,20 @@
|
||||
package storage
|
||||
|
||||
// QueueServiceClient contains operations for Microsoft Azure Queue Storage
|
||||
// Service.
|
||||
type QueueServiceClient struct {
|
||||
client Client
|
||||
auth authentication
|
||||
}
|
||||
|
||||
// GetServiceProperties gets the properties of your storage account's queue service.
|
||||
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-service-properties
|
||||
func (c *QueueServiceClient) GetServiceProperties() (*ServiceProperties, error) {
|
||||
return c.client.getServiceProperties(queueServiceName, c.auth)
|
||||
}
|
||||
|
||||
// SetServiceProperties sets the properties of your storage account's queue service.
|
||||
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-service-properties
|
||||
func (c *QueueServiceClient) SetServiceProperties(props ServiceProperties) error {
|
||||
return c.client.setServiceProperties(props, queueServiceName, c.auth)
|
||||
}
|
||||
@@ -32,7 +32,7 @@ func (s *Share) buildPath() string {
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx
|
||||
func (s *Share) Create() error {
|
||||
headers, err := s.fsc.createResource(s.buildPath(), resourceShare, mergeMDIntoExtraHeaders(s.Metadata, nil))
|
||||
headers, err := s.fsc.createResource(s.buildPath(), resourceShare, nil, mergeMDIntoExtraHeaders(s.Metadata, nil), []int{http.StatusCreated})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -47,9 +47,9 @@ func (s *Share) Create() error {
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx
|
||||
func (s *Share) CreateIfNotExists() (bool, error) {
|
||||
resp, err := s.fsc.createResourceNoClose(s.buildPath(), resourceShare, nil)
|
||||
resp, err := s.fsc.createResourceNoClose(s.buildPath(), resourceShare, nil, nil)
|
||||
if resp != nil {
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict {
|
||||
if resp.statusCode == http.StatusCreated {
|
||||
s.updateEtagAndLastModified(resp.headers)
|
||||
@@ -77,7 +77,7 @@ func (s *Share) Delete() error {
|
||||
func (s *Share) DeleteIfExists() (bool, error) {
|
||||
resp, err := s.fsc.deleteResourceNoClose(s.buildPath(), resourceShare)
|
||||
if resp != nil {
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
|
||||
return resp.statusCode == http.StatusAccepted, nil
|
||||
}
|
||||
118 vendor/github.com/Azure/azure-storage-go/storageservice.go generated vendored Normal file
@@ -0,0 +1,118 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
// ServiceProperties represents the storage account service properties
|
||||
type ServiceProperties struct {
|
||||
Logging *Logging
|
||||
HourMetrics *Metrics
|
||||
MinuteMetrics *Metrics
|
||||
Cors *Cors
|
||||
}
|
||||
|
||||
// Logging represents the Azure Analytics Logging settings
|
||||
type Logging struct {
|
||||
Version string
|
||||
Delete bool
|
||||
Read bool
|
||||
Write bool
|
||||
RetentionPolicy *RetentionPolicy
|
||||
}
|
||||
|
||||
// RetentionPolicy indicates if retention is enabled and for how many days
|
||||
type RetentionPolicy struct {
|
||||
Enabled bool
|
||||
Days *int
|
||||
}
|
||||
|
||||
// Metrics provide request statistics.
|
||||
type Metrics struct {
|
||||
Version string
|
||||
Enabled bool
|
||||
IncludeAPIs *bool
|
||||
RetentionPolicy *RetentionPolicy
|
||||
}
|
||||
|
||||
// Cors includes all the CORS rules
|
||||
type Cors struct {
|
||||
CorsRule []CorsRule
|
||||
}
|
||||
|
||||
// CorsRule includes all settings for a Cors rule
|
||||
type CorsRule struct {
|
||||
AllowedOrigins string
|
||||
AllowedMethods string
|
||||
MaxAgeInSeconds int
|
||||
ExposedHeaders string
|
||||
AllowedHeaders string
|
||||
}
|
||||
|
||||
func (c Client) getServiceProperties(service string, auth authentication) (*ServiceProperties, error) {
|
||||
query := url.Values{
|
||||
"restype": {"service"},
|
||||
"comp": {"properties"},
|
||||
}
|
||||
uri := c.getEndpoint(service, "", query)
|
||||
headers := c.getStandardHeaders()
|
||||
|
||||
resp, err := c.exec(http.MethodGet, uri, headers, nil, auth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
|
||||
if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var out ServiceProperties
|
||||
err = xmlUnmarshal(resp.body, &out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
func (c Client) setServiceProperties(props ServiceProperties, service string, auth authentication) error {
|
||||
query := url.Values{
|
||||
"restype": {"service"},
|
||||
"comp": {"properties"},
|
||||
}
|
||||
uri := c.getEndpoint(service, "", query)
|
||||
|
||||
// Ideally, StorageServiceProperties would be the output struct
|
||||
// This is to avoid golint stuttering, while generating the correct XML
|
||||
type StorageServiceProperties struct {
|
||||
Logging *Logging
|
||||
HourMetrics *Metrics
|
||||
MinuteMetrics *Metrics
|
||||
Cors *Cors
|
||||
}
|
||||
input := StorageServiceProperties{
|
||||
Logging: props.Logging,
|
||||
HourMetrics: props.HourMetrics,
|
||||
MinuteMetrics: props.MinuteMetrics,
|
||||
Cors: props.Cors,
|
||||
}
|
||||
|
||||
body, length, err := xmlMarshal(input)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
headers := c.getStandardHeaders()
|
||||
headers["Content-Length"] = fmt.Sprintf("%v", length)
|
||||
|
||||
resp, err := c.exec(http.MethodPut, uri, headers, body, auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
|
||||
}
|
||||
@@ -5,19 +5,13 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TableServiceClient contains operations for Microsoft Azure Table Storage
|
||||
// Service.
|
||||
type TableServiceClient struct {
|
||||
client Client
|
||||
auth authentication
|
||||
}
|
||||
|
||||
// AzureTable is the typedef of the Azure Table name
|
||||
type AzureTable string
|
||||
|
||||
@@ -68,6 +62,7 @@ func (c *TableServiceClient) QueryTables() ([]AzureTable, error) {
|
||||
defer resp.body.Close()
|
||||
|
||||
if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
||||
ioutil.ReadAll(resp.body)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -111,7 +106,7 @@ func (c *TableServiceClient) CreateTable(table AzureTable) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil {
|
||||
return err
|
||||
@@ -137,7 +132,7 @@ func (c *TableServiceClient) DeleteTable(table AzureTable) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
|
||||
return err
|
||||
@@ -167,7 +162,7 @@ func (c *TableServiceClient) SetTablePermissions(table AzureTable, policies []Ta
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
|
||||
return err
|
||||
@@ -204,6 +199,7 @@ func (c *TableServiceClient) GetTablePermissions(table AzureTable, timeout int)
|
||||
defer resp.body.Close()
|
||||
|
||||
if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
||||
ioutil.ReadAll(resp.body)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -124,11 +124,12 @@ func (c *TableServiceClient) QueryTableEntities(tableName AzureTable, previousCo
|
||||
// The function fails if there is an entity with the same
|
||||
// PartitionKey and RowKey in the table.
|
||||
func (c *TableServiceClient) InsertEntity(table AzureTable, entity TableEntity) error {
|
||||
if sc, err := c.execTable(table, entity, false, http.MethodPost); err != nil {
|
||||
return checkRespCode(sc, []int{http.StatusCreated})
|
||||
sc, err := c.execTable(table, entity, false, http.MethodPost)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
return checkRespCode(sc, []int{http.StatusCreated})
|
||||
}
|
||||
|
||||
func (c *TableServiceClient) execTable(table AzureTable, entity TableEntity, specifyKeysInURL bool, method string) (int, error) {
|
||||
@@ -162,10 +163,12 @@ func (c *TableServiceClient) execTable(table AzureTable, entity TableEntity, spe
|
||||
// one passed as parameter. The function fails if there is no entity
|
||||
// with the same PartitionKey and RowKey in the table.
|
||||
func (c *TableServiceClient) UpdateEntity(table AzureTable, entity TableEntity) error {
|
||||
if sc, err := c.execTable(table, entity, true, http.MethodPut); err != nil {
|
||||
return checkRespCode(sc, []int{http.StatusNoContent})
|
||||
sc, err := c.execTable(table, entity, true, http.MethodPut)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
||||
return checkRespCode(sc, []int{http.StatusNoContent})
|
||||
}
|
||||
|
||||
// MergeEntity merges the contents of an entity with the
|
||||
@@ -173,10 +176,12 @@ func (c *TableServiceClient) UpdateEntity(table AzureTable, entity TableEntity)
|
||||
// The function fails if there is no entity
|
||||
// with the same PartitionKey and RowKey in the table.
|
||||
func (c *TableServiceClient) MergeEntity(table AzureTable, entity TableEntity) error {
|
||||
if sc, err := c.execTable(table, entity, true, "MERGE"); err != nil {
|
||||
return checkRespCode(sc, []int{http.StatusNoContent})
|
||||
sc, err := c.execTable(table, entity, true, "MERGE")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
||||
return checkRespCode(sc, []int{http.StatusNoContent})
|
||||
}
|
||||
|
||||
// DeleteEntityWithoutCheck deletes the entity matching by
|
||||
@@ -219,19 +224,23 @@ func (c *TableServiceClient) DeleteEntity(table AzureTable, entity TableEntity,
|
||||
// InsertOrReplaceEntity inserts an entity in the specified table
|
||||
// or replaced the existing one.
|
||||
func (c *TableServiceClient) InsertOrReplaceEntity(table AzureTable, entity TableEntity) error {
|
||||
if sc, err := c.execTable(table, entity, true, http.MethodPut); err != nil {
|
||||
return checkRespCode(sc, []int{http.StatusNoContent})
|
||||
sc, err := c.execTable(table, entity, true, http.MethodPut)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
||||
return checkRespCode(sc, []int{http.StatusNoContent})
|
||||
}
|
||||
|
||||
// InsertOrMergeEntity inserts an entity in the specified table
|
||||
// or merges the existing one.
|
||||
func (c *TableServiceClient) InsertOrMergeEntity(table AzureTable, entity TableEntity) error {
|
||||
if sc, err := c.execTable(table, entity, true, "MERGE"); err != nil {
|
||||
return checkRespCode(sc, []int{http.StatusNoContent})
|
||||
sc, err := c.execTable(table, entity, true, "MERGE")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
||||
return checkRespCode(sc, []int{http.StatusNoContent})
|
||||
}
|
||||
|
||||
func injectPartitionAndRowKeys(entity TableEntity, buf *bytes.Buffer) error {
|
||||
Some files were not shown because too many files have changed in this diff