Rename physical backend to storage and alias old value (#2456)

Authored by Jeff Mitchell on 2017-03-08 09:17:00 -05:00, committed by GitHub
parent 364a86bb0b
commit b1ed578f3d
20 changed files with 132 additions and 117 deletions
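At a glance: server config files now use a `storage` stanza (and `ha_storage` for a separate HA coordination backend), while the legacy `backend`/`ha_backend` names are still parsed as aliases. A minimal sketch of the two equivalent spellings, reusing the Consul example values from the docs changed below — a real config file would contain only one of them:

```hcl
# Preferred spelling after this change
storage "consul" {
  address = "127.0.0.1:8500"
  path    = "vault"
}

# Legacy spelling, still accepted and parsed into the same Storage struct
# backend "consul" {
#   address = "127.0.0.1:8500"
#   path    = "vault"
# }
```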

View File

@@ -173,8 +173,8 @@ func (c *ServerCommand) Run(args []string) int {
   }
   // Ensure that a backend is provided
-  if config.Backend == nil {
-    c.Ui.Output("A physical backend must be specified")
+  if config.Storage == nil {
+    c.Ui.Output("A storage backend must be specified")
     return 1
   }
@@ -194,11 +194,11 @@ func (c *ServerCommand) Run(args []string) int {
   // Initialize the backend
   backend, err := physical.NewBackend(
-    config.Backend.Type, c.logger, config.Backend.Config)
+    config.Storage.Type, c.logger, config.Storage.Config)
   if err != nil {
     c.Ui.Output(fmt.Sprintf(
-      "Error initializing backend of type %s: %s",
-      config.Backend.Type, err))
+      "Error initializing storage of type %s: %s",
+      config.Storage.Type, err))
     return 1
   }
@@ -224,7 +224,7 @@ func (c *ServerCommand) Run(args []string) int {
   coreConfig := &vault.CoreConfig{
     Physical: backend,
-    RedirectAddr: config.Backend.RedirectAddr,
+    RedirectAddr: config.Storage.RedirectAddr,
     HAPhysical: nil,
     Seal: seal,
     AuditBackends: c.AuditBackends,
@@ -244,39 +244,39 @@ func (c *ServerCommand) Run(args []string) int {
   var disableClustering bool
-  // Initialize the separate HA physical backend, if it exists
+  // Initialize the separate HA storage backend, if it exists
   var ok bool
-  if config.HABackend != nil {
+  if config.HAStorage != nil {
     habackend, err := physical.NewBackend(
-      config.HABackend.Type, c.logger, config.HABackend.Config)
+      config.HAStorage.Type, c.logger, config.HAStorage.Config)
     if err != nil {
       c.Ui.Output(fmt.Sprintf(
-        "Error initializing backend of type %s: %s",
-        config.HABackend.Type, err))
+        "Error initializing HA storage of type %s: %s",
+        config.HAStorage.Type, err))
       return 1
     }
     if coreConfig.HAPhysical, ok = habackend.(physical.HABackend); !ok {
-      c.Ui.Output("Specified HA backend does not support HA")
+      c.Ui.Output("Specified HA storage does not support HA")
       return 1
     }
     if !coreConfig.HAPhysical.HAEnabled() {
-      c.Ui.Output("Specified HA backend has HA support disabled; please consult documentation")
+      c.Ui.Output("Specified HA storage has HA support disabled; please consult documentation")
       return 1
     }
-    coreConfig.RedirectAddr = config.HABackend.RedirectAddr
-    disableClustering = config.HABackend.DisableClustering
+    coreConfig.RedirectAddr = config.HAStorage.RedirectAddr
+    disableClustering = config.HAStorage.DisableClustering
     if !disableClustering {
-      coreConfig.ClusterAddr = config.HABackend.ClusterAddr
+      coreConfig.ClusterAddr = config.HAStorage.ClusterAddr
     }
   } else {
     if coreConfig.HAPhysical, ok = backend.(physical.HABackend); ok {
-      coreConfig.RedirectAddr = config.Backend.RedirectAddr
-      disableClustering = config.Backend.DisableClustering
+      coreConfig.RedirectAddr = config.Storage.RedirectAddr
+      disableClustering = config.Storage.DisableClustering
       if !disableClustering {
-        coreConfig.ClusterAddr = config.Backend.ClusterAddr
+        coreConfig.ClusterAddr = config.Storage.ClusterAddr
       }
     }
   }
@@ -378,12 +378,12 @@ CLUSTER_SYNTHESIS_COMPLETE:
   c.reloadFuncsLock = coreConfig.ReloadFuncsLock
   // Compile server information for output later
-  info["backend"] = config.Backend.Type
+  info["storage"] = config.Storage.Type
   info["log level"] = logLevel
   info["mlock"] = fmt.Sprintf(
     "supported: %v, enabled: %v",
     mlock.Supported(), !config.DisableMlock && mlock.Supported())
-  infoKeys = append(infoKeys, "log level", "mlock", "backend")
+  infoKeys = append(infoKeys, "log level", "mlock", "storage")
   if coreConfig.ClusterAddr != "" {
     info["cluster address"] = coreConfig.ClusterAddr
@@ -394,16 +394,16 @@ CLUSTER_SYNTHESIS_COMPLETE:
     infoKeys = append(infoKeys, "redirect address")
   }
-  if config.HABackend != nil {
-    info["HA backend"] = config.HABackend.Type
-    infoKeys = append(infoKeys, "HA backend")
+  if config.HAStorage != nil {
+    info["HA storage"] = config.HAStorage.Type
+    infoKeys = append(infoKeys, "HA storage")
   } else {
-    // If the backend supports HA, then note it
+    // If the storage supports HA, then note it
     if coreConfig.HAPhysical != nil {
       if coreConfig.HAPhysical.HAEnabled() {
-        info["backend"] += " (HA available)"
+        info["storage"] += " (HA available)"
       } else {
-        info["backend"] += " (HA disabled)"
+        info["storage"] += " (HA disabled)"
       }
     }
   }

View File

@@ -21,8 +21,8 @@ import (
 // Config is the configuration for the vault server.
 type Config struct {
   Listeners []*Listener `hcl:"-"`
-  Backend *Backend `hcl:"-"`
-  HABackend *Backend `hcl:"-"`
+  Storage *Storage `hcl:"-"`
+  HAStorage *Storage `hcl:"-"`
   HSM *HSM `hcl:"-"`
@@ -51,7 +51,7 @@ func DevConfig(ha, transactional bool) *Config {
     DisableCache: false,
     DisableMlock: true,
-    Backend: &Backend{
+    Storage: &Storage{
       Type: "inmem",
     },
@@ -75,11 +75,11 @@ func DevConfig(ha, transactional bool) *Config {
   switch {
   case ha && transactional:
-    ret.Backend.Type = "inmem_transactional_ha"
+    ret.Storage.Type = "inmem_transactional_ha"
   case !ha && transactional:
-    ret.Backend.Type = "inmem_transactional"
+    ret.Storage.Type = "inmem_transactional"
   case ha && !transactional:
-    ret.Backend.Type = "inmem_ha"
+    ret.Storage.Type = "inmem_ha"
   }
   return ret
@@ -95,8 +95,8 @@ func (l *Listener) GoString() string {
   return fmt.Sprintf("*%#v", *l)
 }
-// Backend is the backend configuration for the server.
-type Backend struct {
+// Storage is the underlying storage configuration for the server.
+type Storage struct {
   Type string
   RedirectAddr string
   ClusterAddr string
@@ -104,7 +104,7 @@ type Backend struct {
   Config map[string]string
 }
-func (b *Backend) GoString() string {
+func (b *Storage) GoString() string {
   return fmt.Sprintf("*%#v", *b)
 }
@@ -215,14 +215,14 @@ func (c *Config) Merge(c2 *Config) *Config {
     result.Listeners = append(result.Listeners, l)
   }
-  result.Backend = c.Backend
-  if c2.Backend != nil {
-    result.Backend = c2.Backend
+  result.Storage = c.Storage
+  if c2.Storage != nil {
+    result.Storage = c2.Storage
   }
-  result.HABackend = c.HABackend
-  if c2.HABackend != nil {
-    result.HABackend = c2.HABackend
+  result.HAStorage = c.HAStorage
+  if c2.HAStorage != nil {
+    result.HAStorage = c2.HAStorage
   }
   result.HSM = c.HSM
@@ -349,6 +349,8 @@ func ParseConfig(d string, logger log.Logger) (*Config, error) {
   valid := []string{
     "atlas",
+    "storage",
+    "ha_storage",
     "backend",
     "ha_backend",
     "hsm",
@@ -366,17 +368,30 @@ func ParseConfig(d string, logger log.Logger) (*Config, error) {
     return nil, err
   }
+  // Look for storage but still support old backend
+  if o := list.Filter("storage"); len(o.Items) > 0 {
+    if err := parseStorage(&result, o, "storage"); err != nil {
+      return nil, fmt.Errorf("error parsing 'storage': %s", err)
+    }
+  } else {
   if o := list.Filter("backend"); len(o.Items) > 0 {
-    if err := parseBackends(&result, o); err != nil {
+    if err := parseStorage(&result, o, "backend"); err != nil {
       return nil, fmt.Errorf("error parsing 'backend': %s", err)
     }
   }
+  }
+  if o := list.Filter("ha_storage"); len(o.Items) > 0 {
+    if err := parseHAStorage(&result, o, "ha_storage"); err != nil {
+      return nil, fmt.Errorf("error parsing 'ha_storage': %s", err)
+    }
+  } else {
   if o := list.Filter("ha_backend"); len(o.Items) > 0 {
-    if err := parseHABackends(&result, o); err != nil {
+    if err := parseHAStorage(&result, o, "ha_backend"); err != nil {
       return nil, fmt.Errorf("error parsing 'ha_backend': %s", err)
     }
   }
+  }
   if o := list.Filter("hsm"); len(o.Items) > 0 {
     if err := parseHSMs(&result, o); err != nil {
@@ -476,22 +491,22 @@ func isTemporaryFile(name string) bool {
     (strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#")) // emacs
 }
-func parseBackends(result *Config, list *ast.ObjectList) error {
+func parseStorage(result *Config, list *ast.ObjectList, name string) error {
   if len(list.Items) > 1 {
-    return fmt.Errorf("only one 'backend' block is permitted")
+    return fmt.Errorf("only one %q block is permitted", name)
   }
   // Get our item
   item := list.Items[0]
-  key := "backend"
+  key := name
   if len(item.Keys) > 0 {
     key = item.Keys[0].Token.Value().(string)
   }
   var m map[string]string
   if err := hcl.DecodeObject(&m, item.Val); err != nil {
-    return multierror.Prefix(err, fmt.Sprintf("backend.%s:", key))
+    return multierror.Prefix(err, fmt.Sprintf("%s.%s:", name, key))
   }
   // Pull out the redirect address since it's common to all backends
@@ -516,12 +531,12 @@ func parseBackends(result *Config, list *ast.ObjectList) error {
   if v, ok := m["disable_clustering"]; ok {
     disableClustering, err = strconv.ParseBool(v)
     if err != nil {
-      return multierror.Prefix(err, fmt.Sprintf("backend.%s:", key))
+      return multierror.Prefix(err, fmt.Sprintf("%s.%s:", name, key))
     }
     delete(m, "disable_clustering")
   }
-  result.Backend = &Backend{
+  result.Storage = &Storage{
     RedirectAddr: redirectAddr,
     ClusterAddr: clusterAddr,
     DisableClustering: disableClustering,
@@ -531,22 +546,22 @@ func parseBackends(result *Config, list *ast.ObjectList) error {
   return nil
 }
-func parseHABackends(result *Config, list *ast.ObjectList) error {
+func parseHAStorage(result *Config, list *ast.ObjectList, name string) error {
   if len(list.Items) > 1 {
-    return fmt.Errorf("only one 'ha_backend' block is permitted")
+    return fmt.Errorf("only one %q block is permitted", name)
   }
   // Get our item
   item := list.Items[0]
-  key := "backend"
+  key := name
   if len(item.Keys) > 0 {
     key = item.Keys[0].Token.Value().(string)
   }
   var m map[string]string
   if err := hcl.DecodeObject(&m, item.Val); err != nil {
-    return multierror.Prefix(err, fmt.Sprintf("ha_backend.%s:", key))
+    return multierror.Prefix(err, fmt.Sprintf("%s.%s:", name, key))
   }
   // Pull out the redirect address since it's common to all backends
@@ -571,12 +586,12 @@ func parseHABackends(result *Config, list *ast.ObjectList) error {
   if v, ok := m["disable_clustering"]; ok {
     disableClustering, err = strconv.ParseBool(v)
     if err != nil {
-      return multierror.Prefix(err, fmt.Sprintf("backend.%s:", key))
+      return multierror.Prefix(err, fmt.Sprintf("%s.%s:", name, key))
     }
     delete(m, "disable_clustering")
   }
-  result.HABackend = &Backend{
+  result.HAStorage = &Storage{
     RedirectAddr: redirectAddr,
     ClusterAddr: clusterAddr,
     DisableClustering: disableClustering,

View File

@@ -37,7 +37,7 @@ func TestLoadConfigFile(t *testing.T) {
       },
     },
-    Backend: &Backend{
+    Storage: &Storage{
       Type: "consul",
       RedirectAddr: "foo",
       Config: map[string]string{
@@ -45,7 +45,7 @@ func TestLoadConfigFile(t *testing.T) {
       },
     },
-    HABackend: &Backend{
+    HAStorage: &Storage{
       Type: "consul",
      RedirectAddr: "snafu",
       Config: map[string]string{
@@ -105,7 +105,7 @@ func TestLoadConfigFile_json(t *testing.T) {
       },
     },
-    Backend: &Backend{
+    Storage: &Storage{
       Type: "consul",
       Config: map[string]string{
         "foo": "bar",
@@ -171,7 +171,7 @@ func TestLoadConfigFile_json2(t *testing.T) {
       },
     },
-    Backend: &Backend{
+    Storage: &Storage{
       Type: "consul",
       Config: map[string]string{
         "foo": "bar",
@@ -179,7 +179,7 @@ func TestLoadConfigFile_json2(t *testing.T) {
       DisableClustering: true,
     },
-    HABackend: &Backend{
+    HAStorage: &Storage{
       Type: "consul",
       Config: map[string]string{
         "bar": "baz",
@@ -234,7 +234,7 @@ func TestLoadConfigDir(t *testing.T) {
       },
     },
-    Backend: &Backend{
+    Storage: &Storage{
       Type: "consul",
       Config: map[string]string{
         "foo": "bar",

View File

@@ -11,7 +11,7 @@
     "node_id": "foo_node"
   }
 }],
-"backend": {
+"storage": {
   "consul": {
     "foo": "bar",
     "disable_clustering": "true"

View File

@@ -12,12 +12,12 @@
     }
   }
 ],
-"backend":{
+"storage":{
   "consul":{
     "foo":"bar"
   }
 },
-"ha_backend":{
+"ha_storage":{
   "consul":{
     "bar":"baz",
     "disable_clustering": "true"

View File

@@ -64,8 +64,8 @@ func TestServer_GoodSeparateHA(t *testing.T) {
     t.Fatalf("bad: %d\n\n%s\n\n%s", code, ui.ErrorWriter.String(), ui.OutputWriter.String())
   }
-  if !strings.Contains(ui.OutputWriter.String(), "HA Backend:") {
-    t.Fatalf("did not find HA Backend: %s", ui.OutputWriter.String())
+  if !strings.Contains(ui.OutputWriter.String(), "HA Storage:") {
+    t.Fatalf("did not find HA Storage: %s", ui.OutputWriter.String())
   }
 }

View File

@@ -13,7 +13,7 @@ The format of this file is [HCL](https://github.com/hashicorp/hcl) or JSON.
 An example configuration is shown below:
 ```javascript
-backend "consul" {
+storage "consul" {
   address = "127.0.0.1:8500"
   path = "vault"
 }
@@ -37,15 +37,15 @@ sending a SIGHUP to the server process. These are denoted below.
 ## Parameters
-- `backend` <tt>([StorageBackend][storage-backend]: \<required\>)</tt> -
+- `storage` <tt>([StorageBackend][storage-backend]: \<required\>)</tt> -
   Configures the storage backend where Vault data is stored. Please see the
   [storage backends documentation][storage-backend] for the full list of
   available storage backends.
-- `ha_backend` <tt>([StorageBackend][storage-backend]: nil)</tt> - Configures
+- `ha_storage` <tt>([StorageBackend][storage-backend]: nil)</tt> - Configures
   the storage backend where Vault HA coordination will take place. This must be
   an HA-supporting backend. If not set, HA will be attempted on the backend
-  given in the `backend` parameter.
+  given in the `storage` parameter.
 - `cluster_name` `(string: <generated>)` Specifies the identifier for the
   Vault cluster. If omitted, Vault will generate a value. When connecting to

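To illustrate the two parameters documented above, here is a hedged sketch of a configuration that pairs a `storage` block with a separate `ha_storage` block for HA coordination; the Consul address mirrors the earlier examples, and the second block's `path` is purely illustrative:

```hcl
storage "consul" {
  address = "127.0.0.1:8500"
  path    = "vault"
}

# Optional: coordinate HA through a separate, HA-capable backend
ha_storage "consul" {
  address = "127.0.0.1:8500"
  path    = "vault-ha/"   # illustrative value, not taken from the commit
}
```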
View File

@@ -24,7 +24,7 @@ to the storage container.
 you may be referred to the original author.
 ```hcl
-backend "azure" {
+storage "azure" {
   accountName = "my-storage-account"
   accountKey = "abcd1234"
   container = "container-efgh5678"
@@ -52,7 +52,7 @@ This example shows configuring the Azure storage backend with a custom number of
 maximum parallel connections.
 ```hcl
-backend "azure" {
+storage "azure" {
   accountName = "my-storage-account"
   accountKey = "abcd1234"
   container = "container-efgh5678"

View File

@@ -22,7 +22,7 @@ check.
 by HashiCorp.
 ```hcl
-backend "consul" {
+storage "consul" {
   address = "127.0.0.1:8500"
   path = "vault"
 }
@@ -139,7 +139,7 @@ This example shows a sample physical backend configuration which communicates
 with a local Consul agent running on `127.0.0.1:8500`.
 ```hcl
-backend "consul" {}
+storage "consul" {}
 ```
 ### Detailed Customization
@@ -148,7 +148,7 @@ This example shows communicating with Consul on a custom address with an ACL
 token.
 ```hcl
-backend "consul" {
+storage "consul" {
   address = "10.5.7.92:8194"
   token = "abcd1234"
 }
@@ -161,7 +161,7 @@ This path must be readable and writable by the Consul ACL token, if Consul
 configured to use ACLs.
 ```hcl
-backend "consul" {
+storage "consul" {
   path = "vault/"
 }
 ```
@@ -171,7 +171,7 @@ backend "consul" {
 This example shows communicating with Consul over a local unix socket.
 ```hcl
-backend "consul" {
+storage "consul" {
   address = "unix:///tmp/.consul.http.sock"
 }
 ```
@@ -182,7 +182,7 @@ This example shows using a custom CA, certificate, and key file to securely
 communicate with Consul over TLS.
 ```hcl
-backend "consul" {
+storage "consul" {
   scheme = "https"
   tls_ca_file = "/etc/pem/vault.ca"
   tls_cert_file = "/etc/pem/vault.cert"

View File

@@ -23,7 +23,7 @@ The DynamoDB storage backend is used to persist Vault's data in
 you may be referred to the original author.
 ```hcl
-backend "dynamodb" {
+storage "dynamodb" {
   ha_enabled = true
   region = "us-west-2"
   table = "vault-data"
@@ -99,7 +99,7 @@ discussed in more detail in the [HA concepts page](/docs/concepts/ha.html).
 This example shows using a custom table name and read/write capacity.
 ```hcl
-backend "dynamodb" {
+storage "dynamodb" {
   table = "my-vault-data"
   read_capacity = 10
@@ -112,7 +112,7 @@ backend "dynamodb" {
 This example show enabling high availability for the DynamoDB storage backend.
 ```hcl
-backend "dynamodb" {
+storage "dynamodb" {
   ha_enabled = true
   redirect_addr = "vault-leader.my-company.internal"
 }

View File

@@ -18,13 +18,13 @@ based on the version of the Etcd cluster.
 The v2 API has known issues with HA support and should not be used in HA
 scenarios.
-- **Community Supported** the Etcd storage backend is supported by the
-  community. While it has undergone review by HashiCorp employees, they may not
-  be as knowledgeable about the technology. If you encounter problems with them,
-  you may be referred to the original author.
+- **Community Supported** the Etcd storage backend is supported by CoreOS.
+  While it has undergone review by HashiCorp employees, they may not be as
+  knowledgeable about the technology. If you encounter problems with them, you
+  may be referred to the original author.
 ```hcl
-backend "etcd" {
+storage "etcd" {
   address = "http://localhost:2379"
   etcd_api = "v3"
 }
@@ -92,7 +92,7 @@ discussed in more detail in the [HA concepts page](/docs/concepts/ha.html).
 This example shows connecting to the Etcd cluster using a username and password.
 ```hcl
-backend "etcd" {
+storage "etcd" {
   username = "user1234"
   password = "pass5678"
 }
@@ -103,7 +103,7 @@ backend "etcd" {
 This example shows storing data in a custom path.
 ```hcl
-backend "etcd" {
+storage "etcd" {
   path = "my-vault-data/"
 }
 ```
@@ -113,7 +113,7 @@ backend "etcd" {
 This example show enabling high availability for the Etcd storage backend.
 ```hcl
-backend "etcd" {
+storage "etcd" {
   ha_enabled = true
   redirect_addr = "vault-leader.my-company.internal"
 }

View File

@@ -21,7 +21,7 @@ situations, or to develop locally where durability is not critical.
 HashiCorp.
 ```hcl
-backend "file" {
+storage "file" {
   path = "/mnt/vault/data"
 }
 ```
@@ -41,7 +41,7 @@ This example shows the Filesytem storage backend being mounted at
 `/mnt/vault/data`.
 ```hcl
-backend "file" {
+storage "file" {
   path = "/mnt/vault/data"
 }
 ```

View File

@@ -21,7 +21,7 @@ The Google Cloud storage backend is used to persist Vault's data in
 you may be referred to the original author.
 ```hcl
-backend "gcs" {
+storage "gcs" {
   bucket = "my-storage-bucket"
   credentials_file = "/tmp/credentials.json"
 }
@@ -49,7 +49,7 @@ backend "gcs" {
 This example shows a default configuration for the Google Cloud Storage backend.
 ```hcl
-backend "gcs" {
+storage "gcs" {
   bucket = "my-storage-bucket"
   credentials_file = "/tmp/credentials.json"
 }

View File

@@ -6,7 +6,7 @@ description: |-
   The In-Memory storage backend is used to persist Vault's data entirely
   in-memory on the same machine in which Vault is running. This is useful for
   development and experimentation, but use of this backend is highly discouraged
-  in production.
+  in production except in very specific use-cases.
 ---
 # In-Memory Storage Backend
@@ -27,7 +27,7 @@ is restarted.
 HashiCorp.
 ```hcl
-backend "inmem" {}
+storage "inmem" {}
 ```
 ## `inmem` Parameters
@@ -39,5 +39,5 @@ The In-Memory storage backend has no configuration parameters.
 This example shows activating the In-Memory storage backend.
 ```hcl
-backend "inmem" {}
+storage "inmem" {}
 ```

View File

@@ -20,10 +20,10 @@ choose one from the navigation on the left.
 ## Configuration
 Storage backend configuration is done through the Vault configuration file using
-the `backend` stanza:
+the `storage` stanza:
 ```hcl
-backend [NAME] {
+storage [NAME] {
   [PARAMETERS...]
 }
 ```
@@ -31,7 +31,7 @@ backend [NAME] {
 For example:
 ```hcl
-backend "file" {
+storage "file" {
   path = "/mnt/vault/data"
 }
 ```

View File

@@ -21,7 +21,7 @@ server or cluster.
 you may be referred to the original author.
 ```hcl
-backend "mysql" {
+storage "mysql" {
   username = "user1234"
   password = "secret123!"
   database = "vault"
@@ -58,7 +58,7 @@ This example shows configuring the MySQL backend to use a custom database and
 table name.
 ```hcl
-backend "mysql" {
+storage "mysql" {
   database = "my-vault"
   table = "vault-data"
   username = "user1234"

View File

@@ -21,7 +21,7 @@ The PostgreSQL storage backend is used to persist Vault's data in a
 you may be referred to the original author.
 ```hcl
-backend "postgresql" {
+storage "postgresql" {
   connection_url = "postgres://user123:secret123!@localhost:5432/vault"
 }
 ```
@@ -60,7 +60,7 @@ This example shows connecting to a PostgresSQL cluster using full SSL
 verification (recommended).
 ```hcl
-backend "postgresql" {
+storage "postgresql" {
   connection_url = "postgres://user:pass@localhost:5432/database?sslmode=verify-full"
 }
 ```
@@ -69,7 +69,7 @@ To disable SSL verification (not recommended), replace `verify-full` with
 `disable`:
 ```hcl
-backend "postgresql" {
+storage "postgresql" {
   connection_url = "postgres://user:pass@localhost:5432/database?sslmode=disable"
 }
 ```

View File

@@ -21,7 +21,7 @@ bucket.
 you may be referred to the original author.
 ```hcl
-backend "s3" {
+storage "s3" {
   access_key = "abcd1234"
   secret_key = "defg5678"
   bucket = "my-bucket"
@@ -62,7 +62,7 @@ cause Vault to attempt to retrieve credentials from the AWS metadata service.
 This example shows using Amazon S3 as a storage backed.
 ```hcl
-backend "s3" {
+storage "s3" {
   access_key = "abcd1234"
   secret_key = "defg5678"
   bucket = "my-bucket"

View File

@@ -22,7 +22,7 @@ The Swift storage backend is used to persist Vault's data in an
 you may be referred to the original author.
 ```hcl
-backend "swift" {
+storage "swift" {
   auth_url = "https://..."
   username = "admin"
   password = "secret123!"
@@ -59,7 +59,7 @@ backend "swift" {
 This example shows a default configuration for Swift.
 ```hcl
-backend "swift" {
+storage "swift" {
   auth_url = "https://os.initernal/v1/auth"
   container = "container-239"

View File

@@ -20,7 +20,7 @@ The Zookeeper storage backend is used to persist Vault's data in
 you may be referred to the original author.
 ```hcl
-backend "zookeeper" {
+storage "zookeeper" {
   address = "localhost:2181"
   path = "vault/"
 }
@@ -89,7 +89,7 @@ This example shows configuring Vault to communicate with a Zookeeper
 installation running on a custom port and to store data at a custom path.
 ```hcl
-backend "zookeeper" {
+storage "zookeeper" {
   address = "localhost:3253"
   path = "my-vault-data/"
 }
@@ -102,7 +102,7 @@ access only to the user "vaultUser". As per Zookeeper's ACL model, the digest
 value in `znode_owner` must match the user in `znode_owner`.
 ```hcl
-backend "zookeeper" {
+storage "zookeeper" {
   znode_owner = "digest:vaultUser:raxgVAfnDRljZDAcJFxznkZsExs="
   auth_info = "digest:vaultUser:abc"
 }
@@ -115,7 +115,7 @@ This example instructs Vault to only allow access from localhost. As this is the
 for the ACL check.
 ```hcl
-backend "zookeeper" {
+storage "zookeeper" {
   znode_owner = "ip:127.0.0.1"
 }
 ```