Add physical backend migrator command (#5143)
@@ -375,6 +375,13 @@ func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) {
            BaseCommand: getBaseCommand(),
        }, nil
    },
    "operator migrate": func() (cli.Command, error) {
        return &OperatorMigrateCommand{
            BaseCommand:      getBaseCommand(),
            PhysicalBackends: physicalBackends,
            ShutdownCh:       MakeShutdownCh(),
        }, nil
    },
    "operator rekey": func() (cli.Command, error) {
        return &OperatorRekeyCommand{
            BaseCommand: getBaseCommand(),
command/operator_migrate.go (new file, 331 lines)
@@ -0,0 +1,331 @@
package command

import (
    "context"
    "fmt"
    "io/ioutil"
    "os"
    "sort"
    "strings"
    "time"

    "github.com/hashicorp/errwrap"
    log "github.com/hashicorp/go-hclog"
    "github.com/hashicorp/hcl"
    "github.com/hashicorp/hcl/hcl/ast"
    "github.com/hashicorp/vault/command/server"
    "github.com/hashicorp/vault/helper/logging"
    "github.com/hashicorp/vault/physical"
    "github.com/mitchellh/cli"
    "github.com/pkg/errors"
    "github.com/posener/complete"
)

var _ cli.Command = (*OperatorMigrateCommand)(nil)
var _ cli.CommandAutocomplete = (*OperatorMigrateCommand)(nil)

var errAbort = errors.New("Migration aborted")

type OperatorMigrateCommand struct {
    *BaseCommand

    PhysicalBackends map[string]physical.Factory
    flagConfig       string
    flagStart        string
    flagReset        bool
    logger           log.Logger
    ShutdownCh       chan struct{}
}

type migratorConfig struct {
    StorageSource      *server.Storage `hcl:"-"`
    StorageDestination *server.Storage `hcl:"-"`
}

func (c *OperatorMigrateCommand) Synopsis() string {
    return "Migrates Vault data between storage backends"
}

func (c *OperatorMigrateCommand) Help() string {
    helpText := `
Usage: vault operator migrate [options]

  This command starts a storage backend migration process to copy all data
  from one backend to another. This operates directly on encrypted data and
  does not require a Vault server, nor any unsealing.

  Start a migration with a configuration file:

      $ vault operator migrate -config=migrate.hcl

  For more information, please see the documentation.

` + c.Flags().Help()

    return strings.TrimSpace(helpText)
}

func (c *OperatorMigrateCommand) Flags() *FlagSets {
    set := NewFlagSets(c.UI)
    f := set.NewFlagSet("Command Options")

    f.StringVar(&StringVar{
        Name:   "config",
        Target: &c.flagConfig,
        Completion: complete.PredictOr(
            complete.PredictFiles("*.hcl"),
        ),
        Usage: "Path to a configuration file. This configuration file should " +
            "contain only migrator directives.",
    })

    f.StringVar(&StringVar{
        Name:   "start",
        Target: &c.flagStart,
        Usage:  "Only copy keys lexicographically at or after this value.",
    })

    f.BoolVar(&BoolVar{
        Name:   "reset",
        Target: &c.flagReset,
        Usage:  "Reset the migration lock. No migration will occur.",
    })

    return set
}

func (c *OperatorMigrateCommand) AutocompleteArgs() complete.Predictor {
    return nil
}

func (c *OperatorMigrateCommand) AutocompleteFlags() complete.Flags {
    return c.Flags().Completions()
}

func (c *OperatorMigrateCommand) Run(args []string) int {
    c.logger = logging.NewVaultLogger(log.Info)
    f := c.Flags()

    if err := f.Parse(args); err != nil {
        c.UI.Error(err.Error())
        return 1
    }

    if c.flagConfig == "" {
        c.UI.Error("Must specify exactly one config path using -config")
        return 1
    }

    config, err := c.loadMigratorConfig(c.flagConfig)
    if err != nil {
        c.UI.Error(fmt.Sprintf("Error loading configuration from %s: %s", c.flagConfig, err))
        return 1
    }

    if err := c.migrate(config); err != nil {
        if err == errAbort {
            return 0
        }
        c.UI.Error(fmt.Sprintf("Error migrating: %s", err))
        return 2
    }

    if c.flagReset {
        c.UI.Output("Success! Migration lock reset (if it was set).")
    } else {
        c.UI.Output("Success! All of the keys have been migrated.")
    }

    return 0
}

// migrate attempts to instantiate the source and destination backends,
// and then invokes the migration at the root of the keyspace.
func (c *OperatorMigrateCommand) migrate(config *migratorConfig) error {
    from, err := c.newBackend(config.StorageSource.Type, config.StorageSource.Config)
    if err != nil {
        return errwrap.Wrapf("error mounting 'storage_source': {{err}}", err)
    }

    if c.flagReset {
        if err := SetMigration(from, false); err != nil {
            return errwrap.Wrapf("error resetting migration lock: {{err}}", err)
        }
        return nil
    }

    to, err := c.newBackend(config.StorageDestination.Type, config.StorageDestination.Config)
    if err != nil {
        return errwrap.Wrapf("error mounting 'storage_destination': {{err}}", err)
    }

    migrationStatus, err := CheckMigration(from)
    if err != nil {
        return errors.New("error checking migration status")
    }

    if migrationStatus != nil {
        return fmt.Errorf("Storage migration in progress (started: %s).", migrationStatus.Start.Format(time.RFC3339))
    }

    if err := SetMigration(from, true); err != nil {
        return errwrap.Wrapf("error setting migration lock: {{err}}", err)
    }

    defer SetMigration(from, false)

    ctx, cancelFunc := context.WithCancel(context.Background())

    doneCh := make(chan error)
    go func() {
        doneCh <- c.migrateAll(ctx, from, to)
    }()

    select {
    case err := <-doneCh:
        return err
    case <-c.ShutdownCh:
        c.UI.Output("==> Migration shutdown triggered\n")
        cancelFunc()
        <-doneCh
        return errAbort
    }
    return nil
}
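
The shutdown handling above is the standard Go pattern of racing the copy's completion channel against the shutdown channel and cancelling a context when shutdown wins. A self-contained sketch of that pattern, separate from the patch itself (all names and timings here are illustrative):

package main

import (
    "context"
    "fmt"
    "time"
)

// work stands in for migrateAll: it loops until its context is cancelled.
func work(ctx context.Context) error {
    for {
        select {
        case <-ctx.Done():
            return nil
        default:
            time.Sleep(10 * time.Millisecond)
        }
    }
}

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    // Stand-in for the command's ShutdownCh.
    shutdownCh := make(chan struct{})
    go func() {
        time.Sleep(50 * time.Millisecond)
        close(shutdownCh)
    }()

    // Run the long copy in a goroutine and report back on doneCh.
    doneCh := make(chan error)
    go func() {
        doneCh <- work(ctx)
    }()

    select {
    case err := <-doneCh:
        fmt.Println("finished:", err)
    case <-shutdownCh:
        fmt.Println("shutdown triggered")
        cancel()
        <-doneCh // wait for the worker to observe the cancellation
    }
}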

// migrateAll copies all keys in lexicographic order.
func (c *OperatorMigrateCommand) migrateAll(ctx context.Context, from physical.Backend, to physical.Backend) error {
    return dfsScan(ctx, from, func(ctx context.Context, path string) error {
        if path < c.flagStart || path == migrationLock {
            return nil
        }

        entry, err := from.Get(ctx, path)

        if err != nil {
            return errwrap.Wrapf("error reading entry: {{err}}", err)
        }

        if entry == nil {
            return nil
        }

        if err := to.Put(ctx, entry); err != nil {
            return errwrap.Wrapf("error writing entry: {{err}}", err)
        }
        c.logger.Info("moved key: " + path)
        return nil
    })
}
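
The callback above skips two things: any key lexicographically before -start, and the core/migration lock entry itself. A tiny standalone sketch of that predicate (the sample keys are made up):

package main

import "fmt"

const migrationLock = "core/migration"

// shouldCopy mirrors the skip condition inside migrateAll's callback.
func shouldCopy(path, start string) bool {
    return path >= start && path != migrationLock
}

func main() {
    for _, k := range []string{"aaa", "core/migration", "mmm", "zzz"} {
        // With -start=m only "mmm" and "zzz" are copied.
        fmt.Printf("%-16s copy: %v\n", k, shouldCopy(k, "m"))
    }
}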

func (c *OperatorMigrateCommand) newBackend(kind string, conf map[string]string) (physical.Backend, error) {
    factory, ok := c.PhysicalBackends[kind]
    if !ok {
        return nil, fmt.Errorf("no Vault storage backend named: %+q", kind)
    }

    return factory(conf, c.logger)
}

// loadMigratorConfig loads the configuration at the given path.
func (c *OperatorMigrateCommand) loadMigratorConfig(path string) (*migratorConfig, error) {
    fi, err := os.Stat(path)
    if err != nil {
        return nil, err
    }

    if fi.IsDir() {
        return nil, fmt.Errorf("location is a directory, not a file")
    }

    d, err := ioutil.ReadFile(path)
    if err != nil {
        return nil, err
    }

    obj, err := hcl.ParseBytes(d)
    if err != nil {
        return nil, err
    }

    var result migratorConfig
    if err := hcl.DecodeObject(&result, obj); err != nil {
        return nil, err
    }

    list, ok := obj.Node.(*ast.ObjectList)
    if !ok {
        return nil, fmt.Errorf("error parsing: file doesn't contain a root object")
    }

    // Look for storage_* stanzas
    for _, stanza := range []string{"storage_source", "storage_destination"} {
        o := list.Filter(stanza)
        if len(o.Items) != 1 {
            return nil, fmt.Errorf("exactly one '%s' block is required", stanza)
        }

        if err := parseStorage(&result, o, stanza); err != nil {
            return nil, errwrap.Wrapf(fmt.Sprintf("error parsing '%s': {{err}}", stanza), err)
        }
    }
    return &result, nil
}

// parseStorage reuses the existing storage parsing that's part of the main Vault
// config processing, but only keeps the storage result.
func parseStorage(result *migratorConfig, list *ast.ObjectList, name string) error {
    tmpConfig := new(server.Config)

    if err := server.ParseStorage(tmpConfig, list, name); err != nil {
        return err
    }

    switch name {
    case "storage_source":
        result.StorageSource = tmpConfig.Storage
    case "storage_destination":
        result.StorageDestination = tmpConfig.Storage
    default:
        return fmt.Errorf("unknown storage name: %s", name)
    }

    return nil
}

// dfsScan will invoke cb with every key from source.
// Keys will be traversed in lexicographic, depth-first order.
func dfsScan(ctx context.Context, source physical.Backend, cb func(ctx context.Context, path string) error) error {
    dfs := []string{""}

    for l := len(dfs); l > 0; l = len(dfs) {
        key := dfs[len(dfs)-1]
        if key == "" || strings.HasSuffix(key, "/") {
            children, err := source.List(ctx, key)
            if err != nil {
                return errwrap.Wrapf("failed to scan for children: {{err}}", err)
            }
            sort.Strings(children)

            // remove List-triggering key and add children in reverse order
            dfs = dfs[:len(dfs)-1]
            for i := len(children) - 1; i >= 0; i-- {
                dfs = append(dfs, key+children[i])
            }
        } else {
            err := cb(ctx, key)
            if err != nil {
                return err
            }

            dfs = dfs[:len(dfs)-1]
        }

        select {
        case <-ctx.Done():
            return nil
        default:
        }
    }
    return nil
}
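
To make the traversal order concrete, here is a standalone sketch, not part of the patch, of the same stack-based scan over a toy lister in which a trailing '/' marks a folder, as with physical.Backend.List; the key set is illustrative:

package main

import (
    "fmt"
    "sort"
    "strings"
)

// list mimics physical.Backend.List for a fixed key set: it returns the
// immediate children of prefix, with "/"-suffixed entries for subfolders.
func list(keys []string, prefix string) []string {
    seen := map[string]bool{}
    var out []string
    for _, k := range keys {
        if !strings.HasPrefix(k, prefix) {
            continue
        }
        rest := strings.TrimPrefix(k, prefix)
        if i := strings.Index(rest, "/"); i >= 0 {
            rest = rest[:i+1]
        }
        if !seen[rest] {
            seen[rest] = true
            out = append(out, rest)
        }
    }
    return out
}

func main() {
    keys := []string{"b", "a/y", "a/x", "c/d/e"}

    // Same stack-based DFS as dfsScan: folders are expanded in sorted
    // order, leaves are visited, so output is lexicographic depth-first.
    dfs := []string{""}
    for len(dfs) > 0 {
        key := dfs[len(dfs)-1]
        dfs = dfs[:len(dfs)-1]
        if key == "" || strings.HasSuffix(key, "/") {
            children := list(keys, key)
            sort.Strings(children)
            for i := len(children) - 1; i >= 0; i-- {
                dfs = append(dfs, key+children[i])
            }
            continue
        }
        fmt.Println(key)
    }
}

Running it prints a/x, a/y, b, c/d/e: the keys in sorted order even though folders are only discovered lazily.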

command/operator_migrate_test.go (new file, 304 lines)
@@ -0,0 +1,304 @@
package command

import (
    "bytes"
    "context"
    "fmt"
    "io/ioutil"
    "math/rand"
    "os"
    "path/filepath"
    "reflect"
    "sort"
    "strings"
    "testing"
    "time"

    "github.com/go-test/deep"
    log "github.com/hashicorp/go-hclog"
    "github.com/hashicorp/vault/command/server"
    "github.com/hashicorp/vault/helper/base62"
    "github.com/hashicorp/vault/helper/testhelpers"
    "github.com/hashicorp/vault/physical"
)

func init() {
    rand.Seed(time.Now().UnixNano())
}

func TestMigration(t *testing.T) {
    t.Run("Default", func(t *testing.T) {
        data := generateData()

        fromFactory := physicalBackends["file"]

        folder := filepath.Join(os.TempDir(), testhelpers.RandomWithPrefix("migrator"))
        defer os.RemoveAll(folder)
        confFrom := map[string]string{
            "path": folder,
        }

        from, err := fromFactory(confFrom, nil)
        if err != nil {
            t.Fatal(err)
        }
        if err := storeData(from, data); err != nil {
            t.Fatal(err)
        }

        toFactory := physicalBackends["inmem"]
        confTo := map[string]string{}
        to, err := toFactory(confTo, nil)
        if err != nil {
            t.Fatal(err)
        }

        cmd := OperatorMigrateCommand{
            logger: log.NewNullLogger(),
        }
        if err := cmd.migrateAll(context.Background(), from, to); err != nil {
            t.Fatal(err)
        }

        if err := compareStoredData(to, data, ""); err != nil {
            t.Fatal(err)
        }
    })

    t.Run("Start option", func(t *testing.T) {
        data := generateData()

        fromFactory := physicalBackends["inmem"]
        confFrom := map[string]string{}
        from, err := fromFactory(confFrom, nil)
        if err != nil {
            t.Fatal(err)
        }
        if err := storeData(from, data); err != nil {
            t.Fatal(err)
        }

        toFactory := physicalBackends["file"]
        folder := filepath.Join(os.TempDir(), testhelpers.RandomWithPrefix("migrator"))
        defer os.RemoveAll(folder)
        confTo := map[string]string{
            "path": folder,
        }

        to, err := toFactory(confTo, nil)
        if err != nil {
            t.Fatal(err)
        }

        const start = "m"

        cmd := OperatorMigrateCommand{
            logger:    log.NewNullLogger(),
            flagStart: start,
        }
        if err := cmd.migrateAll(context.Background(), from, to); err != nil {
            t.Fatal(err)
        }

        if err := compareStoredData(to, data, start); err != nil {
            t.Fatal(err)
        }
    })

    t.Run("Config parsing", func(t *testing.T) {
        cmd := new(OperatorMigrateCommand)

        cfgName := filepath.Join(os.TempDir(), testhelpers.RandomWithPrefix("migrator"))
        ioutil.WriteFile(cfgName, []byte(`
storage_source "src_type" {
  path = "src_path"
}

storage_destination "dest_type" {
  path = "dest_path"
}`), 0644)
        defer os.Remove(cfgName)

        expCfg := &migratorConfig{
            StorageSource: &server.Storage{
                Type: "src_type",
                Config: map[string]string{
                    "path": "src_path",
                },
            },
            StorageDestination: &server.Storage{
                Type: "dest_type",
                Config: map[string]string{
                    "path": "dest_path",
                },
            },
        }
        cfg, err := cmd.loadMigratorConfig(cfgName)
        if err != nil {
            t.Fatal(err)
        }
        if diff := deep.Equal(cfg, expCfg); diff != nil {
            t.Fatal(diff)
        }

        verifyBad := func(cfg string) {
            ioutil.WriteFile(cfgName, []byte(cfg), 0644)
            _, err := cmd.loadMigratorConfig(cfgName)
            if err == nil {
                t.Fatalf("expected error but none received from: %v", cfg)
            }
        }

        // missing source
        verifyBad(`
storage_destination "dest_type" {
  path = "dest_path"
}`)

        // missing destination
        verifyBad(`
storage_source "src_type" {
  path = "src_path"
}`)

        // duplicate source
        verifyBad(`
storage_source "src_type" {
  path = "src_path"
}

storage_source "src_type2" {
  path = "src_path"
}

storage_destination "dest_type" {
  path = "dest_path"
}`)

        // duplicate destination
        verifyBad(`
storage_source "src_type" {
  path = "src_path"
}

storage_destination "dest_type" {
  path = "dest_path"
}

storage_destination "dest_type2" {
  path = "dest_path"
}`)
    })

    t.Run("DFS Scan", func(t *testing.T) {
        s, _ := physicalBackends["inmem"](map[string]string{}, nil)

        data := generateData()
        data["cc"] = []byte{}
        data["c/d/e/f"] = []byte{}
        data["c/d/e/g"] = []byte{}
        data["c"] = []byte{}
        storeData(s, data)

        l := randomLister{s}

        var out []string
        dfsScan(context.Background(), l, func(ctx context.Context, path string) error {
            out = append(out, path)
            return nil
        })

        var keys []string
        for key := range data {
            keys = append(keys, key)
        }
        sort.Strings(keys)
        if !reflect.DeepEqual(keys, out) {
            t.Fatalf("expected equal: %v, %v", keys, out)
        }
    })
}

// randomLister wraps a physical backend, providing a List method
// that returns results in a random order.
type randomLister struct {
    b physical.Backend
}

func (l randomLister) List(ctx context.Context, path string) ([]string, error) {
    result, err := l.b.List(ctx, path)
    if err != nil {
        return nil, err
    }
    rand.Shuffle(len(result), func(i, j int) {
        result[i], result[j] = result[j], result[i]
    })
    return result, err
}

func (l randomLister) Get(ctx context.Context, path string) (*physical.Entry, error) {
    return l.b.Get(ctx, path)
}

func (l randomLister) Put(ctx context.Context, entry *physical.Entry) error {
    return l.b.Put(ctx, entry)
}

func (l randomLister) Delete(ctx context.Context, path string) error {
    return l.b.Delete(ctx, path)
}

// generateData creates a map of 500 random keys and values
func generateData() map[string][]byte {
    result := make(map[string][]byte)
    for i := 0; i < 500; i++ {
        segments := make([]string, rand.Intn(8)+1)
        for j := 0; j < len(segments); j++ {
            s, _ := base62.Random(6, false)
            segments[j] = s
        }
        data := make([]byte, 100)
        rand.Read(data)
        result[strings.Join(segments, "/")] = data
    }

    return result
}

func storeData(s physical.Backend, ref map[string][]byte) error {
    for k, v := range ref {
        entry := physical.Entry{
            Key:   k,
            Value: v,
        }

        err := s.Put(context.Background(), &entry)
        if err != nil {
            return err
        }
    }
    return nil
}

func compareStoredData(s physical.Backend, ref map[string][]byte, start string) error {
    for k, v := range ref {
        entry, err := s.Get(context.Background(), k)
        if err != nil {
            return err
        }
        if k >= start {
            if entry == nil {
                return fmt.Errorf("key not found: %s", k)
            }
            if !bytes.Equal(v, entry.Value) {
                return fmt.Errorf("values differ for key: %s", k)
            }
        } else {
            if entry != nil {
                return fmt.Errorf("found key that should have been skipped by start option: %s", k)
            }
        }
    }

    return nil
}

command/server.go
@@ -37,6 +37,7 @@ import (
    "github.com/hashicorp/vault/command/server"
    serverseal "github.com/hashicorp/vault/command/server/seal"
    "github.com/hashicorp/vault/helper/gated-writer"
    "github.com/hashicorp/vault/helper/jsonutil"
    "github.com/hashicorp/vault/helper/logging"
    "github.com/hashicorp/vault/helper/mlock"
    "github.com/hashicorp/vault/helper/namespace"
@@ -52,6 +53,8 @@ import (
var _ cli.Command = (*ServerCommand)(nil)
var _ cli.CommandAutocomplete = (*ServerCommand)(nil)

const migrationLock = "core/migration"

type ServerCommand struct {
    *BaseCommand
@@ -460,6 +463,19 @@ func (c *ServerCommand) Run(args []string) int {
        return 1
    }

    migrationStatus, err := CheckMigration(backend)
    if err != nil {
        c.UI.Error("Error checking migration status")
        return 1
    }

    if migrationStatus != nil {
        startTime := migrationStatus.Start.Format(time.RFC3339)
        c.UI.Error(wrapAtLength(fmt.Sprintf("Storage migration in progress (started: %s). "+
            "Use 'vault operator migrate -reset' to force clear the migration lock.", startTime)))
        return 1
    }

    infoKeys := make([]string, 0, 10)
    info := make(map[string]string)
    info["log level"] = c.flagLogLevel
@@ -1773,6 +1789,51 @@ func (c *ServerCommand) removePidFile(pidPath string) error {
    return os.Remove(pidPath)
}

type MigrationStatus struct {
    Start time.Time `json:"start"`
}

func CheckMigration(b physical.Backend) (*MigrationStatus, error) {
    entry, err := b.Get(context.Background(), migrationLock)

    if err != nil {
        return nil, err
    }

    if entry == nil {
        return nil, nil
    }

    var status MigrationStatus
    if err := jsonutil.DecodeJSON(entry.Value, &status); err != nil {
        return nil, err
    }

    return &status, nil
}

func SetMigration(b physical.Backend, active bool) error {
    if !active {
        return b.Delete(context.Background(), migrationLock)
    }

    status := MigrationStatus{
        Start: time.Now(),
    }

    enc, err := jsonutil.EncodeJSON(status)
    if err != nil {
        return err
    }

    entry := &physical.Entry{
        Key:   migrationLock,
        Value: enc,
    }

    return b.Put(context.Background(), entry)
}
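
For reference, the lock entry stored at core/migration is just a JSON-encoded MigrationStatus. A standalone sketch of the round trip, assuming plain standard-library JSON in place of the jsonutil helpers:

package main

import (
    "encoding/json"
    "fmt"
    "time"
)

// MigrationStatus matches the struct stored under core/migration.
type MigrationStatus struct {
    Start time.Time `json:"start"`
}

func main() {
    // What SetMigration writes: a timestamped status entry.
    enc, err := json.Marshal(MigrationStatus{Start: time.Now().UTC()})
    if err != nil {
        panic(err)
    }
    fmt.Println(string(enc)) // e.g. {"start":"2018-08-16T12:00:00.123456789Z"}

    // What CheckMigration reads back before refusing to start the server.
    var status MigrationStatus
    if err := json.Unmarshal(enc, &status); err != nil {
        panic(err)
    }
    fmt.Println("migration started:", status.Start.Format(time.RFC3339))
}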

type grpclogFaker struct {
    logger log.Logger
    log    bool

command/server/config.go
@@ -458,12 +458,12 @@ func ParseConfig(d string, logger log.Logger) (*Config, error) {
    // Look for storage but still support old backend
    if o := list.Filter("storage"); len(o.Items) > 0 {
-       if err := parseStorage(&result, o, "storage"); err != nil {
+       if err := ParseStorage(&result, o, "storage"); err != nil {
            return nil, errwrap.Wrapf("error parsing 'storage': {{err}}", err)
        }
    } else {
        if o := list.Filter("backend"); len(o.Items) > 0 {
-           if err := parseStorage(&result, o, "backend"); err != nil {
+           if err := ParseStorage(&result, o, "backend"); err != nil {
                return nil, errwrap.Wrapf("error parsing 'backend': {{err}}", err)
            }
        }
@@ -583,7 +583,7 @@ func isTemporaryFile(name string) bool {
        (strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#")) // emacs
}

-func parseStorage(result *Config, list *ast.ObjectList, name string) error {
+func ParseStorage(result *Config, list *ast.ObjectList, name string) error {
    if len(list.Items) > 1 {
        return fmt.Errorf("only one %q block is permitted", name)
    }