Mirror of https://github.com/optim-enterprises-bv/vault.git, synced 2025-11-01 19:17:58 +00:00
Seal migration (OSS) (#781)
@@ -41,6 +41,15 @@ func (c *Sys) Unseal(shard string) (*SealStatusResponse, error) {
 	return sealStatusRequest(c, r)
 }
 
+func (c *Sys) UnsealWithOptions(opts *UnsealOpts) (*SealStatusResponse, error) {
+	r := c.c.NewRequest("PUT", "/v1/sys/unseal")
+	if err := r.SetJSONBody(opts); err != nil {
+		return nil, err
+	}
+
+	return sealStatusRequest(c, r)
+}
+
 func sealStatusRequest(c *Sys, r *Request) (*SealStatusResponse, error) {
 	ctx, cancelFunc := context.WithCancel(context.Background())
 	defer cancelFunc()
@@ -64,7 +73,14 @@ type SealStatusResponse struct {
 	Progress     int    `json:"progress"`
 	Nonce        string `json:"nonce"`
 	Version      string `json:"version"`
+	Migration    bool   `json:"migration"`
 	ClusterName  string `json:"cluster_name,omitempty"`
 	ClusterID    string `json:"cluster_id,omitempty"`
 	RecoverySeal bool   `json:"recovery_seal"`
 }
+
+type UnsealOpts struct {
+	Key     string `json:"key"`
+	Reset   bool   `json:"reset"`
+	Migrate bool   `json:"migrate"`
+}
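For context, the new client-side call can be driven roughly as follows. This is a minimal sketch rather than part of the change: the server address and key share are placeholders, and the printed fields assume the SealStatusResponse shape shown above.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	// Placeholder address; point this at a real Vault server.
	client, err := api.NewClient(&api.Config{Address: "http://127.0.0.1:8200"})
	if err != nil {
		log.Fatal(err)
	}

	// Provide one key share and mark it as part of a seal migration.
	status, err := client.Sys().UnsealWithOptions(&api.UnsealOpts{
		Key:     "example-unseal-key-share", // placeholder share value
		Migrate: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("sealed=%t progress=%d migration=%t\n",
		status.Sealed, status.Progress, status.Migration)
}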
@@ -330,6 +330,10 @@ func OutputSealStatus(ui cli.Ui, client *api.Client, status *api.SealStatusRespo
 		out = append(out, fmt.Sprintf("Unseal Nonce | %s", status.Nonce))
 	}
 
+	if status.Migration {
+		out = append(out, fmt.Sprintf("Seal Migration in Progress | %t", status.Migration))
+	}
+
 	out = append(out, fmt.Sprintf("Version | %s", status.Version))
 
 	if status.ClusterName != "" && status.ClusterID != "" {
@@ -79,7 +79,7 @@ func TestLoginCommand_Run(t *testing.T) {
 			t.Fatal(err)
 		}
 
-		if l, exp := len(storedToken), vault.TokenLength; l != exp {
+		if l, exp := len(storedToken), vault.TokenLength+2; l != exp {
 			t.Errorf("expected token to be %d characters, was %d: %q", exp, l, storedToken)
 		}
 	})
@@ -206,7 +206,7 @@ func TestLoginCommand_Run(t *testing.T) {
 
 		// Verify only the token was printed
 		token := ui.OutputWriter.String()
-		if l, exp := len(token), vault.TokenLength; l != exp {
+		if l, exp := len(token), vault.TokenLength+2; l != exp {
 			t.Errorf("expected token to be %d characters, was %d: %q", exp, l, token)
 		}
 
@@ -161,13 +161,11 @@ func TestOperatorGenerateRootCommand_Run(t *testing.T) {
 	t.Run("cancel", func(t *testing.T) {
 		t.Parallel()
 
-		otp := "3JhHkONiyiaNYj14nnD9xZQS"
-
 		client, closer := testVaultServer(t)
 		defer closer()
 
 		// Initialize a generation
-		if _, err := client.Sys().GenerateRootInit(otp, ""); err != nil {
+		if _, err := client.Sys().GenerateRootInit("", ""); err != nil {
 			t.Fatal(err)
 		}
 
@@ -200,8 +198,6 @@ func TestOperatorGenerateRootCommand_Run(t *testing.T) {
 	t.Run("init_otp", func(t *testing.T) {
 		t.Parallel()
 
-		otp := "3JhHkONiyiaNYj14nnD9xZQS"
-
 		client, closer := testVaultServer(t)
 		defer closer()
 
@@ -210,7 +206,6 @@ func TestOperatorGenerateRootCommand_Run(t *testing.T) {
 
 		code := cmd.Run([]string{
 			"-init",
-			"-otp", otp,
 		})
 		if exp := 0; code != exp {
 			t.Errorf("expected %d to be %d", code, exp)
@@ -350,7 +345,7 @@ func TestOperatorGenerateRootCommand_Run(t *testing.T) {
 			t.Fatal(err)
 		}
 
-		if l, exp := len(token), vault.TokenLength; l != exp {
+		if l, exp := len(token), vault.TokenLength+2; l != exp {
 			t.Errorf("expected %d to be %d: %s", l, exp, token)
 		}
 	})
@@ -436,7 +431,7 @@ func TestOperatorGenerateRootCommand_Run(t *testing.T) {
 			t.Fatal(err)
 		}
 
-		if l, exp := len(token), vault.TokenLength; l != exp {
+		if l, exp := len(token), vault.TokenLength+2; l != exp {
 			t.Errorf("expected %d to be %d: %s", l, exp, token)
 		}
 	})
@@ -333,7 +333,7 @@ func TestOperatorInitCommand_Run(t *testing.T) {
 		root := match[0][1]
 		decryptedRoot := testPGPDecrypt(t, pgpkeys.TestPrivKey1, root)
 
-		if l, exp := len(decryptedRoot), vault.TokenLength; l != exp {
+		if l, exp := len(decryptedRoot), vault.TokenLength+2; l != exp {
 			t.Errorf("expected %d to be %d", l, exp)
 		}
 	})
@@ -149,7 +149,7 @@ func (c *OperatorMigrateCommand) migrate(config *migratorConfig) error {
 	}
 
 	if c.flagReset {
-		if err := SetMigration(from, false); err != nil {
+		if err := SetStorageMigration(from, false); err != nil {
 			return errwrap.Wrapf("error reseting migration lock: {{err}}", err)
 		}
 		return nil
@@ -160,7 +160,7 @@ func (c *OperatorMigrateCommand) migrate(config *migratorConfig) error {
 		return errwrap.Wrapf("error mounting 'storage_destination': {{err}}", err)
 	}
 
-	migrationStatus, err := CheckMigration(from)
+	migrationStatus, err := CheckStorageMigration(from)
 	if err != nil {
 		return errors.New("error checking migration status")
 	}
@@ -169,11 +169,11 @@ func (c *OperatorMigrateCommand) migrate(config *migratorConfig) error {
 		return fmt.Errorf("Storage migration in progress (started: %s).", migrationStatus.Start.Format(time.RFC3339))
 	}
 
-	if err := SetMigration(from, true); err != nil {
+	if err := SetStorageMigration(from, true); err != nil {
 		return errwrap.Wrapf("error setting migration lock: {{err}}", err)
 	}
 
-	defer SetMigration(from, false)
+	defer SetStorageMigration(from, false)
 
 	ctx, cancelFunc := context.WithCancel(context.Background())
 
@@ -197,7 +197,7 @@ func (c *OperatorMigrateCommand) migrate(config *migratorConfig) error {
 // migrateAll copies all keys in lexicographic order.
 func (c *OperatorMigrateCommand) migrateAll(ctx context.Context, from physical.Backend, to physical.Backend) error {
 	return dfsScan(ctx, from, func(ctx context.Context, path string) error {
-		if path < c.flagStart || path == migrationLock || path == vault.CoreLockPath {
+		if path < c.flagStart || path == storageMigrationLock || path == vault.CoreLockPath {
 			return nil
 		}
 
@@ -264,7 +264,7 @@ func generateData() map[string][]byte {
 	}
 
 	// Add special keys that should be excluded from migration
-	result[migrationLock] = []byte{}
+	result[storageMigrationLock] = []byte{}
 	result[vault.CoreLockPath] = []byte{}
 
 	return result
@@ -292,7 +292,7 @@ func compareStoredData(s physical.Backend, ref map[string][]byte, start string)
 		return err
 	}
 
-	if k == migrationLock || k == vault.CoreLockPath {
+	if k == storageMigrationLock || k == vault.CoreLockPath {
 		if entry == nil {
 			continue
 		}
@@ -6,6 +6,7 @@ import (
 	"os"
 	"strings"
 
+	"github.com/hashicorp/vault/api"
 	"github.com/hashicorp/vault/helper/password"
 	"github.com/mitchellh/cli"
 	"github.com/posener/complete"
@@ -17,7 +18,8 @@ var _ cli.CommandAutocomplete = (*OperatorUnsealCommand)(nil)
 type OperatorUnsealCommand struct {
 	*BaseCommand
 
 	flagReset   bool
+	flagMigrate bool
 
 	testOutput io.Writer // for tests
 }
@@ -64,6 +66,16 @@ func (c *OperatorUnsealCommand) Flags() *FlagSets {
 		Usage:   "Discard any previously entered keys to the unseal process.",
 	})
 
+	f.BoolVar(&BoolVar{
+		Name:       "migrate",
+		Aliases:    []string{},
+		Target:     &c.flagMigrate,
+		Default:    false,
+		EnvVar:     "",
+		Completion: complete.PredictNothing,
+		Usage:      "Indicate that this share is provided with the intent that it is part of a seal migration process.",
+	})
+
 	return set
 }
 
@@ -135,7 +147,10 @@ func (c *OperatorUnsealCommand) Run(args []string) int {
 		unsealKey = strings.TrimSpace(value)
 	}
 
-	status, err := client.Sys().Unseal(unsealKey)
+	status, err := client.Sys().UnsealWithOptions(&api.UnsealOpts{
+		Key:     unsealKey,
+		Migrate: c.flagMigrate,
+	})
 	if err != nil {
 		c.UI.Error(fmt.Sprintf("Error unsealing: %s", err))
 		return 2
command/seal_migration_test.go (new file, 291 lines)

package command

import (
	"context"
	"encoding/base64"
	"testing"

	hclog "github.com/hashicorp/go-hclog"
	"github.com/hashicorp/vault/api"
	"github.com/hashicorp/vault/command/server"
	"github.com/hashicorp/vault/helper/logging"
	vaulthttp "github.com/hashicorp/vault/http"
	"github.com/hashicorp/vault/physical"
	physInmem "github.com/hashicorp/vault/physical/inmem"
	"github.com/hashicorp/vault/shamir"
	"github.com/hashicorp/vault/vault"
	"github.com/hashicorp/vault/vault/seal"
)

func TestSealMigration(t *testing.T) {
	logger := logging.NewVaultLogger(hclog.Trace)
	phys, err := physInmem.NewInmem(nil, logger)
	if err != nil {
		t.Fatal(err)
	}
	haPhys, err := physInmem.NewInmemHA(nil, logger)
	if err != nil {
		t.Fatal(err)
	}
	shamirSeal := vault.NewDefaultSeal()
	coreConfig := &vault.CoreConfig{
		Seal:            shamirSeal,
		Physical:        phys,
		HAPhysical:      haPhys.(physical.HABackend),
		DisableSealWrap: true,
	}
	clusterConfig := &vault.TestClusterOptions{
		Logger:      logger,
		HandlerFunc: vaulthttp.Handler,
		SkipInit:    true,
		NumCores:    1,
	}

	var keys []string
	var rootToken string

	// First: start up as normal with shamir seal, init it
	{
		cluster := vault.NewTestCluster(t, coreConfig, clusterConfig)
		cluster.Start()
		defer cluster.Cleanup()

		client := cluster.Cores[0].Client
		coreConfig = cluster.Cores[0].CoreConfig

		// Init
		resp, err := client.Sys().Init(&api.InitRequest{
			SecretShares:    2,
			SecretThreshold: 2,
		})
		if err != nil {
			t.Fatal(err)
		}
		keys = resp.KeysB64
		rootToken = resp.RootToken

		// Now seal
		cluster.Cleanup()
		// This will prevent cleanup from running again on the defer
		cluster.Cores = nil
	}

	// Second: start up as normal with shamir seal and unseal, make sure
	// everything is normal
	{
		cluster := vault.NewTestCluster(t, coreConfig, clusterConfig)
		cluster.Start()
		defer cluster.Cleanup()

		client := cluster.Cores[0].Client
		client.SetToken(rootToken)

		var resp *api.SealStatusResponse
		for _, key := range keys {
			resp, err = client.Sys().Unseal(key)
			if err != nil {
				t.Fatal(err)
			}
			if resp == nil {
				t.Fatal("expected response")
			}
		}
		if resp.Sealed {
			t.Fatal("expected unsealed state")
		}

		cluster.Cleanup()
		cluster.Cores = nil
	}

	var autoSeal vault.Seal

	// Third: create an autoseal and activate migration
	{
		cluster := vault.NewTestCluster(t, coreConfig, clusterConfig)
		cluster.Start()
		defer cluster.Cleanup()

		core := cluster.Cores[0].Core

		newSeal := vault.NewAutoSeal(&seal.TestSeal{})
		newSeal.SetCore(core)
		autoSeal = newSeal
		if err := adjustCoreForSealMigration(context.Background(), core, coreConfig, newSeal, &server.Config{
			Seal: &server.Seal{
				Type: "test-auto",
			},
		}); err != nil {
			t.Fatal(err)
		}

		client := cluster.Cores[0].Client
		client.SetToken(rootToken)

		var resp *api.SealStatusResponse
		unsealOpts := &api.UnsealOpts{}
		for _, key := range keys {
			unsealOpts.Key = key
			unsealOpts.Migrate = false
			resp, err = client.Sys().UnsealWithOptions(unsealOpts)
			if err == nil {
				t.Fatal("expected error due to lack of migrate parameter")
			}
			unsealOpts.Migrate = true
			resp, err = client.Sys().UnsealWithOptions(unsealOpts)
			if err != nil {
				t.Fatal(err)
			}
			if resp == nil {
				t.Fatal("expected response")
			}
		}
		if resp.Sealed {
			t.Fatalf("expected unsealed state; got %#v", *resp)
		}

		cluster.Cleanup()
		cluster.Cores = nil
	}

	// Fourth: verify autoseal and recovery key usage
	{
		coreConfig.Seal = autoSeal
		cluster := vault.NewTestCluster(t, coreConfig, clusterConfig)
		cluster.Start()
		defer cluster.Cleanup()

		core := cluster.Cores[0].Core
		client := cluster.Cores[0].Client
		client.SetToken(rootToken)

		if err := core.UnsealWithStoredKeys(context.Background()); err != nil {
			t.Fatal(err)
		}
		resp, err := client.Sys().SealStatus()
		if err != nil {
			t.Fatal(err)
		}
		if resp == nil {
			t.Fatal("expected response")
		}
		if resp.Sealed {
			t.Fatalf("expected unsealed state; got %#v", *resp)
		}

		keyParts := [][]byte{}
		for _, key := range keys {
			raw, err := base64.StdEncoding.DecodeString(key)
			if err != nil {
				t.Fatal(err)
			}
			keyParts = append(keyParts, raw)
		}
		recoveredKey, err := shamir.Combine(keyParts)
		if err != nil {
			t.Fatal(err)
		}
		sealAccess := core.SealAccess()
		if err := sealAccess.VerifyRecoveryKey(context.Background(), recoveredKey); err != nil {
			t.Fatal(err)
		}

		cluster.Cleanup()
		cluster.Cores = nil
	}

	// Fifth: create an autoseal and activate migration. Verify it doesn't work
	// if disabled isn't set.
	{
		coreConfig.Seal = autoSeal
		cluster := vault.NewTestCluster(t, coreConfig, clusterConfig)
		cluster.Start()
		defer cluster.Cleanup()

		core := cluster.Cores[0].Core

		serverConf := &server.Config{
			Seal: &server.Seal{
				Type: "test-auto",
			},
		}

		if err := adjustCoreForSealMigration(context.Background(), core, coreConfig, shamirSeal, serverConf); err == nil {
			t.Fatal("expected error since disabled isn't set true")
		}
		serverConf.Seal.Disabled = true
		if err := adjustCoreForSealMigration(context.Background(), core, coreConfig, shamirSeal, serverConf); err != nil {
			t.Fatal(err)
		}

		client := cluster.Cores[0].Client
		client.SetToken(rootToken)

		var resp *api.SealStatusResponse
		unsealOpts := &api.UnsealOpts{}
		for _, key := range keys {
			unsealOpts.Key = key
			unsealOpts.Migrate = false
			resp, err = client.Sys().UnsealWithOptions(unsealOpts)
			if err == nil {
				t.Fatal("expected error due to lack of migrate parameter")
			}
			unsealOpts.Migrate = true
			resp, err = client.Sys().UnsealWithOptions(unsealOpts)
			if err != nil {
				t.Fatal(err)
			}
			if resp == nil {
				t.Fatal("expected response")
			}
		}
		if resp.Sealed {
			t.Fatalf("expected unsealed state; got %#v", *resp)
		}

		cluster.Cleanup()
		cluster.Cores = nil
	}

	// Sixth: verify autoseal is off and the expected key shares work
	{
		coreConfig.Seal = shamirSeal
		cluster := vault.NewTestCluster(t, coreConfig, clusterConfig)
		cluster.Start()
		defer cluster.Cleanup()

		core := cluster.Cores[0].Core
		client := cluster.Cores[0].Client
		client.SetToken(rootToken)

		if err := core.UnsealWithStoredKeys(context.Background()); err != nil {
			t.Fatal(err)
		}
		resp, err := client.Sys().SealStatus()
		if err != nil {
			t.Fatal(err)
		}
		if resp == nil {
			t.Fatal("expected response")
		}
		if !resp.Sealed {
			t.Fatalf("expected sealed state; got %#v", *resp)
		}

		for _, key := range keys {
			resp, err = client.Sys().Unseal(key)
			if err != nil {
				t.Fatal(err)
			}
			if resp == nil {
				t.Fatal("expected response")
			}
		}
		if resp.Sealed {
			t.Fatal("expected unsealed state")
		}

		cluster.Cleanup()
		cluster.Cores = nil
	}
}
@@ -53,7 +53,7 @@ import (
 var _ cli.Command = (*ServerCommand)(nil)
 var _ cli.CommandAutocomplete = (*ServerCommand)(nil)
 
-const migrationLock = "core/migration"
+const storageMigrationLock = "core/migration"
 
 type ServerCommand struct {
 	*BaseCommand
@@ -464,7 +464,7 @@ func (c *ServerCommand) Run(args []string) int {
 	}
 
 	// Prevent server startup if migration is active
-	if c.migrationActive(backend) {
+	if c.storageMigrationActive(backend) {
 		return 1
 	}
 
@@ -899,8 +899,13 @@ CLUSTER_SYNTHESIS_COMPLETE:
 		Core: core,
 	}))
 
-	err = core.UnsealWithStoredKeys(context.Background())
-	if err != nil {
+	// Before unsealing with stored keys, setup seal migration if needed
+	if err := adjustCoreForSealMigration(context.Background(), core, coreConfig, seal, config); err != nil {
+		c.UI.Error(err.Error())
+		return 1
+	}
+
+	if err := core.UnsealWithStoredKeys(context.Background()); err != nil {
 		if !errwrap.ContainsType(err, new(vault.NonFatalError)) {
 			c.UI.Error(fmt.Sprintf("Error initializing core: %s", err))
 			return 1
@@ -1784,13 +1789,13 @@ func (c *ServerCommand) removePidFile(pidPath string) error {
 	return os.Remove(pidPath)
 }
 
-// migrationActive checks and warns against in-progress storage migrations.
+// storageMigrationActive checks and warns against in-progress storage migrations.
 // This function will block until storage is available.
-func (c *ServerCommand) migrationActive(backend physical.Backend) bool {
+func (c *ServerCommand) storageMigrationActive(backend physical.Backend) bool {
 	first := true
 
 	for {
-		migrationStatus, err := CheckMigration(backend)
+		migrationStatus, err := CheckStorageMigration(backend)
 		if err == nil {
 			if migrationStatus != nil {
 				startTime := migrationStatus.Start.Format(time.RFC3339)
@@ -1803,12 +1808,12 @@ func (c *ServerCommand) migrationActive(backend physical.Backend) bool {
 		}
 		if first {
 			first = false
-			c.UI.Warn("\nWARNING! Unable to read migration status.")
+			c.UI.Warn("\nWARNING! Unable to read storage migration status.")
 
 			// unexpected state, so stop buffering log messages
 			c.logGate.Flush()
 		}
-		c.logger.Warn("migration check error", "error", err.Error())
+		c.logger.Warn("storage migration check error", "error", err.Error())
 
 		select {
 		case <-time.After(2 * time.Second):
@@ -1819,12 +1824,12 @@ func (c *ServerCommand) migrationActive(backend physical.Backend) bool {
 	return false
 }
 
-type MigrationStatus struct {
+type StorageMigrationStatus struct {
 	Start time.Time `json:"start"`
 }
 
-func CheckMigration(b physical.Backend) (*MigrationStatus, error) {
-	entry, err := b.Get(context.Background(), migrationLock)
+func CheckStorageMigration(b physical.Backend) (*StorageMigrationStatus, error) {
+	entry, err := b.Get(context.Background(), storageMigrationLock)
 
 	if err != nil {
 		return nil, err
@@ -1834,7 +1839,7 @@ func CheckMigration(b physical.Backend) (*MigrationStatus, error) {
 		return nil, nil
 	}
 
-	var status MigrationStatus
+	var status StorageMigrationStatus
 	if err := jsonutil.DecodeJSON(entry.Value, &status); err != nil {
 		return nil, err
 	}
@@ -1842,12 +1847,12 @@ func CheckMigration(b physical.Backend) (*MigrationStatus, error) {
 	return &status, nil
 }
 
-func SetMigration(b physical.Backend, active bool) error {
+func SetStorageMigration(b physical.Backend, active bool) error {
 	if !active {
-		return b.Delete(context.Background(), migrationLock)
+		return b.Delete(context.Background(), storageMigrationLock)
 	}
 
-	status := MigrationStatus{
+	status := StorageMigrationStatus{
 		Start: time.Now(),
 	}
 
@@ -1857,7 +1862,7 @@ func SetMigration(b physical.Backend, active bool) error {
 	}
 
 	entry := &physical.Entry{
-		Key:   migrationLock,
+		Key:   storageMigrationLock,
 		Value: enc,
 	}
 
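To illustrate what ends up stored under the core/migration key while a storage migration is running, here is a rough sketch; it uses a local copy of the StorageMigrationStatus shape from above and the standard library encoder rather than the jsonutil helper used in the real code.

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Local mirror of the StorageMigrationStatus type shown above.
type storageMigrationStatus struct {
	Start time.Time `json:"start"`
}

func main() {
	// SetStorageMigration persists a record of this shape at "core/migration".
	enc, err := json.Marshal(storageMigrationStatus{Start: time.Now()})
	if err != nil {
		panic(err)
	}
	// Example output: {"start":"2019-01-01T00:00:00Z"}
	fmt.Println(string(enc))
}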
@@ -135,8 +135,9 @@ func (b *Storage) GoString() string {
 
 // Seal contains Seal configuration for the server
 type Seal struct {
 	Type     string
-	Config   map[string]string
+	Disabled bool
+	Config   map[string]string
 }
 
 func (h *Seal) GoString() string {
@@ -748,9 +749,20 @@ func parseSeal(result *Config, list *ast.ObjectList, blockName string) error {
 		return multierror.Prefix(err, fmt.Sprintf("%s.%s:", blockName, key))
 	}
 
+	var disabled bool
+	var err error
+	if v, ok := m["disabled"]; ok {
+		disabled, err = strconv.ParseBool(v)
+		if err != nil {
+			return multierror.Prefix(err, fmt.Sprintf("%s.%s:", blockName, key))
+		}
+		delete(m, "disabled")
+	}
+
 	result.Seal = &Seal{
 		Type:     strings.ToLower(key),
-		Config:   m,
+		Disabled: disabled,
+		Config:   m,
 	}
 
 	return nil
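For illustration, a seal stanza that carries disabled = "true" during a migration would be parsed into a value roughly like the one below. This is a sketch, not part of the change: the "awskms" type and kms_key_id value are placeholders, and the only behavior shown from the diff is that parseSeal consumes the "disabled" key and leaves the rest in Config.

package main

import (
	"fmt"

	"github.com/hashicorp/vault/command/server"
)

func main() {
	// Roughly what parseSeal would produce for a config stanza such as:
	//
	//   seal "awskms" {
	//     disabled   = "true"
	//     kms_key_id = "alias/example"   // placeholder value
	//   }
	//
	// "disabled" is parsed into the new Disabled field and removed from Config.
	s := &server.Seal{
		Type:     "awskms",
		Disabled: true,
		Config: map[string]string{
			"kms_key_id": "alias/example",
		},
	}
	fmt.Printf("type=%s disabled=%t config=%v\n", s.Type, s.Disabled, s.Config)
}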
command/server_util.go (new file, 75 lines)

package command

import (
	"context"
	"fmt"

	"github.com/hashicorp/vault/command/server"
	"github.com/hashicorp/vault/vault"
	"github.com/pkg/errors"
)

var (
	onEnterprise = false
)

func adjustCoreForSealMigration(ctx context.Context, core *vault.Core, coreConfig *vault.CoreConfig, seal vault.Seal, config *server.Config) error {
	existBarrierSealConfig, existRecoverySealConfig, err := core.PhysicalSealConfigs(context.Background())
	if err != nil {
		return fmt.Errorf("Error checking for existing seal: %s", err)
	}
	var existSeal vault.Seal
	var newSeal vault.Seal
	if existBarrierSealConfig != nil &&
		(existBarrierSealConfig.Type != seal.BarrierType() ||
			config.Seal != nil && config.Seal.Disabled) {
		// If the existing seal is not Shamir, we're going to Shamir, which
		// means we require them setting "disabled" to true in their
		// configuration as a sanity check.
		if (config.Seal == nil || !config.Seal.Disabled) && existBarrierSealConfig.Type != seal.Shamir {
			return errors.New(`Seal migration requires specifying "disabled" as "true" in the "seal" block of Vault's configuration file"`)
		}

		// Conversely, if they are going from Shamir to auto, we want to
		// ensure disabled is *not* set
		if existBarrierSealConfig.Type == seal.Shamir && config.Seal != nil && config.Seal.Disabled {
			coreConfig.Logger.Warn(`when not migrating, Vault's config should not specify "disabled" as "true" in the "seal" block of Vault's configuration file`)
			return nil
		}

		if existBarrierSealConfig.Type != seal.Shamir && existRecoverySealConfig == nil {
			return errors.New(`Recovery seal configuration not found for existing seal`)
		}

		switch existBarrierSealConfig.Type {
		case seal.Shamir:
			// The value reflected in config is what we're going to
			existSeal = vault.NewDefaultSeal()
			existSeal.SetCore(core)
			newSeal = seal
			newBarrierSealConfig := &vault.SealConfig{
				Type:            newSeal.BarrierType(),
				SecretShares:    1,
				SecretThreshold: 1,
				StoredShares:    1,
			}
			newSeal.SetCachedBarrierConfig(newBarrierSealConfig)
			newSeal.SetCachedRecoveryConfig(existBarrierSealConfig)

		default:
			if onEnterprise {
				return errors.New("Migrating from autoseal to Shamir seal is not supported on Vault Enterprise")
			}

			// The disabled value reflected in config is what we're going from
			existSeal = coreConfig.Seal
			newSeal = vault.NewDefaultSeal()
			newSeal.SetCore(core)
			newSeal.SetCachedBarrierConfig(existRecoverySealConfig)
		}

		core.SetSealsForMigration(existSeal, newSeal)
	}

	return nil
}
@@ -105,6 +105,7 @@ func testHttpData(t *testing.T, method string, token string, addr string, body i
 }
 
 func testResponseStatus(t *testing.T, resp *http.Response, code int) {
+	t.Helper()
 	if resp.StatusCode != code {
 		body := new(bytes.Buffer)
 		io.Copy(body, resp.Body)
@@ -36,13 +36,13 @@ func TestSysGenerateRootAttempt_Status(t *testing.T) {
 		"encoded_root_token": "",
 		"pgp_fingerprint":    "",
 		"nonce":              "",
-		"otp":                "",
-		"otp_length":         json.Number("24"),
+		"otp_length":         json.Number("26"),
 	}
 	testResponseStatus(t, resp, 200)
 	testResponseBody(t, resp, &actual)
-	if !reflect.DeepEqual(actual, expected) {
-		t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
+	expected["otp"] = actual["otp"]
+	if diff := deep.Equal(actual, expected); diff != nil {
+		t.Fatal(diff)
 	}
 }
 
@@ -52,15 +52,7 @@ func TestSysGenerateRootAttempt_Setup_OTP(t *testing.T) {
 	defer ln.Close()
 	TestServerAuth(t, addr, token)
 
-	otpBytes, err := vault.GenerateRandBytes(16)
-	if err != nil {
-		t.Fatal(err)
-	}
-	otp := base64.StdEncoding.EncodeToString(otpBytes)
-
-	resp := testHttpPut(t, token, addr+"/v1/sys/generate-root/attempt", map[string]interface{}{
-		"otp": otp,
-	})
+	resp := testHttpPut(t, token, addr+"/v1/sys/generate-root/attempt", nil)
 	testResponseStatus(t, resp, 200)
 
 	var actual map[string]interface{}
@@ -72,8 +64,7 @@ func TestSysGenerateRootAttempt_Setup_OTP(t *testing.T) {
 		"encoded_token":      "",
 		"encoded_root_token": "",
 		"pgp_fingerprint":    "",
-		"otp":                "",
-		"otp_length":         json.Number("24"),
+		"otp_length":         json.Number("26"),
 	}
 	testResponseStatus(t, resp, 200)
 	testResponseBody(t, resp, &actual)
@@ -81,8 +72,9 @@ func TestSysGenerateRootAttempt_Setup_OTP(t *testing.T) {
 		t.Fatalf("nonce was empty")
 	}
 	expected["nonce"] = actual["nonce"]
-	if !reflect.DeepEqual(actual, expected) {
-		t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
+	expected["otp"] = actual["otp"]
+	if diff := deep.Equal(actual, expected); diff != nil {
+		t.Fatal(diff)
 	}
 
 	resp = testHttpGet(t, token, addr+"/v1/sys/generate-root/attempt")
@@ -97,7 +89,7 @@ func TestSysGenerateRootAttempt_Setup_OTP(t *testing.T) {
 		"encoded_root_token": "",
 		"pgp_fingerprint":    "",
 		"otp":                "",
-		"otp_length":         json.Number("24"),
+		"otp_length":         json.Number("26"),
 	}
 	testResponseStatus(t, resp, 200)
 	testResponseBody(t, resp, &actual)
@@ -133,7 +125,7 @@ func TestSysGenerateRootAttempt_Setup_PGP(t *testing.T) {
 		"encoded_root_token": "",
 		"pgp_fingerprint":    "816938b8a29146fbe245dd29e7cbaf8e011db793",
 		"otp":                "",
-		"otp_length":         json.Number("24"),
+		"otp_length":         json.Number("26"),
 	}
 	testResponseStatus(t, resp, 200)
 	testResponseBody(t, resp, &actual)
@@ -141,8 +133,8 @@ func TestSysGenerateRootAttempt_Setup_PGP(t *testing.T) {
 		t.Fatalf("nonce was empty")
 	}
 	expected["nonce"] = actual["nonce"]
-	if !reflect.DeepEqual(actual, expected) {
-		t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
+	if diff := deep.Equal(actual, expected); diff != nil {
+		t.Fatal(diff)
 	}
 }
 
@@ -152,15 +144,7 @@ func TestSysGenerateRootAttempt_Cancel(t *testing.T) {
 	defer ln.Close()
 	TestServerAuth(t, addr, token)
 
-	otpBytes, err := vault.GenerateRandBytes(16)
-	if err != nil {
-		t.Fatal(err)
-	}
-	otp := base64.StdEncoding.EncodeToString(otpBytes)
-
-	resp := testHttpPut(t, token, addr+"/v1/sys/generate-root/attempt", map[string]interface{}{
-		"otp": otp,
-	})
+	resp := testHttpPut(t, token, addr+"/v1/sys/generate-root/attempt", nil)
 
 	var actual map[string]interface{}
 	expected := map[string]interface{}{
@@ -171,8 +155,7 @@ func TestSysGenerateRootAttempt_Cancel(t *testing.T) {
 		"encoded_token":      "",
 		"encoded_root_token": "",
 		"pgp_fingerprint":    "",
-		"otp":                "",
-		"otp_length":         json.Number("24"),
+		"otp_length":         json.Number("26"),
 	}
 	testResponseStatus(t, resp, 200)
 	testResponseBody(t, resp, &actual)
@@ -180,14 +163,15 @@ func TestSysGenerateRootAttempt_Cancel(t *testing.T) {
 		t.Fatalf("nonce was empty")
 	}
 	expected["nonce"] = actual["nonce"]
-	if !reflect.DeepEqual(actual, expected) {
-		t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
+	expected["otp"] = actual["otp"]
+	if diff := deep.Equal(actual, expected); diff != nil {
+		t.Fatal(diff)
 	}
 
 	resp = testHttpDelete(t, token, addr+"/v1/sys/generate-root/attempt")
 	testResponseStatus(t, resp, 204)
 
-	resp, err = http.Get(addr + "/v1/sys/generate-root/attempt")
+	resp, err := http.Get(addr + "/v1/sys/generate-root/attempt")
 	if err != nil {
 		t.Fatalf("err: %s", err)
 	}
@@ -203,7 +187,7 @@ func TestSysGenerateRootAttempt_Cancel(t *testing.T) {
 		"pgp_fingerprint":    "",
 		"nonce":              "",
 		"otp":                "",
-		"otp_length":         json.Number("24"),
+		"otp_length":         json.Number("26"),
 	}
 	testResponseStatus(t, resp, 200)
 	testResponseBody(t, resp, &actual)
@@ -218,15 +202,8 @@ func TestSysGenerateRoot_badKey(t *testing.T) {
 	defer ln.Close()
 	TestServerAuth(t, addr, token)
 
-	otpBytes, err := vault.GenerateRandBytes(16)
-	if err != nil {
-		t.Fatal(err)
-	}
-	otp := base64.StdEncoding.EncodeToString(otpBytes)
-
 	resp := testHttpPut(t, token, addr+"/v1/sys/generate-root/update", map[string]interface{}{
 		"key": "0123",
-		"otp": otp,
 	})
 	testResponseStatus(t, resp, 400)
 }
@@ -237,14 +214,7 @@ func TestSysGenerateRoot_ReAttemptUpdate(t *testing.T) {
 	defer ln.Close()
 	TestServerAuth(t, addr, token)
 
-	otpBytes, err := vault.GenerateRandBytes(16)
-	if err != nil {
-		t.Fatal(err)
-	}
-	otp := base64.StdEncoding.EncodeToString(otpBytes)
-	resp := testHttpPut(t, token, addr+"/v1/sys/generate-root/attempt", map[string]interface{}{
-		"otp": otp,
-	})
+	resp := testHttpPut(t, token, addr+"/v1/sys/generate-root/attempt", nil)
 	testResponseStatus(t, resp, 200)
 
 	resp = testHttpDelete(t, token, addr+"/v1/sys/generate-root/attempt")
http/sys_seal.go

@@ -90,12 +90,6 @@ func handleSysUnseal(core *vault.Core) http.Handler {
 			respondError(w, http.StatusBadRequest, err)
 			return
 		}
-		if !req.Reset && req.Key == "" {
-			respondError(
-				w, http.StatusBadRequest,
-				errors.New("'key' must be specified in request body as JSON, or 'reset' set to true"))
-			return
-		}
 
 		if req.Reset {
 			if !core.Sealed() {
@@ -103,46 +97,68 @@ func handleSysUnseal(core *vault.Core) http.Handler {
 				return
 			}
 			core.ResetUnsealProcess()
-		} else {
-			// Decode the key, which is base64 or hex encoded
-			min, max := core.BarrierKeyLength()
-			key, err := hex.DecodeString(req.Key)
-			// We check min and max here to ensure that a string that is base64
-			// encoded but also valid hex will not be valid and we instead base64
-			// decode it
-			if err != nil || len(key) < min || len(key) > max {
-				key, err = base64.StdEncoding.DecodeString(req.Key)
-				if err != nil {
-					respondError(
-						w, http.StatusBadRequest,
-						errors.New("'key' must be a valid hex or base64 string"))
-					return
-				}
-			}
-
-			// Attempt the unseal
-			ctx := context.Background()
-			if core.SealAccess().RecoveryKeySupported() {
-				_, err = core.UnsealWithRecoveryKeys(ctx, key)
-			} else {
-				_, err = core.Unseal(key)
-			}
-			if err != nil {
-				switch {
-				case errwrap.ContainsType(err, new(vault.ErrInvalidKey)):
-				case errwrap.Contains(err, vault.ErrBarrierInvalidKey.Error()):
-				case errwrap.Contains(err, vault.ErrBarrierNotInit.Error()):
-				case errwrap.Contains(err, vault.ErrBarrierSealed.Error()):
-				case errwrap.Contains(err, consts.ErrStandby.Error()):
-				default:
-					respondError(w, http.StatusInternalServerError, err)
-					return
-				}
-				respondError(w, http.StatusBadRequest, err)
-				return
-			}
-		}
+			handleSysSealStatusRaw(core, w, r)
+			return
+		}
+
+		isInSealMigration := core.IsInSealMigration()
+		if !req.Migrate && isInSealMigration {
+			respondError(
+				w, http.StatusBadRequest,
+				errors.New("'migrate' parameter must be set true in JSON body when in seal migration mode"))
+			return
+		}
+		if req.Migrate && !isInSealMigration {
+			respondError(
+				w, http.StatusBadRequest,
+				errors.New("'migrate' parameter set true in JSON body when not in seal migration mode"))
+			return
+		}
+
+		if req.Key == "" {
+			respondError(
+				w, http.StatusBadRequest,
+				errors.New("'key' must be specified in request body as JSON, or 'reset' set to true"))
+			return
+		}
+
+		// Decode the key, which is base64 or hex encoded
+		min, max := core.BarrierKeyLength()
+		key, err := hex.DecodeString(req.Key)
+		// We check min and max here to ensure that a string that is base64
+		// encoded but also valid hex will not be valid and we instead base64
+		// decode it
+		if err != nil || len(key) < min || len(key) > max {
+			key, err = base64.StdEncoding.DecodeString(req.Key)
+			if err != nil {
+				respondError(
+					w, http.StatusBadRequest,
+					errors.New("'key' must be a valid hex or base64 string"))
+				return
+			}
+		}
+
+		// Attempt the unseal
+		if core.SealAccess().RecoveryKeySupported() {
+			_, err = core.UnsealWithRecoveryKeys(key)
+		} else {
+			_, err = core.Unseal(key)
+		}
+		if err != nil {
+			switch {
+			case errwrap.ContainsType(err, new(vault.ErrInvalidKey)):
+			case errwrap.Contains(err, vault.ErrBarrierInvalidKey.Error()):
+			case errwrap.Contains(err, vault.ErrBarrierNotInit.Error()):
+			case errwrap.Contains(err, vault.ErrBarrierSealed.Error()):
+			case errwrap.Contains(err, consts.ErrStandby.Error()):
+			default:
+				respondError(w, http.StatusInternalServerError, err)
+				return
+			}
+			respondError(w, http.StatusBadRequest, err)
+			return
+		}
 
 		// Return the seal status
 		handleSysSealStatusRaw(core, w, r)
 	})
@@ -213,6 +229,7 @@ func handleSysSealStatusRaw(core *vault.Core, w http.ResponseWriter, r *http.Req
 		Progress:     progress,
 		Nonce:        nonce,
 		Version:      version.GetVersion().VersionNumber(),
+		Migration:    core.IsInSealMigration(),
 		ClusterName:  clusterName,
 		ClusterID:    clusterID,
 		RecoverySeal: core.SealAccess().RecoveryKeySupported(),
@@ -228,12 +245,17 @@ type SealStatusResponse struct {
 	Progress     int    `json:"progress"`
 	Nonce        string `json:"nonce"`
 	Version      string `json:"version"`
+	Migration    bool   `json:"migration"`
 	ClusterName  string `json:"cluster_name,omitempty"`
 	ClusterID    string `json:"cluster_id,omitempty"`
 	RecoverySeal bool   `json:"recovery_seal"`
 }
 
+// Note: because we didn't provide explicit tagging in the past we can't do it
+// now because if it then no longer accepts capitalized versions it could break
+// clients
 type UnsealRequest struct {
 	Key     string
 	Reset   bool
+	Migrate bool
 }
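For reference, the JSON body a client sends to /v1/sys/unseal during a migration now carries the migrate flag. A minimal sketch of the wire format follows (the key share value is a placeholder); the server-side UnsealRequest above has no explicit tags, so it also accepts capitalized field names.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/hashicorp/vault/api"
)

func main() {
	// Marshal the client-side options to show the request body shape.
	body, err := json.Marshal(&api.UnsealOpts{
		Key:     "example-key-share", // placeholder share value
		Migrate: true,
	})
	if err != nil {
		panic(err)
	}
	// Prints: {"key":"example-key-share","reset":false,"migrate":true}
	fmt.Println(string(body))
}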
@@ -5,10 +5,10 @@ import (
 	"encoding/json"
 	"fmt"
 	"net/http"
-	"reflect"
 	"strconv"
 	"testing"
 
+	"github.com/go-test/deep"
 	"github.com/hashicorp/vault/helper/namespace"
 	"github.com/hashicorp/vault/logical"
 	"github.com/hashicorp/vault/vault"
@@ -35,6 +35,7 @@ func TestSysSealStatus(t *testing.T) {
 		"type":          "shamir",
 		"recovery_seal": false,
 		"initialized":   true,
+		"migration":     false,
 	}
 	testResponseStatus(t, resp, 200)
 	testResponseBody(t, resp, &actual)
@@ -52,8 +53,8 @@ func TestSysSealStatus(t *testing.T) {
 	} else {
 		expected["cluster_id"] = actual["cluster_id"]
 	}
-	if !reflect.DeepEqual(actual, expected) {
-		t.Fatalf("bad: expected: %#v\nactual: %#v", expected, actual)
+	if diff := deep.Equal(actual, expected); diff != nil {
+		t.Fatal(diff)
 	}
 }
 
@@ -118,6 +119,7 @@ func TestSysUnseal(t *testing.T) {
 			"type":          "shamir",
 			"recovery_seal": false,
 			"initialized":   true,
+			"migration":     false,
 		}
 		if i == len(keys)-1 {
 			expected["sealed"] = false
@@ -144,8 +146,8 @@ func TestSysUnseal(t *testing.T) {
 		} else {
 			expected["cluster_id"] = actual["cluster_id"]
 		}
-		if !reflect.DeepEqual(actual, expected) {
-			t.Fatalf("bad: expected: \n%#v\nactual: \n%#v", expected, actual)
+		if diff := deep.Equal(actual, expected); diff != nil {
+			t.Fatal(diff)
 		}
 	}
 }
@@ -198,6 +200,7 @@ func TestSysUnseal_Reset(t *testing.T) {
 		"type":          "shamir",
 		"recovery_seal": false,
 		"initialized":   true,
+		"migration":     false,
 	}
 	testResponseStatus(t, resp, 200)
 	testResponseBody(t, resp, &actual)
@@ -219,8 +222,8 @@ func TestSysUnseal_Reset(t *testing.T) {
 	} else {
 		expected["cluster_id"] = actual["cluster_id"]
 	}
-	if !reflect.DeepEqual(actual, expected) {
-		t.Fatalf("\nexpected:\n%#v\nactual:\n%#v\n", expected, actual)
+	if diff := deep.Equal(actual, expected); diff != nil {
+		t.Fatal(diff)
 	}
 }
 
@@ -237,6 +240,7 @@ func TestSysUnseal_Reset(t *testing.T) {
 		"type":          "shamir",
 		"recovery_seal": false,
 		"initialized":   true,
+		"migration":     false,
 	}
 	testResponseStatus(t, resp, 200)
 	testResponseBody(t, resp, &actual)
@@ -255,8 +259,8 @@ func TestSysUnseal_Reset(t *testing.T) {
 	} else {
 		expected["cluster_id"] = actual["cluster_id"]
 	}
-	if !reflect.DeepEqual(actual, expected) {
-		t.Fatalf("\nexpected:\n%#v\nactual:\n%#v\n", expected, actual)
+	if diff := deep.Equal(actual, expected); diff != nil {
+		t.Fatal(diff)
 	}
 
 }
263
vault/core.go
263
vault/core.go
@@ -26,6 +26,7 @@ import (
|
|||||||
"github.com/hashicorp/go-uuid"
|
"github.com/hashicorp/go-uuid"
|
||||||
"github.com/hashicorp/vault/audit"
|
"github.com/hashicorp/vault/audit"
|
||||||
"github.com/hashicorp/vault/helper/consts"
|
"github.com/hashicorp/vault/helper/consts"
|
||||||
|
"github.com/hashicorp/vault/helper/jsonutil"
|
||||||
"github.com/hashicorp/vault/helper/logging"
|
"github.com/hashicorp/vault/helper/logging"
|
||||||
"github.com/hashicorp/vault/helper/mlock"
|
"github.com/hashicorp/vault/helper/mlock"
|
||||||
"github.com/hashicorp/vault/helper/namespace"
|
"github.com/hashicorp/vault/helper/namespace"
|
||||||
@@ -153,9 +154,13 @@ type Core struct {
|
|||||||
// physical backend is the un-trusted backend with durable data
|
// physical backend is the un-trusted backend with durable data
|
||||||
physical physical.Backend
|
physical physical.Backend
|
||||||
|
|
||||||
// Our Seal, for seal configuration information
|
// seal is our seal, for seal configuration information
|
||||||
seal Seal
|
seal Seal
|
||||||
|
|
||||||
|
// migrationSeal is the seal to use during a migration operation. It is the
|
||||||
|
// seal we're migrating *from*.
|
||||||
|
migrationSeal Seal
|
||||||
|
|
||||||
// barrier is the security barrier wrapping the physical backend
|
// barrier is the security barrier wrapping the physical backend
|
||||||
barrier SecurityBarrier
|
barrier SecurityBarrier
|
||||||
|
|
||||||
@@ -458,6 +463,38 @@ type CoreConfig struct {
|
|||||||
AllLoggers []log.Logger
|
AllLoggers []log.Logger
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *CoreConfig) Clone() *CoreConfig {
|
||||||
|
return &CoreConfig{
|
||||||
|
DevToken: c.DevToken,
|
||||||
|
LogicalBackends: c.LogicalBackends,
|
||||||
|
CredentialBackends: c.CredentialBackends,
|
||||||
|
AuditBackends: c.AuditBackends,
|
||||||
|
Physical: c.Physical,
|
||||||
|
HAPhysical: c.HAPhysical,
|
||||||
|
Seal: c.Seal,
|
||||||
|
Logger: c.Logger,
|
||||||
|
DisableCache: c.DisableCache,
|
||||||
|
DisableMlock: c.DisableMlock,
|
||||||
|
CacheSize: c.CacheSize,
|
||||||
|
RedirectAddr: c.RedirectAddr,
|
||||||
|
ClusterAddr: c.ClusterAddr,
|
||||||
|
DefaultLeaseTTL: c.DefaultLeaseTTL,
|
||||||
|
MaxLeaseTTL: c.MaxLeaseTTL,
|
||||||
|
ClusterName: c.ClusterName,
|
||||||
|
ClusterCipherSuites: c.ClusterCipherSuites,
|
||||||
|
EnableUI: c.EnableUI,
|
||||||
|
EnableRaw: c.EnableRaw,
|
||||||
|
PluginDirectory: c.PluginDirectory,
|
||||||
|
DisableSealWrap: c.DisableSealWrap,
|
||||||
|
ReloadFuncs: c.ReloadFuncs,
|
||||||
|
ReloadFuncsLock: c.ReloadFuncsLock,
|
||||||
|
LicensingConfig: c.LicensingConfig,
|
||||||
|
DevLicenseDuration: c.DevLicenseDuration,
|
||||||
|
DisablePerformanceStandby: c.DisablePerformanceStandby,
|
||||||
|
AllLoggers: c.AllLoggers,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// NewCore is used to construct a new core
|
// NewCore is used to construct a new core
|
||||||
func NewCore(conf *CoreConfig) (*Core, error) {
|
func NewCore(conf *CoreConfig) (*Core, error) {
|
||||||
if conf.HAPhysical != nil && conf.HAPhysical.HAEnabled() {
|
if conf.HAPhysical != nil && conf.HAPhysical.HAEnabled() {
|
||||||
@@ -712,6 +749,14 @@ func (c *Core) ResetUnsealProcess() {
|
|||||||
// this method is done with it. If you want to keep the key around, a copy
|
// this method is done with it. If you want to keep the key around, a copy
|
||||||
// should be made.
|
// should be made.
|
||||||
func (c *Core) Unseal(key []byte) (bool, error) {
|
func (c *Core) Unseal(key []byte) (bool, error) {
|
||||||
|
return c.unseal(key, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Core) UnsealWithRecoveryKeys(key []byte) (bool, error) {
|
||||||
|
return c.unseal(key, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Core) unseal(key []byte, useRecoveryKeys bool) (bool, error) {
|
||||||
defer metrics.MeasureSince([]string{"core", "unseal"}, time.Now())
|
defer metrics.MeasureSince([]string{"core", "unseal"}, time.Now())
|
||||||
|
|
||||||
c.stateLock.Lock()
|
c.stateLock.Lock()
|
||||||
@@ -739,60 +784,17 @@ func (c *Core) Unseal(key []byte) (bool, error) {
 		return false, &ErrInvalidKey{fmt.Sprintf("key is longer than maximum %d bytes", max)}
 	}

-	// Get the barrier seal configuration
-	config, err := c.seal.BarrierConfig(ctx)
-	if err != nil {
-		return false, err
-	}
-
 	// Check if already unsealed
 	if !c.Sealed() {
 		return true, nil
 	}

-	masterKey, err := c.unsealPart(ctx, config, key, false)
-	if err != nil {
-		return false, err
-	}
-	if masterKey != nil {
-		return c.unsealInternal(ctx, masterKey)
-	}
-
-	return false, nil
-}
-
-// UnsealWithRecoveryKeys is used to provide one of the recovery key shares to
-// unseal the Vault.
-func (c *Core) UnsealWithRecoveryKeys(ctx context.Context, key []byte) (bool, error) {
-	defer metrics.MeasureSince([]string{"core", "unseal_with_recovery_keys"}, time.Now())
-
-	c.stateLock.Lock()
-	defer c.stateLock.Unlock()
-
-	// Explicitly check for init status
-	init, err := c.Initialized(ctx)
-	if err != nil {
-		return false, err
-	}
-	if !init {
-		return false, ErrNotInit
-	}
-
-	var config *SealConfig
-	// If recovery keys are supported then use recovery seal config to unseal
-	if c.seal.RecoveryKeySupported() {
-		config, err = c.seal.RecoveryConfig(ctx)
-		if err != nil {
-			return false, err
-		}
-	}
-
-	// Check if already unsealed
-	if !c.Sealed() {
-		return true, nil
-	}
-
-	masterKey, err := c.unsealPart(ctx, config, key, true)
+	sealToUse := c.seal
+	if c.migrationSeal != nil {
+		sealToUse = c.migrationSeal
+	}
+
+	masterKey, err := c.unsealPart(ctx, sealToUse, key, useRecoveryKeys)
 	if err != nil {
 		return false, err
 	}
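For orientation, here is a minimal standalone sketch of the selection the reworked unseal path performs: supplied key shares are checked against the old, migration seal while a migration is pending, and against the currently configured seal otherwise. The types here are toys invented for the example, not Vault's:

```go
package main

import "fmt"

// seal is a toy stand-in for the Seal interface in this diff.
type seal struct{ name string }

type core struct {
	seal          *seal // currently configured ("new") seal
	migrationSeal *seal // non-nil only while a seal migration is pending
}

// sealForUnseal mirrors the sealToUse selection in unseal(): provided key
// shares must match the seal that protected the existing barrier, which is
// the migration seal while migrating.
func (c *core) sealForUnseal() *seal {
	if c.migrationSeal != nil {
		return c.migrationSeal
	}
	return c.seal
}

func main() {
	c := &core{seal: &seal{name: "awskms"}}
	fmt.Println(c.sealForUnseal().name) // awskms

	// Entering migration mode: the old shamir seal still guards the barrier.
	c.migrationSeal = &seal{name: "shamir"}
	fmt.Println(c.sealForUnseal().name) // shamir
}
```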
@@ -805,7 +807,7 @@ func (c *Core) UnsealWithRecoveryKeys(ctx context.Context, key []byte) (bool, error) {

 // unsealPart takes in a key share, and returns the master key if the threshold
 // is met. If recovery keys are supported, recovery key shares may be provided.
-func (c *Core) unsealPart(ctx context.Context, config *SealConfig, key []byte, useRecoveryKeys bool) ([]byte, error) {
+func (c *Core) unsealPart(ctx context.Context, seal Seal, key []byte, useRecoveryKeys bool) ([]byte, error) {
 	// Check if we already have this piece
 	if c.unlockInfo != nil {
 		for _, existing := range c.unlockInfo.Parts {
@@ -826,6 +828,17 @@ func (c *Core) unsealPart(ctx context.Context, config *SealConfig, key []byte, useRecoveryKeys bool) ([]byte, error) {
 	// Store this key
 	c.unlockInfo.Parts = append(c.unlockInfo.Parts, key)

+	var config *SealConfig
+	var err error
+	if seal.RecoveryKeySupported() && (useRecoveryKeys || c.migrationSeal != nil) {
+		config, err = seal.RecoveryConfig(ctx)
+	} else {
+		config, err = seal.BarrierConfig(ctx)
+	}
+	if err != nil {
+		return nil, err
+	}
+
 	// Check if we don't have enough keys to unlock, proceed through the rest of
 	// the call only if we have met the threshold
 	if len(c.unlockInfo.Parts) < config.SecretThreshold {
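A small self-contained sketch of the branch added above: the share threshold is taken from the recovery seal configuration when recovery keys are in play (an explicit recovery-key unseal or a pending migration), and from the barrier configuration otherwise. Names and types below are illustrative assumptions, not Vault's:

```go
package main

import "fmt"

// sealConfig carries only the field used below; the real SealConfig has more.
type sealConfig struct{ SecretThreshold int }

// configForShares mirrors the branch above: when the seal supports recovery
// keys and either recovery-key unseal or a migration is requested, the
// recovery config governs how many shares are needed; otherwise the barrier
// config does.
func configForShares(recoverySupported, useRecoveryKeys, migrating bool,
	recovery, barrier *sealConfig) *sealConfig {
	if recoverySupported && (useRecoveryKeys || migrating) {
		return recovery
	}
	return barrier
}

func main() {
	recovery := &sealConfig{SecretThreshold: 3}
	barrier := &sealConfig{SecretThreshold: 1}

	parts := 2 // shares supplied so far

	cfg := configForShares(true, false, true, recovery, barrier)
	fmt.Println(parts < cfg.SecretThreshold) // true: keep waiting for shares

	cfg = configForShares(false, false, false, recovery, barrier)
	fmt.Println(parts < cfg.SecretThreshold) // false: threshold already met
}
```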
@@ -846,7 +859,8 @@ func (c *Core) unsealPart(ctx context.Context, config *SealConfig, key []byte, useRecoveryKeys bool) ([]byte, error) {
 	// Recover the split key. recoveredKey is the shamir combined
 	// key, or the single provided key if the threshold is 1.
 	var recoveredKey []byte
-	var err error
+	var masterKey []byte
+	var recoveryKey []byte
 	if config.SecretThreshold == 1 {
 		recoveredKey = make([]byte, len(c.unlockInfo.Parts[0]))
 		copy(recoveredKey, c.unlockInfo.Parts[0])
@@ -857,39 +871,114 @@ func (c *Core) unsealPart(ctx context.Context, config *SealConfig, key []byte, useRecoveryKeys bool) ([]byte, error) {
 		}
 	}

-	if c.seal.RecoveryKeySupported() && useRecoveryKeys {
+	if seal.RecoveryKeySupported() && (useRecoveryKeys || c.migrationSeal != nil) {
 		// Verify recovery key
-		if err := c.seal.VerifyRecoveryKey(ctx, recoveredKey); err != nil {
+		if err := seal.VerifyRecoveryKey(ctx, recoveredKey); err != nil {
 			return nil, err
 		}
+		recoveryKey = recoveredKey

 		// Get stored keys and shamir combine into single master key. Unsealing with
 		// recovery keys currently does not support: 1) mixed stored and non-stored
 		// keys setup, nor 2) seals that support recovery keys but not stored keys.
 		// If insufficient shares are provided, shamir.Combine will error, and if
 		// no stored keys are found it will return masterKey as nil.
-		var masterKey []byte
-		if c.seal.StoredKeysSupported() {
-			masterKeyShares, err := c.seal.GetStoredKeys(ctx)
+		if seal.StoredKeysSupported() {
+			masterKeyShares, err := seal.GetStoredKeys(ctx)
 			if err != nil {
 				return nil, errwrap.Wrapf("unable to retrieve stored keys: {{err}}", err)
 			}

 			if len(masterKeyShares) == 1 {
-				return masterKeyShares[0], nil
-			}
-
-			masterKey, err = shamir.Combine(masterKeyShares)
-			if err != nil {
-				return nil, errwrap.Wrapf("failed to compute master key: {{err}}", err)
+				masterKey = masterKeyShares[0]
+			} else {
+				masterKey, err = shamir.Combine(masterKeyShares)
+				if err != nil {
+					return nil, errwrap.Wrapf("failed to compute master key: {{err}}", err)
+				}
 			}
 		}
-		return masterKey, nil
+	} else {
+		masterKey = recoveredKey
 	}

-	// If this is not a recovery key-supported seal, then the recovered key is
-	// the master key to be returned.
-	return recoveredKey, nil
+	// If we have a migration seal, now's the time!
+	if c.migrationSeal != nil {
+		// Unseal the barrier so we can rekey
+		if err := c.barrier.Unseal(ctx, masterKey); err != nil {
+			return nil, errwrap.Wrapf("error unsealing barrier with constructed master key: {{err}}", err)
+		}
+		defer c.barrier.Seal()
+
+		// The seal used in this function will have been the migration seal,
+		// and c.seal will be the opposite type, so there are two
+		// possibilities: Shamir to auto, and auto to Shamir.
+		if !seal.RecoveryKeySupported() {
+			// The new seal will have recovery keys; we set it to the existing
+			// master key, so barrier key shares -> recovery key shares
+			if err := c.seal.SetRecoveryKey(ctx, masterKey); err != nil {
+				return nil, errwrap.Wrapf("error setting new recovery key information: {{err}}", err)
+			}
+
+			// Generate a new master key
+			newMasterKey, err := c.barrier.GenerateKey()
+			if err != nil {
+				return nil, errwrap.Wrapf("error generating new master key: {{err}}", err)
+			}
+
+			// Rekey the barrier
+			if err := c.barrier.Rekey(ctx, newMasterKey); err != nil {
+				return nil, errwrap.Wrapf("error rekeying barrier during migration: {{err}}", err)
+			}
+
+			// Store the new master key
+			if err := c.seal.SetStoredKeys(ctx, [][]byte{newMasterKey}); err != nil {
+				return nil, errwrap.Wrapf("error storing new master key: {{err}}", err)
+			}
+
+			// Return the new key so it can be used to unlock the barrier
+			masterKey = newMasterKey
+		} else {
+			// In this case we have to ensure that the recovery information was
+			// set properly.
+			if recoveryKey == nil {
+				return nil, errors.New("did not get expected recovery information to set new seal during migration")
+			}
+
+			// Auto to Shamir. We have recovery keys; we're going to use them
+			// as the new barrier key
+			if err := c.barrier.Rekey(ctx, recoveryKey); err != nil {
+				return nil, errwrap.Wrapf("error rekeying barrier during migration: {{err}}", err)
+			}
+
+			masterKey = recoveryKey
+		}
+
+		// At this point we've swapped things around and need to ensure we
+		// don't migrate again
+		c.migrationSeal = nil
+
+		// Ensure we populate the new values
+		bc, err := c.seal.BarrierConfig(ctx)
+		if err != nil {
+			return nil, errwrap.Wrapf("error fetching barrier config after migration: {{err}}", err)
+		}
+		if err := c.seal.SetBarrierConfig(ctx, bc); err != nil {
+			return nil, errwrap.Wrapf("error storing barrier config after migration: {{err}}", err)
+		}
+
+		if c.seal.RecoveryKeySupported() {
+			rc, err := c.seal.RecoveryConfig(ctx)
+			if err != nil {
+				return nil, errwrap.Wrapf("error fetching recovery config after migration: {{err}}", err)
+			}
+			if err := c.seal.SetRecoveryConfig(ctx, rc); err != nil {
+				return nil, errwrap.Wrapf("error storing recovery config after migration: {{err}}", err)
+			}
+		}
+	}
+
+	return masterKey, nil
 }

 // unsealInternal takes in the master key and attempts to unseal the barrier.
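The migration block above handles two directions: Shamir to auto (the old master key becomes the new seal's recovery key, a fresh master key is generated, the barrier is rekeyed to it, and the new seal stores it) and auto to Shamir (the old recovery key becomes the new barrier key). The toy model below restates that flow with plain byte slices; it is an illustration under assumed simplifications, not the real barrier or seal code:

```go
package main

import (
	"crypto/rand"
	"fmt"
)

// barrier is a toy stand-in: it only remembers which key currently protects it.
type barrier struct{ key []byte }

func (b *barrier) rekey(newKey []byte) { b.key = newKey }

func newKey() []byte {
	k := make([]byte, 32)
	rand.Read(k)
	return k
}

// shamirToAuto models the first branch above: the old barrier key becomes the
// recovery key of the new auto seal, a fresh master key is generated, the
// barrier is rekeyed to it, and the new seal would store that key.
func shamirToAuto(b *barrier, oldMasterKey []byte) (recoveryKey, storedKey []byte) {
	recoveryKey = oldMasterKey
	storedKey = newKey()
	b.rekey(storedKey)
	return recoveryKey, storedKey
}

// autoToShamir models the second branch: the recovery key of the old auto seal
// becomes the barrier key that the new shamir shares will reconstruct.
func autoToShamir(b *barrier, recoveryKey []byte) []byte {
	b.rekey(recoveryKey)
	return recoveryKey
}

func main() {
	b := &barrier{key: newKey()}

	rk, sk := shamirToAuto(b, b.key)
	fmt.Println("shamir->auto: recovery key kept:", len(rk) == 32,
		"barrier rekeyed to stored key:", string(b.key) == string(sk))

	newBarrierKey := autoToShamir(b, rk)
	fmt.Println("auto->shamir: barrier now under old recovery key:",
		string(b.key) == string(newBarrierKey))
}
```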
@@ -1525,6 +1614,50 @@ func lastRemoteWALImpl(c *Core) uint64 {
 	return 0
 }

+func (c *Core) PhysicalSealConfigs(ctx context.Context) (*SealConfig, *SealConfig, error) {
+	pe, err := c.physical.Get(ctx, barrierSealConfigPath)
+	if err != nil {
+		return nil, nil, errwrap.Wrapf("failed to fetch barrier seal configuration at migration check time: {{err}}", err)
+	}
+	if pe == nil {
+		return nil, nil, nil
+	}
+
+	barrierConf := new(SealConfig)
+
+	if err := jsonutil.DecodeJSON(pe.Value, barrierConf); err != nil {
+		return nil, nil, errwrap.Wrapf("failed to decode barrier seal configuration at migration check time: {{err}}", err)
+	}
+
+	var recoveryConf *SealConfig
+	pe, err = c.physical.Get(ctx, recoverySealConfigPlaintextPath)
+	if err != nil {
+		return nil, nil, errwrap.Wrapf("failed to fetch seal configuration at migration check time: {{err}}", err)
+	}
+	if pe != nil {
+		recoveryConf = &SealConfig{}
+		if err := jsonutil.DecodeJSON(pe.Value, recoveryConf); err != nil {
+			return nil, nil, errwrap.Wrapf("failed to decode seal configuration at migration check time: {{err}}", err)
+		}
+	}
+
+	return barrierConf, recoveryConf, nil
+}
+
+func (c *Core) SetSealsForMigration(migrationSeal, newSeal Seal) {
+	c.stateLock.Lock()
+	defer c.stateLock.Unlock()
+	c.migrationSeal = migrationSeal
+	c.seal = newSeal
+	c.logger.Warn("entering seal migration mode; Vault will not automatically unseal even if using an autoseal")
+}
+
+func (c *Core) IsInSealMigration() bool {
+	c.stateLock.RLock()
+	defer c.stateLock.RUnlock()
+	return c.migrationSeal != nil
+}
+
 func (c *Core) BarrierEncryptorAccess() *BarrierEncryptorAccess {
 	return NewBarrierEncryptorAccess(c.barrier)
 }
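PhysicalSealConfigs, SetSealsForMigration and IsInSealMigration give a server startup path what it needs to decide whether unseal must run in migration mode. The sketch below is a loose, assumption-laden outline of that decision (toy types and the invented helper name migrationNeeded), not the logic Vault itself ships:

```go
package main

import "fmt"

// migrationNeeded sketches the kind of check a startup path could make before
// calling SetSealsForMigration: if the seal type recorded in storage (as
// returned, in Vault, by PhysicalSealConfigs) differs from the seal the server
// is now configured with, unsealing has to run in migration mode.
func migrationNeeded(storedBarrierType, configuredType string) bool {
	return storedBarrierType != configuredType
}

func main() {
	// Storage says the barrier was protected by shamir key shares,
	// but the server was restarted with an auto seal.
	fmt.Println(migrationNeeded("shamir", "awskms")) // true: set seals for migration

	// Same type on both sides: unseal normally.
	fmt.Println(migrationNeeded("shamir", "shamir")) // false
}
```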
@@ -6,6 +6,7 @@ import (
 	"testing"
 	"time"

+	"github.com/go-test/deep"
 	"github.com/hashicorp/errwrap"
 	log "github.com/hashicorp/go-hclog"
 	"github.com/hashicorp/go-uuid"
@@ -722,11 +723,12 @@ func TestCore_HandleLogin_Token(t *testing.T) {
 		TTL:          time.Hour * 24,
 		CreationTime: te.CreationTime,
 		NamespaceID:  namespace.RootNamespaceID,
+		CubbyholeID:  te.CubbyholeID,
 		Type:         logical.TokenTypeService,
 	}

-	if !reflect.DeepEqual(te, expect) {
-		t.Fatalf("Bad: %#v expect: %#v", te, expect)
+	if diff := deep.Equal(te, expect); diff != nil {
+		t.Fatal(diff)
 	}

 	// Check that we have a lease with default duration
@@ -1025,10 +1027,11 @@ func TestCore_HandleRequest_CreateToken_Lease(t *testing.T) {
 		CreationTime: te.CreationTime,
 		TTL:          time.Hour * 24 * 32,
 		NamespaceID:  namespace.RootNamespaceID,
+		CubbyholeID:  te.CubbyholeID,
 		Type:         logical.TokenTypeService,
 	}
-	if !reflect.DeepEqual(te, expect) {
-		t.Fatalf("Bad: %#v expect: %#v", te, expect)
+	if diff := deep.Equal(te, expect); diff != nil {
+		t.Fatal(diff)
 	}

 	// Check that we have a lease with default duration
@@ -1072,10 +1075,11 @@ func TestCore_HandleRequest_CreateToken_NoDefaultPolicy(t *testing.T) {
 		CreationTime: te.CreationTime,
 		TTL:          time.Hour * 24 * 32,
 		NamespaceID:  namespace.RootNamespaceID,
+		CubbyholeID:  te.CubbyholeID,
 		Type:         logical.TokenTypeService,
 	}
-	if !reflect.DeepEqual(te, expect) {
-		t.Fatalf("Bad: %#v expect: %#v", te, expect)
+	if diff := deep.Equal(te, expect); diff != nil {
+		t.Fatal(diff)
 	}
 }

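The tests above switch from reflect.DeepEqual plus a formatted fatal message to github.com/go-test/deep, whose Equal returns a list of per-field differences that can be printed directly. A small self-contained example of the difference in output (struct and values invented for illustration):

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/go-test/deep"
)

type tokenEntry struct {
	ID          string
	CubbyholeID string
	TTL         int
}

func main() {
	got := tokenEntry{ID: "abc", CubbyholeID: "cubby-1", TTL: 3600}
	want := tokenEntry{ID: "abc", TTL: 3600} // expectation forgot CubbyholeID

	// reflect.DeepEqual only says the values differ.
	fmt.Println(reflect.DeepEqual(got, want)) // false

	// deep.Equal names each differing field, which is what the updated
	// tests print via t.Fatal(diff).
	for _, d := range deep.Equal(got, want) {
		fmt.Println(d) // CubbyholeID: cubby-1 != ""
	}
}
```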
@@ -276,6 +276,11 @@ func (c *Core) UnsealWithStoredKeys(ctx context.Context) error {
 		return nil
 	}

+	// Disallow auto-unsealing when migrating
+	if c.IsInSealMigration() {
+		return nil
+	}
+
 	sealed := c.Sealed()
 	if !sealed {
 		return nil
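The guard added above makes stored-key (auto) unsealing a no-op while a seal migration is pending, since the operator has to supply the old seal's key shares instead. A toy model of that early-return guard, with invented types standing in for the core:

```go
package main

import "fmt"

// core is a toy model of the auto-unseal guard: stored-key unsealing is
// skipped entirely while a seal migration is pending.
type core struct {
	sealed          bool
	inSealMigration bool
	storedKey       []byte
}

func (c *core) unsealWithStoredKeys() error {
	if c.inSealMigration {
		return nil // disallow auto-unsealing when migrating
	}
	if !c.sealed {
		return nil
	}
	if c.storedKey == nil {
		return fmt.Errorf("no stored key available")
	}
	c.sealed = false
	return nil
}

func main() {
	c := &core{sealed: true, inSealMigration: true, storedKey: []byte("k")}
	_ = c.unsealWithStoredKeys()
	fmt.Println("still sealed during migration:", c.sealed) // true

	c.inSealMigration = false
	_ = c.unsealWithStoredKeys()
	fmt.Println("sealed after auto-unseal:", c.sealed) // false
}
```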
@@ -62,11 +62,13 @@ type Seal interface {
 	BarrierType() string
 	BarrierConfig(context.Context) (*SealConfig, error)
 	SetBarrierConfig(context.Context, *SealConfig) error
+	SetCachedBarrierConfig(*SealConfig)

 	RecoveryKeySupported() bool
 	RecoveryType() string
 	RecoveryConfig(context.Context) (*SealConfig, error)
 	SetRecoveryConfig(context.Context, *SealConfig) error
+	SetCachedRecoveryConfig(*SealConfig)
 	SetRecoveryKey(context.Context, []byte) error
 	VerifyRecoveryKey(context.Context, []byte) error
 }
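The two new SetCached... methods let callers install a seal configuration that has already been loaded, without another round trip to storage; the defaultSeal and autoSeal implementations further down simply Store the value. A minimal standalone sketch of that caching pattern using sync/atomic (the types and the storage fallback here are assumptions for the example):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type sealConfig struct {
	Type            string
	SecretShares    int
	SecretThreshold int
}

// cachingSeal shows the shape of a SetCachedBarrierConfig-style method: the
// setter only updates an in-memory atomic.Value; reads fall back to a
// (simulated) storage load when nothing is cached yet.
type cachingSeal struct {
	cached atomic.Value // *sealConfig
}

func (s *cachingSeal) SetCachedBarrierConfig(c *sealConfig) {
	s.cached.Store(c)
}

func (s *cachingSeal) BarrierConfig() (*sealConfig, error) {
	if c, ok := s.cached.Load().(*sealConfig); ok && c != nil {
		return c, nil
	}
	// Stand-in for a read from physical storage.
	c := &sealConfig{Type: "shamir", SecretShares: 5, SecretThreshold: 3}
	s.cached.Store(c)
	return c, nil
}

func main() {
	s := &cachingSeal{}

	// Pre-populate the cache, e.g. with a config fetched during migration.
	s.SetCachedBarrierConfig(&sealConfig{Type: "awskms", SecretShares: 1, SecretThreshold: 1})

	c, _ := s.BarrierConfig()
	fmt.Println(c.Type, c.SecretThreshold) // awskms 1
}
```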
@@ -160,8 +162,8 @@ func (d *defaultSeal) BarrierConfig(ctx context.Context) (*SealConfig, error) {
 		conf.Type = d.BarrierType()
 	case d.BarrierType():
 	default:
-		d.core.logger.Error("barrier seal type does not match loaded type", "barrier_seal_type", conf.Type, "loaded_seal_type", d.BarrierType())
-		return nil, fmt.Errorf("barrier seal type of %q does not match loaded type of %q", conf.Type, d.BarrierType())
+		d.core.logger.Error("barrier seal type does not match expected type", "barrier_seal_type", conf.Type, "loaded_seal_type", d.BarrierType())
+		return nil, fmt.Errorf("barrier seal type of %q does not match expected type of %q", conf.Type, d.BarrierType())
 	}

 	// Check for a valid seal configuration
@@ -210,6 +212,10 @@ func (d *defaultSeal) SetBarrierConfig(ctx context.Context, config *SealConfig) error {
 	return nil
 }

+func (d *defaultSeal) SetCachedBarrierConfig(config *SealConfig) {
+	d.config.Store(config)
+}
+
 func (d *defaultSeal) RecoveryType() string {
 	if d.PretendToAllowRecoveryKeys {
 		return RecoveryTypeShamir
@@ -234,6 +240,9 @@ func (d *defaultSeal) SetRecoveryConfig(ctx context.Context, config *SealConfig) error {
 	return fmt.Errorf("recovery not supported")
 }

+func (d *defaultSeal) SetCachedRecoveryConfig(config *SealConfig) {
+}
+
 func (d *defaultSeal) VerifyRecoveryKey(ctx context.Context, key []byte) error {
 	if d.PretendToAllowRecoveryKeys {
 		if subtle.ConstantTimeCompare(key, d.PretendRecoveryKey) == 1 {
@@ -226,6 +226,10 @@ func (d *autoSeal) SetBarrierConfig(ctx context.Context, conf *SealConfig) error {
 	return nil
 }

+func (d *autoSeal) SetCachedBarrierConfig(config *SealConfig) {
+	d.barrierConfig.Store(config)
+}
+
 func (d *autoSeal) RecoveryType() string {
 	return RecoveryTypeShamir
 }
@@ -340,6 +344,10 @@ func (d *autoSeal) SetRecoveryConfig(ctx context.Context, conf *SealConfig) error {
 	return nil
 }

+func (d *autoSeal) SetCachedRecoveryConfig(config *SealConfig) {
+	d.recoveryConfig.Store(config)
+}
+
 func (d *autoSeal) VerifyRecoveryKey(ctx context.Context, key []byte) error {
 	if key == nil {
 		return fmt.Errorf("recovery key to verify is nil")
@@ -56,7 +56,7 @@ func performTestSealUnwrapper(t *testing.T, phys physical.Backend, logger log.Logger) {
 	// Save the original for comparison later
 	origBytes := make([]byte, len(entry.Value))
 	copy(origBytes, entry.Value)
-	se := &physical.SealWrapEntry{
+	se := &physical.EncryptedBlobInfo{
 		Ciphertext: entry.Value,
 	}
 	seb, err := proto.Marshal(se)
@@ -255,7 +255,7 @@ func TestCoreUnseal(core *Core, key []byte) (bool, error) {
 }

 func TestCoreUnsealWithRecoveryKeys(core *Core, key []byte) (bool, error) {
-	return core.UnsealWithRecoveryKeys(context.Background(), key)
+	return core.UnsealWithRecoveryKeys(key)
 }

 // TestCoreUnsealed returns a pure in-memory core that is already
@@ -884,6 +884,7 @@ type TestListener struct {

 type TestClusterCore struct {
 	*Core
+	CoreConfig *CoreConfig
 	Client     *api.Client
 	Handler    http.Handler
 	Listeners  []*TestListener
@@ -1259,28 +1260,31 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *TestCluster {
 	}

 	cores := []*Core{}
+	coreConfigs := []*CoreConfig{}
 	for i := 0; i < numCores; i++ {
-		coreConfig.RedirectAddr = fmt.Sprintf("https://127.0.0.1:%d", listeners[i][0].Address.Port)
-		if coreConfig.ClusterAddr != "" {
-			coreConfig.ClusterAddr = fmt.Sprintf("https://127.0.0.1:%d", listeners[i][0].Address.Port+105)
+		localConfig := coreConfig.Clone()
+		localConfig.RedirectAddr = fmt.Sprintf("https://127.0.0.1:%d", listeners[i][0].Address.Port)
+		if localConfig.ClusterAddr != "" {
+			localConfig.ClusterAddr = fmt.Sprintf("https://127.0.0.1:%d", listeners[i][0].Address.Port+105)
 		}

 		// if opts.SealFunc is provided, use that to generate a seal for the config instead
 		if opts != nil && opts.SealFunc != nil {
-			coreConfig.Seal = opts.SealFunc()
+			localConfig.Seal = opts.SealFunc()
 		}

 		if opts != nil && opts.Logger != nil {
-			coreConfig.Logger = opts.Logger.Named(fmt.Sprintf("core%d", i))
+			localConfig.Logger = opts.Logger.Named(fmt.Sprintf("core%d", i))
 		}

-		coreConfig.LicensingConfig = testGetLicensingConfig(pubKey)
+		localConfig.LicensingConfig = testGetLicensingConfig(pubKey)

-		c, err := NewCore(coreConfig)
+		c, err := NewCore(localConfig)
 		if err != nil {
 			t.Fatalf("err: %v", err)
 		}
 		cores = append(cores, c)
+		coreConfigs = append(coreConfigs, localConfig)
 		if opts != nil && opts.HandlerFunc != nil {
 			handlers[i] = opts.HandlerFunc(&HandlerProperties{
 				Core: c,
@@ -1288,6 +1292,12 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *TestCluster {
 			})
 			servers[i].Handler = handlers[i]
 		}
+
+		// Set this in case the Seal was manually set before the core was
+		// created
+		if localConfig.Seal != nil {
+			localConfig.Seal.SetCore(c)
+		}
 	}

 	//
@@ -1304,8 +1314,8 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *TestCluster {
 		return ret
 	}

-	if numCores > 1 {
-		for i := 1; i < numCores; i++ {
+	for i := 0; i < numCores; i++ {
+		if coreConfigs[i].ClusterAddr != "" {
 			cores[i].SetClusterListenerAddrs(clusterAddrGen(listeners[i]))
 			cores[i].SetClusterHandler(handlers[i])
 		}
@@ -1444,6 +1454,7 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *TestCluster {
 	for i := 0; i < numCores; i++ {
 		tcc := &TestClusterCore{
 			Core:         cores[i],
+			CoreConfig:   coreConfigs[i],
 			ServerKey:    certInfoSlice[i].key,
 			ServerKeyPEM: certInfoSlice[i].keyPEM,
 			ServerCert:   certInfoSlice[i].cert,