Same seal migration oss (#10224)

* Refactoring and test improvements.

* Support migrating from a given type of autoseal to that same type but with different parameters.
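
For example, migrating from one transit seal to another can be expressed in server config the same way as other seal migrations: mark the old seal stanza disabled and add the new one. A sketch only (key names are illustrative, not taken from this commit); the disabled-stanza pattern is the one quoted in the code comments below:

    seal "transit" {
      // old seal parameters ...
      key_name = "old-key"
      disabled = "true"
    }

    seal "transit" {
      // new seal parameters ...
      key_name = "new-key"
    }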
Nick Cabatoff
2020-10-23 14:16:04 -04:00
committed by GitHub
parent c787c97cec
commit f7c384fd4c
12 changed files with 684 additions and 560 deletions

View File

@@ -1100,7 +1100,9 @@ func (c *ServerCommand) Run(args []string) int {
 				Logger: c.logger.Named("shamir"),
 			}),
 		})
-		wrapper, sealConfigError = configutil.ConfigureWrapper(configSeal, &infoKeys, &info, sealLogger)
+		var sealInfoKeys []string
+		var sealInfoMap = map[string]string{}
+		wrapper, sealConfigError = configutil.ConfigureWrapper(configSeal, &sealInfoKeys, &sealInfoMap, sealLogger)
 		if sealConfigError != nil {
 			if !errwrap.ContainsType(sealConfigError, new(logical.KeyNotFoundError)) {
 				c.UI.Error(fmt.Sprintf(
@@ -1116,12 +1118,18 @@ func (c *ServerCommand) Run(args []string) int {
 			})
 		}
+		var infoPrefix = ""
 		if configSeal.Disabled {
 			unwrapSeal = seal
+			infoPrefix = "Old "
 		} else {
 			barrierSeal = seal
 			barrierWrapper = wrapper
 		}
+		for _, k := range sealInfoKeys {
+			infoKeys = append(infoKeys, infoPrefix+k)
+			info[infoPrefix+k] = sealInfoMap[k]
+		}
 
 		// Ensure that the seal finalizer is called, even if using verify-only
 		defer func() {
@@ -1570,7 +1578,7 @@ CLUSTER_SYNTHESIS_COMPLETE:
 	// Vault cluster with multiple servers is configured with auto-unseal but is
 	// uninitialized. Once one server initializes the storage backend, this
 	// goroutine will pick up the unseal keys and unseal this instance.
-	if !core.IsInSealMigration() {
+	if !core.IsInSealMigrationMode() {
 		go func() {
 			for {
 				err := core.UnsealWithStoredKeys(context.Background())

View File

@@ -2,6 +2,7 @@ package sealhelper
 import (
 	"path"
+	"strconv"
 
 	"github.com/hashicorp/go-hclog"
 	"github.com/hashicorp/vault/api"
@@ -20,7 +21,7 @@ type TransitSealServer struct {
 	*vault.TestCluster
 }
 
-func NewTransitSealServer(t testing.T) *TransitSealServer {
+func NewTransitSealServer(t testing.T, idx int) *TransitSealServer {
 	conf := &vault.CoreConfig{
 		LogicalBackends: map[string]logical.Factory{
 			"transit": transit.Factory,
@@ -29,7 +30,7 @@ func NewTransitSealServer(t testing.T) *TransitSealServer {
 	opts := &vault.TestClusterOptions{
 		NumCores:    1,
 		HandlerFunc: http.Handler,
-		Logger:      logging.NewVaultLogger(hclog.Trace).Named(t.Name()).Named("transit"),
+		Logger:      logging.NewVaultLogger(hclog.Trace).Named(t.Name()).Named("transit-seal" + strconv.Itoa(idx)),
 	}
 	teststorage.InmemBackendSetup(conf, opts)
 	cluster := vault.NewTestCluster(t, conf, opts)

View File

@@ -101,20 +101,6 @@ func handleSysUnseal(core *vault.Core) http.Handler {
 			return
 		}
 
-		isInSealMigration := core.IsInSealMigration()
-		if !req.Migrate && isInSealMigration {
-			respondError(
-				w, http.StatusBadRequest,
-				errors.New("'migrate' parameter must be set true in JSON body when in seal migration mode"))
-			return
-		}
-		if req.Migrate && !isInSealMigration {
-			respondError(
-				w, http.StatusBadRequest,
-				errors.New("'migrate' parameter set true in JSON body when not in seal migration mode"))
-			return
-		}
-
 		if req.Key == "" {
 			respondError(
 				w, http.StatusBadRequest,
@@ -138,9 +124,10 @@ func handleSysUnseal(core *vault.Core) http.Handler {
 			}
 		}
 
-		// Attempt the unseal
-		if core.SealAccess().RecoveryKeySupported() {
-			_, err = core.UnsealWithRecoveryKeys(key)
+		// Attempt the unseal. If migrate was specified, the key should correspond
+		// to the old seal.
+		if req.Migrate {
+			_, err = core.UnsealMigrate(key)
 		} else {
 			_, err = core.Unseal(key)
 		}
@@ -231,7 +218,7 @@ func handleSysSealStatusRaw(core *vault.Core, w http.ResponseWriter, r *http.Req
 			Progress:     progress,
 			Nonce:        nonce,
 			Version:      version.GetVersion().VersionNumber(),
-			Migration:    core.IsInSealMigration(),
+			Migration:    core.IsInSealMigrationMode() && !core.IsSealMigrated(),
 			ClusterName:  clusterName,
 			ClusterID:    clusterID,
 			RecoverySeal: core.SealAccess().RecoveryKeySupported(),
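
Note on the seal-status change above: a node now reports Migration only while a migration is pending, so clients can poll seal-status to await completion. A minimal sketch using the api package (assumes "errors", "time", and "github.com/hashicorp/vault/api" imports; the function name and 60s timeout are illustrative; the sealmigration tests below do the same thing in awaitMigration):

	// awaitSealMigration polls seal-status until the migration flag clears.
	func awaitSealMigration(client *api.Client) error {
		deadline := time.Now().Add(60 * time.Second)
		for time.Now().Before(deadline) {
			status, err := client.Sys().SealStatus()
			if err != nil {
				return err
			}
			if !status.Migration {
				return nil // no migration pending
			}
			time.Sleep(time.Second)
		}
		return errors.New("seal migration did not complete in time")
	}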

File diff suppressed because it is too large

View File

@@ -1,5 +1,3 @@
-// +build !enterprise
-
 package sealmigration
 
 import (
@@ -32,23 +30,24 @@ func TestSealMigration_TransitToShamir_Pre14(t *testing.T) {
 func testSealMigrationTransitToShamir_Pre14(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int) {
 	// Create the transit server.
-	tss := sealhelper.NewTransitSealServer(t)
+	tss := sealhelper.NewTransitSealServer(t, 0)
 	defer func() {
 		if tss != nil {
 			tss.Cleanup()
 		}
 	}()
-	tss.MakeKey(t, "transit-seal-key")
+	sealKeyName := "transit-seal-key"
+	tss.MakeKey(t, sealKeyName)
 
 	// Initialize the backend with transit.
-	cluster, _, transitSeal := initializeTransit(t, logger, storage, basePort, tss)
+	cluster, opts := initializeTransit(t, logger, storage, basePort, tss, sealKeyName)
 	rootToken, recoveryKeys := cluster.RootToken, cluster.RecoveryKeys
 	cluster.EnsureCoresSealed(t)
 	cluster.Cleanup()
 	storage.Cleanup(t, cluster)
 
 	// Migrate the backend from transit to shamir
-	migrateFromTransitToShamir_Pre14(t, logger, storage, basePort, tss, transitSeal, rootToken, recoveryKeys)
+	migrateFromTransitToShamir_Pre14(t, logger, storage, basePort, tss, opts.SealFunc, rootToken, recoveryKeys)
 
 	// Now that migration is done, we can nuke the transit server, since we
 	// can unseal without it.
@@ -60,25 +59,20 @@ func testSealMigrationTransitToShamir_Pre14(t *testing.T, logger hclog.Logger, s
 	runShamir(t, logger, storage, basePort, rootToken, recoveryKeys)
 }
 
-func migrateFromTransitToShamir_Pre14(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int, tss *sealhelper.TransitSealServer, transitSeal vault.Seal, rootToken string, recoveryKeys [][]byte) {
+func migrateFromTransitToShamir_Pre14(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int,
+	tss *sealhelper.TransitSealServer, sealFunc func() vault.Seal, rootToken string, recoveryKeys [][]byte) {
 
 	var baseClusterPort = basePort + 10
 
-	var conf = vault.CoreConfig{
-		Logger: logger.Named("migrateFromTransitToShamir"),
-		// N.B. Providing an UnwrapSeal puts us in migration mode. This is the
-		// equivalent of doing the following in HCL:
-		//     seal "transit" {
-		//       // ...
-		//       disabled = "true"
-		//     }
-		UnwrapSeal: transitSeal,
-	}
+	var conf vault.CoreConfig
 	var opts = vault.TestClusterOptions{
+		Logger:                logger.Named("migrateFromTransitToShamir"),
 		HandlerFunc:           vaulthttp.Handler,
 		NumCores:              numTestCores,
 		BaseListenAddress:     fmt.Sprintf("127.0.0.1:%d", basePort),
 		BaseClusterListenPort: baseClusterPort,
 		SkipInit:              true,
+		UnwrapSealFunc:        sealFunc,
 	}
 	storage.Setup(&conf, &opts)
 	cluster := vault.NewTestCluster(t, &conf, &opts)

View File

@@ -24,7 +24,7 @@ import (
 )
 
 const (
-	numTestCores = 5
+	numTestCores = 3
 	keyShares    = 3
 	keyThreshold = 3
@@ -32,6 +32,7 @@ const (
 	basePort_TransitToShamir_Pre14  = 21000
 	basePort_ShamirToTransit_Post14 = 22000
 	basePort_TransitToShamir_Post14 = 23000
+	basePort_TransitToTransit       = 24000
 )
 type testFunc func(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int)
@@ -52,7 +53,6 @@ func testVariousBackends(t *testing.T, tf testFunc, basePort int, includeRaft bo
t.Run("file", func(t *testing.T) { t.Run("file", func(t *testing.T) {
t.Parallel() t.Parallel()
t.Skip("fails intermittently")
logger := logger.Named("file") logger := logger.Named("file")
storage, cleanup := teststorage.MakeReusableStorage( storage, cleanup := teststorage.MakeReusableStorage(
@@ -103,31 +103,28 @@ func testSealMigrationShamirToTransit_Pre14(t *testing.T, logger hclog.Logger, s
 	// Initialize the backend using shamir
 	cluster, _ := initializeShamir(t, logger, storage, basePort)
 	rootToken, barrierKeys := cluster.RootToken, cluster.BarrierKeys
-	cluster.EnsureCoresSealed(t)
 	cluster.Cleanup()
 	storage.Cleanup(t, cluster)
 
 	// Create the transit server.
-	tss := sealhelper.NewTransitSealServer(t)
+	tss := sealhelper.NewTransitSealServer(t, 0)
 	defer func() {
 		tss.EnsureCoresSealed(t)
 		tss.Cleanup()
 	}()
-	tss.MakeKey(t, "transit-seal-key")
+	tss.MakeKey(t, "transit-seal-key-1")
 
 	// Migrate the backend from shamir to transit. Note that the barrier keys
 	// are now the recovery keys.
-	transitSeal := migrateFromShamirToTransit_Pre14(t, logger, storage, basePort, tss, rootToken, barrierKeys)
+	sealFunc := migrateFromShamirToTransit_Pre14(t, logger, storage, basePort, tss, rootToken, barrierKeys)
 
 	// Run the backend with transit.
-	runTransit(t, logger, storage, basePort, rootToken, transitSeal)
+	runAutoseal(t, logger, storage, basePort, rootToken, sealFunc)
 }
-func migrateFromShamirToTransit_Pre14(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int, tss *sealhelper.TransitSealServer, rootToken string, recoveryKeys [][]byte) vault.Seal {
+func migrateFromShamirToTransit_Pre14(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int, tss *sealhelper.TransitSealServer, rootToken string, recoveryKeys [][]byte) func() vault.Seal {
 	var baseClusterPort = basePort + 10
 
-	var transitSeal vault.Seal
-
 	var conf = vault.CoreConfig{}
 	var opts = vault.TestClusterOptions{
 		Logger:                logger.Named("migrateFromShamirToTransit"),
@@ -138,8 +135,7 @@ func migrateFromShamirToTransit_Pre14(t *testing.T, logger hclog.Logger, storage
 		SkipInit:              true,
 		// N.B. Providing a transit seal puts us in migration mode.
 		SealFunc: func() vault.Seal {
-			transitSeal = tss.MakeSeal(t, "transit-seal-key")
-			return transitSeal
+			return tss.MakeSeal(t, "transit-seal-key")
 		},
 	}
 	storage.Setup(&conf, &opts)
@@ -159,6 +155,8 @@ func migrateFromShamirToTransit_Pre14(t *testing.T, logger hclog.Logger, storage
 	// Wait for migration to finish.
 	awaitMigration(t, leader.Client)
 
+	verifySealConfigTransit(t, leader)
+
 	// Read the secrets
 	secret, err := leader.Client.Logical().Read("kv-wrapped/foo")
 	if err != nil {
@@ -176,17 +174,7 @@ func migrateFromShamirToTransit_Pre14(t *testing.T, logger hclog.Logger, storage
 		t.Fatal(err)
 	}
 
-	// Make sure the seal configs were updated correctly.
-	b, r, err := leader.Core.PhysicalSealConfigs(context.Background())
-	if err != nil {
-		t.Fatal(err)
-	}
-	verifyBarrierConfig(t, b, wrapping.Transit, 1, 1, 1)
-	verifyBarrierConfig(t, r, wrapping.Shamir, keyShares, keyThreshold, 0)
-
-	cluster.EnsureCoresSealed(t)
-
-	return transitSeal
+	return opts.SealFunc
 }
 // TestSealMigration_ShamirToTransit_Post14 tests shamir-to-transit seal
@@ -202,59 +190,25 @@ func testSealMigrationShamirToTransit_Post14(t *testing.T, logger hclog.Logger,
 	cluster, opts := initializeShamir(t, logger, storage, basePort)
 
 	// Create the transit server.
-	tss := sealhelper.NewTransitSealServer(t)
-	defer func() {
-		tss.EnsureCoresSealed(t)
-		tss.Cleanup()
-	}()
-	tss.MakeKey(t, "transit-seal-key")
+	tss := sealhelper.NewTransitSealServer(t, 0)
+	defer tss.Cleanup()
+	sealKeyName := "transit-seal-key-1"
+	tss.MakeKey(t, sealKeyName)
 
 	// Migrate the backend from shamir to transit.
-	transitSeal := migrateFromShamirToTransit_Post14(t, logger, storage, basePort, tss, cluster, opts)
-	cluster.EnsureCoresSealed(t)
-	cluster.Cleanup()
-	storage.Cleanup(t, cluster)
-
-	// Run the backend with transit.
-	runTransit(t, logger, storage, basePort, cluster.RootToken, transitSeal)
-}
-
-func migrateFromShamirToTransit_Post14(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int, tss *sealhelper.TransitSealServer, cluster *vault.TestCluster, opts *vault.TestClusterOptions) vault.Seal {
-	// N.B. Providing a transit seal puts us in migration mode.
-	var transitSeal vault.Seal
 	opts.SealFunc = func() vault.Seal {
-		transitSeal = tss.MakeSeal(t, "transit-seal-key")
-		return transitSeal
-	}
-	modifyCoreConfig := func(tcc *vault.TestClusterCore) {
-		tcc.CoreConfig.Seal = transitSeal
+		return tss.MakeSeal(t, sealKeyName)
 	}
 
 	// Restart each follower with the new config, and migrate to Transit.
 	// Note that the barrier keys are being used as recovery keys.
-	leaderIdx := migratePost14(t, logger, storage, cluster, opts, cluster.BarrierKeys, modifyCoreConfig)
-	leader := cluster.Cores[leaderIdx]
+	leaderIdx := migratePost14(t, storage, cluster, opts, cluster.BarrierKeys)
+	validateMigration(t, storage, cluster, leaderIdx, verifySealConfigTransit)
+	cluster.Cleanup()
+	storage.Cleanup(t, cluster)
 
-	// Read the secret
-	secret, err := leader.Client.Logical().Read("kv-wrapped/foo")
-	if err != nil {
-		t.Fatal(err)
-	}
-	if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 {
-		t.Fatal(diff)
-	}
-
-	// Make sure the seal configs were updated correctly.
-	b, r, err := leader.Core.PhysicalSealConfigs(context.Background())
-	if err != nil {
-		t.Fatal(err)
-	}
-	verifyBarrierConfig(t, b, wrapping.Transit, 1, 1, 1)
-	verifyBarrierConfig(t, r, wrapping.Shamir, keyShares, keyThreshold, 0)
-
-	return transitSeal
+	// Run the backend with transit.
+	runAutoseal(t, logger, storage, basePort, cluster.RootToken, opts.SealFunc)
 }
 // TestSealMigration_TransitToShamir_Post14 tests transit-to-shamir seal
@@ -267,21 +221,25 @@ func TestSealMigration_TransitToShamir_Post14(t *testing.T) {
 func testSealMigrationTransitToShamir_Post14(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int) {
 	// Create the transit server.
-	tss := sealhelper.NewTransitSealServer(t)
+	tss := sealhelper.NewTransitSealServer(t, 0)
 	defer func() {
 		if tss != nil {
 			tss.Cleanup()
 		}
 	}()
-	tss.MakeKey(t, "transit-seal-key")
+	sealKeyName := "transit-seal-key-1"
+	tss.MakeKey(t, sealKeyName)
 
 	// Initialize the backend with transit.
-	cluster, opts, transitSeal := initializeTransit(t, logger, storage, basePort, tss)
+	cluster, opts := initializeTransit(t, logger, storage, basePort, tss, sealKeyName)
 	rootToken, recoveryKeys := cluster.RootToken, cluster.RecoveryKeys
 
 	// Migrate the backend from transit to shamir
-	migrateFromTransitToShamir_Post14(t, logger, storage, basePort, tss, transitSeal, cluster, opts)
-	cluster.EnsureCoresSealed(t)
+	opts.UnwrapSealFunc = opts.SealFunc
+	opts.SealFunc = func() vault.Seal { return nil }
+	leaderIdx := migratePost14(t, storage, cluster, opts, cluster.RecoveryKeys)
+	validateMigration(t, storage, cluster, leaderIdx, verifySealConfigShamir)
 	cluster.Cleanup()
 	storage.Cleanup(t, cluster)
@@ -295,27 +253,12 @@ func testSealMigrationTransitToShamir_Post14(t *testing.T, logger hclog.Logger,
 	runShamir(t, logger, storage, basePort, rootToken, recoveryKeys)
 }
 
-func migrateFromTransitToShamir_Post14(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int, tss *sealhelper.TransitSealServer, transitSeal vault.Seal, cluster *vault.TestCluster, opts *vault.TestClusterOptions) {
-
-	opts.SealFunc = nil
-	modifyCoreConfig := func(tcc *vault.TestClusterCore) {
-		// Nil out the seal so it will be initialized as shamir.
-		tcc.CoreConfig.Seal = nil
-
-		// N.B. Providing an UnwrapSeal puts us in migration mode. This is the
-		// equivalent of doing the following in HCL:
-		//     seal "transit" {
-		//       // ...
-		//       disabled = "true"
-		//     }
-		tcc.CoreConfig.UnwrapSeal = transitSeal
-	}
-
-	// Restart each follower with the new config, and migrate to Shamir.
-	leaderIdx := migratePost14(t, logger, storage, cluster, opts, cluster.RecoveryKeys, modifyCoreConfig)
+func validateMigration(t *testing.T, storage teststorage.ReusableStorage,
+	cluster *vault.TestCluster, leaderIdx int, f func(t *testing.T, core *vault.TestClusterCore)) {
+	t.Helper()
+
 	leader := cluster.Cores[leaderIdx]
 
-	// Read the secret
 	secret, err := leader.Client.Logical().Read("kv-wrapped/foo")
 	if err != nil {
 		t.Fatal(err)
@@ -334,27 +277,70 @@ func migrateFromTransitToShamir_Post14(t *testing.T, logger hclog.Logger, storag
 			testhelpers.WaitForRaftApply(t, core, appliedIndex)
 		}
 
-		// Make sure the seal configs were updated correctly.
-		b, r, err := core.Core.PhysicalSealConfigs(context.Background())
-		if err != nil {
-			t.Fatal(err)
-		}
-		verifyBarrierConfig(t, b, wrapping.Shamir, keyShares, keyThreshold, 1)
-		if r != nil {
-			t.Fatalf("expected nil recovery config, got: %#v", r)
-		}
+		f(t, core)
 	}
 }
 
-func migratePost14(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, cluster *vault.TestCluster, opts *vault.TestClusterOptions, unsealKeys [][]byte, modifyCoreConfig func(*vault.TestClusterCore)) int {
+// TestSealMigration_TransitToTransit tests transit-to-transit seal
+// migration, using the post-1.4 method of bringing up individual nodes in the
+// cluster to do the migration.
+func TestSealMigration_TransitToTransit(t *testing.T) {
+	testVariousBackends(t, testSealMigration_TransitToTransit, basePort_TransitToTransit, true)
+}
+
+func testSealMigration_TransitToTransit(t *testing.T, logger hclog.Logger,
+	storage teststorage.ReusableStorage, basePort int) {
+	// Create the transit server.
+	tss1 := sealhelper.NewTransitSealServer(t, 0)
+	defer func() {
+		if tss1 != nil {
+			tss1.Cleanup()
+		}
+	}()
+	sealKeyName := "transit-seal-key-1"
+	tss1.MakeKey(t, sealKeyName)
+
+	// Initialize the backend with transit.
+	cluster, opts := initializeTransit(t, logger, storage, basePort, tss1, sealKeyName)
+	rootToken := cluster.RootToken
+
+	// Create the transit server.
+	tss2 := sealhelper.NewTransitSealServer(t, 1)
+	defer func() {
+		tss2.Cleanup()
+	}()
+	tss2.MakeKey(t, "transit-seal-key-2")
+
+	// Migrate the backend from transit to transit.
+	opts.UnwrapSealFunc = opts.SealFunc
+	opts.SealFunc = func() vault.Seal {
+		return tss2.MakeSeal(t, "transit-seal-key-2")
+	}
+	leaderIdx := migratePost14(t, storage, cluster, opts, cluster.RecoveryKeys)
+	validateMigration(t, storage, cluster, leaderIdx, verifySealConfigTransit)
+	cluster.Cleanup()
+	storage.Cleanup(t, cluster)
+
+	// Now that migration is done, we can nuke the transit server, since we
+	// can unseal without it.
+	tss1.Cleanup()
+	tss1 = nil
+
+	// Run the backend with transit.
+	runAutoseal(t, logger, storage, basePort, rootToken, opts.SealFunc)
+}
+
+func migratePost14(t *testing.T, storage teststorage.ReusableStorage, cluster *vault.TestCluster,
+	opts *vault.TestClusterOptions, unsealKeys [][]byte) int {
+	cluster.Logger = cluster.Logger.Named("migration")
 	// Restart each follower with the new config, and migrate.
 	for i := 1; i < len(cluster.Cores); i++ {
 		cluster.StopCore(t, i)
 		if storage.IsRaft {
 			teststorage.CloseRaftStorage(t, cluster, i)
 		}
-		modifyCoreConfig(cluster.Cores[i])
 		cluster.StartCore(t, i, opts)
 
 		unsealMigrate(t, cluster.Cores[i].Client, unsealKeys, true)
@@ -385,7 +371,7 @@ func migratePost14(t *testing.T, logger hclog.Logger, storage teststorage.Reusab
 	}
 	leader := cluster.Cores[leaderIdx]
 
-	// Wait for migration to occur on one of the 2 unsealed nodes
+	// Wait for migration to occur on the leader
 	awaitMigration(t, leader.Client)
 
 	var appliedIndex uint64
@@ -400,10 +386,9 @@ func migratePost14(t *testing.T, logger hclog.Logger, storage teststorage.Reusab
 		teststorage.CloseRaftStorage(t, cluster, 0)
 	}
 
-	// Modify the core
-	modifyCoreConfig(cluster.Cores[0])
-
-	// Bring core 0 back up
+	// Bring core 0 back up; we still have the seal migration config in place,
+	// but now that migration has been performed we should be able to unseal
+	// with the new seal and without using the `migrate` unseal option.
 	cluster.StartCore(t, 0, opts)
 
 	unseal(t, cluster.Cores[0].Client, unsealKeys)
@@ -420,16 +405,16 @@ func migratePost14(t *testing.T, logger hclog.Logger, storage teststorage.Reusab
 func unsealMigrate(t *testing.T, client *api.Client, keys [][]byte, transitServerAvailable bool) {
 	t.Helper()
-	for i, key := range keys {
-
-		// Try to unseal with missing "migrate" parameter
-		_, err := client.Sys().UnsealWithOptions(&api.UnsealOpts{
-			Key: base64.StdEncoding.EncodeToString(key),
-		})
-		if err == nil {
-			t.Fatal("expected error due to lack of migrate parameter")
-		}
+	if err := attemptUnseal(client, keys); err == nil {
+		t.Fatal("expected error due to lack of migrate parameter")
+	}
+	if err := attemptUnsealMigrate(client, keys, transitServerAvailable); err != nil {
+		t.Fatal(err)
+	}
+}
 
-		// Unseal with "migrate" parameter
+func attemptUnsealMigrate(client *api.Client, keys [][]byte, transitServerAvailable bool) error {
+	for i, key := range keys {
 		resp, err := client.Sys().UnsealWithOptions(&api.UnsealOpts{
 			Key:     base64.StdEncoding.EncodeToString(key),
 			Migrate: true,
@@ -438,26 +423,27 @@ func unsealMigrate(t *testing.T, client *api.Client, keys [][]byte, transitServe
 		if i < keyThreshold-1 {
 			// Not enough keys have been provided yet.
 			if err != nil {
-				t.Fatal(err)
+				return err
 			}
 		} else {
 			if transitServerAvailable {
 				// The transit server is running.
 				if err != nil {
-					t.Fatal(err)
+					return err
 				}
 				if resp == nil || resp.Sealed {
-					t.Fatalf("expected unsealed state; got %#v", resp)
+					return fmt.Errorf("expected unsealed state; got %#v", resp)
 				}
 			} else {
 				// The transit server is stopped.
 				if err == nil {
-					t.Fatal("expected error due to transit server being stopped.")
+					return fmt.Errorf("expected error due to transit server being stopped")
 				}
 			}
 			break
 		}
 	}
+	return nil
 }
 // awaitMigration waits for migration to finish.
@@ -484,6 +470,12 @@ func awaitMigration(t *testing.T, client *api.Client) {
 func unseal(t *testing.T, client *api.Client, keys [][]byte) {
 	t.Helper()
+	if err := attemptUnseal(client, keys); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func attemptUnseal(client *api.Client, keys [][]byte) error {
 	for i, key := range keys {
 		resp, err := client.Sys().UnsealWithOptions(&api.UnsealOpts{
@@ -492,18 +484,41 @@ func unseal(t *testing.T, client *api.Client, keys [][]byte) {
 		if i < keyThreshold-1 {
 			// Not enough keys have been provided yet.
 			if err != nil {
-				t.Fatal(err)
+				return err
 			}
 		} else {
 			if err != nil {
-				t.Fatal(err)
+				return err
 			}
 			if resp == nil || resp.Sealed {
-				t.Fatalf("expected unsealed state; got %#v", resp)
+				return fmt.Errorf("expected unsealed state; got %#v", resp)
 			}
 			break
 		}
 	}
+	return nil
+}
+
+func verifySealConfigShamir(t *testing.T, core *vault.TestClusterCore) {
+	t.Helper()
+	b, r, err := core.PhysicalSealConfigs(context.Background())
+	if err != nil {
+		t.Fatal(err)
+	}
+	verifyBarrierConfig(t, b, wrapping.Shamir, keyShares, keyThreshold, 1)
+	if r != nil {
+		t.Fatal("should not have recovery config for shamir")
+	}
+}
+
+func verifySealConfigTransit(t *testing.T, core *vault.TestClusterCore) {
+	t.Helper()
+	b, r, err := core.PhysicalSealConfigs(context.Background())
+	if err != nil {
+		t.Fatal(err)
+	}
+	verifyBarrierConfig(t, b, wrapping.Transit, 1, 1, 1)
+	verifyBarrierConfig(t, r, wrapping.Shamir, keyShares, keyThreshold, 0)
 }
 // verifyBarrierConfig verifies that a barrier configuration is correct.
@@ -554,7 +569,7 @@ func initializeShamir(t *testing.T, logger hclog.Logger, storage teststorage.Reu
 	} else {
 		cluster.UnsealCores(t)
 	}
-	testhelpers.WaitForNCoresUnsealed(t, cluster, len(cluster.Cores))
+	testhelpers.WaitForActiveNodeAndStandbys(t, cluster)
 
 	err := client.Sys().Mount("kv-wrapped", &api.MountInput{
 		SealWrap: true,
@@ -640,29 +655,25 @@ func runShamir(t *testing.T, logger hclog.Logger, storage teststorage.ReusableSt
 	if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 {
 		t.Fatal(diff)
 	}
-
-	// Seal the cluster
-	cluster.EnsureCoresSealed(t)
 }
 // initializeTransit initializes a brand new backend storage with Transit.
-func initializeTransit(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int, tss *sealhelper.TransitSealServer) (*vault.TestCluster, *vault.TestClusterOptions, vault.Seal) {
+func initializeTransit(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int,
+	tss *sealhelper.TransitSealServer, sealKeyName string) (*vault.TestCluster, *vault.TestClusterOptions) {
 	t.Helper()
 
-	var transitSeal vault.Seal
-
 	var baseClusterPort = basePort + 10
 
 	// Start the cluster
 	var conf = vault.CoreConfig{}
 	var opts = vault.TestClusterOptions{
-		Logger:                logger,
+		Logger:                logger.Named("initializeTransit"),
 		HandlerFunc:           vaulthttp.Handler,
 		NumCores:              numTestCores,
 		BaseListenAddress:     fmt.Sprintf("127.0.0.1:%d", basePort),
 		BaseClusterListenPort: baseClusterPort,
 		SealFunc: func() vault.Seal {
-			transitSeal = tss.MakeSeal(t, "transit-seal-key")
-			return transitSeal
+			return tss.MakeSeal(t, sealKeyName)
 		},
 	}
 	storage.Setup(&conf, &opts)
@@ -698,16 +709,15 @@ func initializeTransit(t *testing.T, logger hclog.Logger, storage teststorage.Re
 		t.Fatal(err)
 	}
 
-	return cluster, &opts, transitSeal
+	return cluster, &opts
 }
 
-func runTransit(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int, rootToken string, transitSeal vault.Seal) {
+func runAutoseal(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int, rootToken string, sealFunc func() vault.Seal) {
 	var baseClusterPort = basePort + 10
 
 	// Start the cluster
-	var conf = vault.CoreConfig{
-		Seal: transitSeal,
-	}
+	var conf = vault.CoreConfig{}
 	var opts = vault.TestClusterOptions{
 		Logger:                logger.Named("runTransit"),
 		HandlerFunc:           vaulthttp.Handler,
@@ -715,6 +725,7 @@ func runTransit(t *testing.T, logger hclog.Logger, storage teststorage.ReusableS
 		BaseListenAddress:     fmt.Sprintf("127.0.0.1:%d", basePort),
 		BaseClusterListenPort: baseClusterPort,
 		SkipInit:              true,
+		SealFunc:              sealFunc,
 	}
 	storage.Setup(&conf, &opts)
 	cluster := vault.NewTestCluster(t, &conf, &opts)
@@ -771,9 +782,6 @@ func runTransit(t *testing.T, logger hclog.Logger, storage teststorage.ReusableS
 	if diff := deep.Equal(secret.Data, map[string]interface{}{"zork": "quux"}); len(diff) > 0 {
 		t.Fatal(diff)
 	}
-
-	// Seal the cluster
-	cluster.EnsureCoresSealed(t)
 }
 // joinRaftFollowers unseals the leader, and then joins-and-unseals the

View File

@@ -39,7 +39,7 @@ type GenerateRootStrategy interface {
 type generateStandardRootToken struct{}
 
 func (g generateStandardRootToken) authenticate(ctx context.Context, c *Core, combinedKey []byte) error {
-	masterKey, err := c.unsealKeyToMasterKey(ctx, combinedKey)
+	masterKey, err := c.unsealKeyToMasterKeyPostUnseal(ctx, combinedKey)
 	if err != nil {
 		return errwrap.Wrapf("unable to authenticate: {{err}}", err)
 	}

View File

@@ -21,7 +21,7 @@ type generateRecoveryToken struct {
 }
 
 func (g *generateRecoveryToken) authenticate(ctx context.Context, c *Core, combinedKey []byte) error {
-	key, err := c.unsealKeyToMasterKey(ctx, combinedKey)
+	key, err := c.unsealKeyToMasterKeyPostUnseal(ctx, combinedKey)
 	if err != nil {
 		return errwrap.Wrapf("unable to authenticate: {{err}}", err)
 	}

View File

@@ -413,10 +413,13 @@ func (c *Core) UnsealWithStoredKeys(ctx context.Context) error {
 	}
 
 	// Disallow auto-unsealing when migrating
-	if c.IsInSealMigration() {
+	if c.IsInSealMigrationMode() && !c.IsSealMigrated() {
 		return NewNonFatalError(errors.New("cannot auto-unseal during seal migration"))
 	}
 
+	c.stateLock.Lock()
+	defer c.stateLock.Unlock()
+
 	sealed := c.Sealed()
 	if !sealed {
 		c.Logger().Warn("attempted unseal with stored keys, but vault is already unsealed")
@@ -434,27 +437,22 @@ func (c *Core) UnsealWithStoredKeys(ctx context.Context) error {
 	if len(keys) == 0 {
 		return NewNonFatalError(errors.New("stored unseal keys are supported, but none were found"))
 	}
-
-	unsealed := false
-	keysUsed := 0
-	for _, key := range keys {
-		unsealed, err = c.Unseal(key)
-		if err != nil {
-			return NewNonFatalError(errwrap.Wrapf("unseal with stored key failed: {{err}}", err))
-		}
-		keysUsed++
-		if unsealed {
-			break
-		}
+	if len(keys) != 1 {
+		return NewNonFatalError(errors.New("expected exactly one stored key"))
 	}
 
-	if !unsealed {
+	err = c.unsealInternal(ctx, keys[0])
+	if err != nil {
+		return NewNonFatalError(errwrap.Wrapf("unseal with stored key failed: {{err}}", err))
+	}
+
+	if c.Sealed() {
 		// This most likely means that the user configured Vault to only store a
 		// subset of the required threshold of keys. We still consider this a
 		// "success", since trying again would yield the same result.
-		c.Logger().Warn("vault still sealed after using stored unseal keys", "stored_keys_used", keysUsed)
+		c.Logger().Warn("vault still sealed after using stored unseal key")
 	} else {
-		c.Logger().Info("unsealed with stored keys", "stored_keys_used", keysUsed)
+		c.Logger().Info("unsealed with stored key")
 	}
 
 	return nil

View File

@@ -5,6 +5,7 @@ import (
"context" "context"
"encoding/base64" "encoding/base64"
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/helper/jsonutil"
"github.com/hashicorp/vault/sdk/physical" "github.com/hashicorp/vault/sdk/physical"
@@ -401,6 +402,36 @@ func (s *SealConfig) Clone() *SealConfig {
 	return ret
 }
 
+type ErrEncrypt struct {
+	Err error
+}
+
+var _ error = &ErrEncrypt{}
+
+func (e *ErrEncrypt) Error() string {
+	return e.Err.Error()
+}
+
+func (e *ErrEncrypt) Is(target error) bool {
+	_, ok := target.(*ErrEncrypt)
+	return ok || errors.Is(e.Err, target)
+}
+
+type ErrDecrypt struct {
+	Err error
+}
+
+var _ error = &ErrDecrypt{}
+
+func (e *ErrDecrypt) Error() string {
+	return e.Err.Error()
+}
+
+func (e *ErrDecrypt) Is(target error) bool {
+	_, ok := target.(*ErrDecrypt)
+	return ok || errors.Is(e.Err, target)
+}
+
 func writeStoredKeys(ctx context.Context, storage physical.Backend, encryptor *seal.Access, keys [][]byte) error {
 	if keys == nil {
 		return fmt.Errorf("keys were nil")
@@ -417,7 +448,7 @@ func writeStoredKeys(ctx context.Context, storage physical.Backend, encryptor *s
 	// Encrypt and marshal the keys
 	blobInfo, err := encryptor.Encrypt(ctx, buf, nil)
 	if err != nil {
-		return errwrap.Wrapf("failed to encrypt keys for storage: {{err}}", err)
+		return &ErrEncrypt{Err: errwrap.Wrapf("failed to encrypt keys for storage: {{err}}", err)}
 	}
 
 	value, err := proto.Marshal(blobInfo)
@@ -457,7 +488,7 @@ func readStoredKeys(ctx context.Context, storage physical.Backend, encryptor *se
 	pt, err := encryptor.Decrypt(ctx, blobInfo, nil)
 	if err != nil {
-		return nil, errwrap.Wrapf("failed to decrypt encrypted stored keys: {{err}}", err)
+		return nil, &ErrDecrypt{Err: errwrap.Wrapf("failed to decrypt encrypted stored keys: {{err}}", err)}
 	}
 
 	// Decode the barrier entry
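
The Is methods on ErrEncrypt and ErrDecrypt (added above) make errors.Is match any error of that type, however deeply wrapped. A minimal in-package sketch (the call site is illustrative; parameter names follow writeStoredKeys above):

	if err := writeStoredKeys(ctx, storage, encryptor, keys); err != nil {
		if errors.Is(err, &ErrEncrypt{}) {
			// The seal's Encrypt call failed; the write never reached storage,
			// so this is a seal problem rather than a storage problem.
		}
	}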

View File

@@ -49,6 +49,7 @@ func (a *Access) Type() string {
 	return a.Wrapper.Type()
 }
 
+// Encrypt uses the underlying seal to encrypt the plaintext and returns it.
 func (a *Access) Encrypt(ctx context.Context, plaintext, aad []byte) (blob *wrapping.EncryptedBlobInfo, err error) {
 	defer func(now time.Time) {
 		metrics.MeasureSince([]string{"seal", "encrypt", "time"}, now)
@@ -66,6 +67,9 @@ func (a *Access) Encrypt(ctx context.Context, plaintext, aad []byte) (blob *wrap
 	return a.Wrapper.Encrypt(ctx, plaintext, aad)
 }
 
+// Decrypt uses the underlying seal to decrypt the cryptotext and returns it.
+// Note that it is possible depending on the wrapper used that both pt and err
+// are populated.
 func (a *Access) Decrypt(ctx context.Context, data *wrapping.EncryptedBlobInfo, aad []byte) (pt []byte, err error) {
 	defer func(now time.Time) {
 		metrics.MeasureSince([]string{"seal", "decrypt", "time"}, now)

View File

@@ -295,10 +295,6 @@ func TestCoreUnseal(core *Core, key []byte) (bool, error) {
 	return core.Unseal(key)
 }
 
-func TestCoreUnsealWithRecoveryKeys(core *Core, key []byte) (bool, error) {
-	return core.UnsealWithRecoveryKeys(key)
-}
-
 // TestCoreUnsealed returns a pure in-memory core that is already
 // initialized and unsealed.
 func TestCoreUnsealed(t testing.T) (*Core, [][]byte, string) {
@@ -830,6 +826,7 @@ func (c *TestCluster) UnsealCoresWithError(useStoredKeys bool) error {
 }
 
 func (c *TestCluster) UnsealCore(t testing.T, core *TestClusterCore) {
+	t.Helper()
 	var keys [][]byte
 	if core.seal.RecoveryKeySupported() {
 		keys = c.RecoveryKeys
@@ -844,6 +841,7 @@ func (c *TestCluster) UnsealCore(t testing.T, core *TestClusterCore) {
 }
 
 func (c *TestCluster) UnsealCoreWithStoredKeys(t testing.T, core *TestClusterCore) {
+	t.Helper()
 	if err := core.UnsealWithStoredKeys(context.Background()); err != nil {
 		t.Fatal(err)
 	}
@@ -1018,12 +1016,13 @@ type TestClusterOptions struct {
 	// do not clash with any other explicitly assigned ports in other tests.
 	BaseClusterListenPort int
 	NumCores              int
-	SealFunc              func() Seal
-	Logger                log.Logger
-	TempDir               string
-	CACert                []byte
-	CAKey                 *ecdsa.PrivateKey
+	SealFunc              func() Seal
+	UnwrapSealFunc        func() Seal
+	Logger                log.Logger
+	TempDir               string
+	CACert                []byte
+	CAKey                 *ecdsa.PrivateKey
 	// PhysicalFactory is used to create backends.
 	// The int argument is the index of the core within the cluster, i.e. first
 	// core in cluster will have 0, second 1, etc.
@@ -1702,6 +1701,9 @@ func (testCluster *TestCluster) newCore(t testing.T, idx int, coreConfig *CoreCo
 	if opts != nil && opts.SealFunc != nil {
 		localConfig.Seal = opts.SealFunc()
 	}
+	if opts != nil && opts.UnwrapSealFunc != nil {
+		localConfig.UnwrapSeal = opts.UnwrapSealFunc()
+	}
 
 	if coreConfig.Logger == nil || (opts != nil && opts.Logger != nil) {
 		localConfig.Logger = testCluster.Logger.Named(fmt.Sprintf("core%d", idx))
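
Usage note: the two options compose to express a seal migration in tests; UnwrapSealFunc supplies the old seal (which puts the core in migration mode) and SealFunc the new one. A condensed sketch of the pattern used by the sealmigration tests above:

	opts.UnwrapSealFunc = opts.SealFunc // the old seal is now only used to unwrap
	opts.SealFunc = func() vault.Seal {
		return tss2.MakeSeal(t, "transit-seal-key-2") // the new seal
	}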