Minor improvements to test cluster libraries (#25329)

Add WaitForMatchingMerkleRootsClients and Clients to the sdk/helper/testcluster package. Fix internal TestCluster.SetRootToken, which wasn't updating the built-in per-core clients' token.
Nick Cabatoff
2024-02-09 09:45:43 -05:00
committed by GitHub
parent 53f0622af5
commit 1b8606d9ec
6 changed files with 64 additions and 41 deletions

changelog/25329.txt Normal file

@@ -0,0 +1,3 @@
+```release-note:improvement
+sdk/helper/testcluster: add some new helpers, improve some error messages.
+```


@@ -433,30 +433,6 @@ func RekeyCluster(t testing.T, cluster *vault.TestCluster, recovery bool) [][]by
 	return newKeys
 }
-func RaftClusterJoinNodes(t testing.T, cluster *vault.TestCluster) {
-	leader := cluster.Cores[0]
-	leaderInfos := []*raft.LeaderJoinInfo{
-		{
-			LeaderAPIAddr: leader.Client.Address(),
-			TLSConfig:     leader.TLSConfig(),
-		},
-	}
-	// Join followers
-	for i := 1; i < len(cluster.Cores); i++ {
-		core := cluster.Cores[i]
-		_, err := core.JoinRaftCluster(namespace.RootContext(context.Background()), leaderInfos, false)
-		if err != nil {
-			t.Fatal(err)
-		}
-		cluster.UnsealCore(t, core)
-	}
-	WaitForNCoresUnsealed(t, cluster, len(cluster.Cores))
-}
 // HardcodedServerAddressProvider is a ServerAddressProvider that uses
 // a hardcoded map of raft node addresses.
 //
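The helper removed above reappears, minus the trailing WaitForNCoresUnsealed call, in the teststorage package below. A minimal sketch of the call-site migration, assuming a cluster built elsewhere in the test; the wrapper function and package name are illustrative, not part of this commit:

```go
package sometest

import (
	"github.com/hashicorp/vault/helper/testhelpers/teststorage"
	"github.com/hashicorp/vault/vault"
	testing "github.com/mitchellh/go-testing-interface"
)

// joinAndUnseal is a hypothetical wrapper showing where callers now find the helper.
func joinAndUnseal(t testing.T, cluster *vault.TestCluster) {
	// was: testhelpers.RaftClusterJoinNodes(t, cluster)
	teststorage.RaftClusterJoinNodes(t, cluster)
}
```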


@@ -4,6 +4,7 @@
 package teststorage
 import (
+	"context"
 	"fmt"
 	"io/ioutil"
 	"math/rand"
@@ -18,7 +19,7 @@ import (
 	auditSyslog "github.com/hashicorp/vault/builtin/audit/syslog"
 	logicalDb "github.com/hashicorp/vault/builtin/logical/database"
 	"github.com/hashicorp/vault/builtin/plugin"
-	"github.com/hashicorp/vault/helper/testhelpers"
+	"github.com/hashicorp/vault/helper/namespace"
 	"github.com/hashicorp/vault/helper/testhelpers/corehelpers"
 	vaulthttp "github.com/hashicorp/vault/http"
 	"github.com/hashicorp/vault/internalshared/configutil"
@@ -233,6 +234,28 @@ func FileBackendSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions) {
 	opts.PhysicalFactory = SharedPhysicalFactory(MakeFileBackend)
 }
+func RaftClusterJoinNodes(t testing.T, cluster *vault.TestCluster) {
+	leader := cluster.Cores[0]
+	leaderInfos := []*raft.LeaderJoinInfo{
+		{
+			LeaderAPIAddr: leader.Client.Address(),
+			TLSConfig:     leader.TLSConfig(),
+		},
+	}
+	// Join followers
+	for i := 1; i < len(cluster.Cores); i++ {
+		core := cluster.Cores[i]
+		_, err := core.JoinRaftCluster(namespace.RootContext(context.Background()), leaderInfos, false)
+		if err != nil {
+			t.Fatal(err)
+		}
+		cluster.UnsealCore(t, core)
+	}
+}
 func RaftBackendSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions) {
 	opts.KeepStandbysSealed = true
 	var bridge *raft.ClusterAddrBridge
@@ -252,7 +275,7 @@ func RaftBackendSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions) {
 	}
 	opts.SetupFunc = func(t testing.T, c *vault.TestCluster) {
 		if opts.NumCores != 1 {
-			testhelpers.RaftClusterJoinNodes(t, c)
+			RaftClusterJoinNodes(t, c)
 			time.Sleep(15 * time.Second)
 		}
 	}
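For context, a minimal sketch of how RaftBackendSetup (and, through its SetupFunc, the relocated RaftClusterJoinNodes) is typically consumed; the test name, package name, and exact option fields are assumptions for illustration, not part of this commit:

```go
package sometest

import (
	"testing"

	"github.com/hashicorp/vault/helper/testhelpers/teststorage"
	vaulthttp "github.com/hashicorp/vault/http"
	"github.com/hashicorp/vault/vault"
)

func TestRaftCluster(t *testing.T) {
	conf := &vault.CoreConfig{}
	opts := &vault.TestClusterOptions{
		HandlerFunc: vaulthttp.Handler,
		NumCores:    3,
	}
	// Wires raft storage into conf/opts and installs a SetupFunc that calls
	// RaftClusterJoinNodes to join and unseal the follower nodes.
	teststorage.RaftBackendSetup(conf, opts)

	cluster := vault.NewTestCluster(t, conf, opts)
	cluster.Start()
	defer cluster.Cleanup()
}
```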


@@ -42,12 +42,12 @@ func EnablePerfPrimary(ctx context.Context, pri VaultCluster) error {
 	client := pri.Nodes()[0].APIClient()
 	_, err := client.Logical().WriteWithContext(ctx, "sys/replication/performance/primary/enable", nil)
 	if err != nil {
-		return err
+		return fmt.Errorf("error enabling perf primary: %w", err)
 	}
 	err = WaitForPerfReplicationState(ctx, pri, consts.ReplicationPerformancePrimary)
 	if err != nil {
-		return err
+		return fmt.Errorf("error waiting for perf primary to have the correct state: %w", err)
 	}
 	return WaitForActiveNodeAndPerfStandbys(ctx, pri)
 }
@@ -108,6 +108,10 @@ func EnablePerformanceSecondary(ctx context.Context, perfToken string, pri, sec
 }
 func WaitForMatchingMerkleRoots(ctx context.Context, endpoint string, pri, sec VaultCluster) error {
+	return WaitForMatchingMerkleRootsClients(ctx, endpoint, pri.Nodes()[0].APIClient(), sec.Nodes()[0].APIClient())
+}
+func WaitForMatchingMerkleRootsClients(ctx context.Context, endpoint string, pri, sec *api.Client) error {
 	getRoot := func(mode string, cli *api.Client) (string, error) {
 		status, err := cli.Logical().Read(endpoint + "status")
 		if err != nil {
@@ -122,16 +126,19 @@ func WaitForMatchingMerkleRoots(ctx context.Context, endpoint string, pri, sec V
 		return status.Data["merkle_root"].(string), nil
 	}
-	secClient := sec.Nodes()[0].APIClient()
-	priClient := pri.Nodes()[0].APIClient()
-	for i := 0; i < 30; i++ {
-		secRoot, err := getRoot("secondary", secClient)
+	var priRoot, secRoot string
+	var err error
+	genRet := func() error {
+		return fmt.Errorf("unequal merkle roots, pri=%s sec=%s, err=%w", priRoot, secRoot, err)
+	}
+	for ctx.Err() == nil {
+		secRoot, err = getRoot("secondary", sec)
 		if err != nil {
-			return err
+			return genRet()
 		}
-		priRoot, err := getRoot("primary", priClient)
+		priRoot, err = getRoot("primary", pri)
 		if err != nil {
-			return err
+			return genRet()
 		}
 		if reflect.DeepEqual(priRoot, secRoot) {
@@ -281,15 +288,18 @@ func WaitForPerfReplicationWorking(ctx context.Context, pri, sec VaultCluster) e
 func SetupTwoClusterPerfReplication(ctx context.Context, pri, sec VaultCluster) error {
 	if err := EnablePerfPrimary(ctx, pri); err != nil {
-		return err
+		return fmt.Errorf("failed to enable perf primary: %w", err)
 	}
 	perfToken, err := GetPerformanceToken(pri, sec.ClusterID(), "")
 	if err != nil {
-		return err
+		return fmt.Errorf("failed to get performance token from perf primary: %w", err)
 	}
 	_, err = EnablePerformanceSecondary(ctx, perfToken, pri, sec, false, false)
-	return err
+	if err != nil {
+		return fmt.Errorf("failed to enable perf secondary: %w", err)
+	}
+	return nil
 }
 // PassiveWaitForActiveNodeAndPerfStandbys should be used instead of
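The new WaitForMatchingMerkleRootsClients variant takes raw *api.Client values instead of VaultCluster handles, so it can be pointed at clusters the sdk helpers did not build. A minimal sketch, assuming priClient and secClient already target a perf primary and secondary; the function name, package name, and endpoint prefix are illustrative:

```go
package sometest

import (
	"context"
	"testing"
	"time"

	"github.com/hashicorp/vault/api"
	"github.com/hashicorp/vault/sdk/helper/testcluster"
)

func waitForMerkleConvergence(t *testing.T, priClient, secClient *api.Client) {
	// The rewritten loop runs until the context expires, so bound it here.
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	err := testcluster.WaitForMatchingMerkleRootsClients(ctx,
		"sys/replication/performance/", priClient, secClient)
	if err != nil {
		t.Fatal(err)
	}
}
```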


@@ -281,7 +281,7 @@ func WaitForActiveNodeAndPerfStandbys(ctx context.Context, cluster VaultCluster)
 	// this call to WaitForActiveNode by reworking the logic in this method.
 	leaderIdx, err := WaitForActiveNode(ctx, cluster)
 	if err != nil {
-		return err
+		return fmt.Errorf("did not find leader: %w", err)
 	}
 	if len(cluster.Nodes()) == 1 {
@@ -307,7 +307,7 @@ func WaitForActiveNodeAndPerfStandbys(ctx context.Context, cluster VaultCluster)
 		time.Sleep(1 * time.Second)
 	}
 	if err != nil {
-		return fmt.Errorf("unable to mount KV engine: %v", err)
+		return fmt.Errorf("unable to mount KV engine: %w", err)
 	}
 	path := mountPoint + "/waitforactivenodeandperfstandbys"
 	var standbys, actives int64
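The switch from %v to %w here (and in the hunks above) keeps the wrapped error inspectable with errors.Is/errors.As. A standalone illustration, not taken from the diff:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

func main() {
	base := context.DeadlineExceeded

	wrapped := fmt.Errorf("unable to mount KV engine: %w", base)   // preserves the error chain
	flattened := fmt.Errorf("unable to mount KV engine: %v", base) // stringifies it

	fmt.Println(errors.Is(wrapped, context.DeadlineExceeded))   // true
	fmt.Println(errors.Is(flattened, context.DeadlineExceeded)) // false
}
```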
@@ -381,11 +381,19 @@ func WaitForActiveNodeAndPerfStandbys(ctx context.Context, cluster VaultCluster)
 		time.Sleep(time.Second)
 	}
 	if err != nil {
-		return fmt.Errorf("unable to unmount KV engine on primary")
+		return fmt.Errorf("unable to unmount KV engine: %w", err)
 	}
 	return nil
 }
+func Clients(vc VaultCluster) []*api.Client {
+	var ret []*api.Client
+	for _, n := range vc.Nodes() {
+		ret = append(ret, n.APIClient())
+	}
+	return ret
+}
 type GenerateRootKind int
 const (
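The new Clients helper returns one API client per cluster node. A minimal sketch of using it to poke every node, assuming a VaultCluster obtained elsewhere; the function and package names are illustrative:

```go
package sometest

import (
	"testing"

	"github.com/hashicorp/vault/sdk/helper/testcluster"
)

func checkAllNodes(t *testing.T, vc testcluster.VaultCluster) {
	for i, client := range testcluster.Clients(vc) {
		resp, err := client.Sys().Health()
		if err != nil {
			t.Fatalf("node %d health check failed: %v", i, err)
		}
		t.Logf("node %d: initialized=%v sealed=%v", i, resp.Initialized, resp.Sealed)
	}
}
```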


@@ -717,6 +717,9 @@ type TestCluster struct {
 func (c *TestCluster) SetRootToken(token string) {
 	c.RootToken = token
+	for _, c := range c.Cores {
+		c.Client.SetToken(token)
+	}
 }
 func (c *TestCluster) Start() {
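A minimal sketch of the behaviour the SetRootToken fix enables: after rotating the root token in a test, the per-core clients now carry the new token and can be used without re-authenticating. Test scaffolding is elided, and the package, function name, and newRoot value are assumptions for illustration:

```go
package sometest

import (
	"testing"

	"github.com/hashicorp/vault/vault"
)

func useNewRootToken(t *testing.T, cluster *vault.TestCluster, newRoot string) {
	cluster.SetRootToken(newRoot)

	// Before the fix, this built-in client would still hold the old token.
	if _, err := cluster.Cores[0].Client.Sys().ListMounts(); err != nil {
		t.Fatal(err)
	}
}
```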