Mirror of https://github.com/optim-enterprises-bv/vault.git, synced 2025-10-29 17:52:32 +00:00.
Removal of go-testing-interface (CE changes) (#27578)
* Removal of go-testing-interface CE changes
* CE only fine
* Changelog
* Changelog
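The pattern this commit applies throughout the tree, shown as a minimal sketch (the helper and test names below are illustrative, not from the diff): helpers that previously accepted the `testing.T` interface from github.com/mitchellh/go-testing-interface now accept the standard library's `testing.TB`, which both `*testing.T` and `*testing.B` satisfy, so shims like `benchhelpers.TBtoT` can be dropped at call sites.

```go
package example

import (
	"testing"
	"time"
)

// waitUntil is a hypothetical helper in the new style: it takes testing.TB,
// so one function serves both tests and benchmarks.
func waitUntil(t testing.TB, timeout time.Duration, ready func() bool) {
	t.Helper()
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if ready() {
			return
		}
		time.Sleep(100 * time.Millisecond)
	}
	t.Fatal("condition not met before timeout")
}

func TestExample(t *testing.T) {
	waitUntil(t, time.Second, func() bool { return true }) // *testing.T satisfies testing.TB
}

func BenchmarkExample(b *testing.B) {
	waitUntil(b, time.Second, func() bool { return true }) // so does *testing.B, with no TBtoT shim
}
```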
changelog/27578.txt (new file, 3 lines added)
@@ -0,0 +1,3 @@
+```release-note:change
+cli: The undocumented `-dev-three-node` and `-dev-four-cluster` CLI options have been removed.
+```

@@ -16,11 +16,6 @@ import (
 func entInitCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions, commands map[string]cli.CommandFactory) {
 }
 
-func entEnableFourClusterDev(c *ServerCommand, base *vault.CoreConfig, info map[string]string, infoKeys []string, tempDir string) int {
-	c.logger.Error("-dev-four-cluster only supported in enterprise Vault")
-	return 1
-}
-
 func entAdjustCoreConfig(config *server.Config, coreConfig *vault.CoreConfig) {
 }
 
@@ -21,7 +21,6 @@ import (
 	"github.com/hashicorp/vault/builtin/logical/pki"
 	"github.com/hashicorp/vault/builtin/logical/ssh"
 	"github.com/hashicorp/vault/builtin/logical/transit"
-	"github.com/hashicorp/vault/helper/benchhelpers"
 	"github.com/hashicorp/vault/helper/builtinplugins"
 	vaulthttp "github.com/hashicorp/vault/http"
 	"github.com/hashicorp/vault/sdk/logical"

@@ -190,12 +189,12 @@ func testVaultServerCoreConfig(tb testing.TB, coreConfig *vault.CoreConfig) (*ap
 func testVaultServerCoreConfigWithOpts(tb testing.TB, coreConfig *vault.CoreConfig, opts *vault.TestClusterOptions) (*api.Client, []string, func()) {
 	tb.Helper()
 
-	cluster := vault.NewTestCluster(benchhelpers.TBtoT(tb), coreConfig, opts)
+	cluster := vault.NewTestCluster(tb, coreConfig, opts)
 	cluster.Start()
 
 	// Make it easy to get access to the active
 	core := cluster.Cores[0].Core
-	vault.TestWaitActive(benchhelpers.TBtoT(tb), core)
+	vault.TestWaitActive(tb, core)
 
 	// Get the client already setup for us!
 	client := cluster.Cores[0].Client
@@ -11,7 +11,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net"
 	"net/http"
 	"net/url"

@@ -47,7 +46,6 @@ import (
 	loghelper "github.com/hashicorp/vault/helper/logging"
 	"github.com/hashicorp/vault/helper/metricsutil"
 	"github.com/hashicorp/vault/helper/namespace"
-	"github.com/hashicorp/vault/helper/testhelpers/teststorage"
 	"github.com/hashicorp/vault/helper/useragent"
 	vaulthttp "github.com/hashicorp/vault/http"
 	"github.com/hashicorp/vault/internalshared/configutil"

@@ -64,7 +62,6 @@ import (
 	"github.com/hashicorp/vault/vault/plugincatalog"
 	vaultseal "github.com/hashicorp/vault/vault/seal"
 	"github.com/hashicorp/vault/version"
-	"github.com/mitchellh/go-testing-interface"
 	"github.com/posener/complete"
 	"github.com/sasha-s/go-deadlock"
 	"go.uber.org/atomic"
@@ -138,8 +135,6 @@ type ServerCommand struct {
 	flagDevNoKV          bool
 	flagDevKVV1          bool
 	flagDevSkipInit      bool
-	flagDevThreeNode     bool
-	flagDevFourCluster   bool
 	flagDevTransactional bool
 	flagDevAutoSeal      bool
 	flagDevClusterJson   string

@@ -374,20 +369,6 @@ func (c *ServerCommand) Flags() *FlagSets {
 		Hidden:  true,
 	})
 
-	f.BoolVar(&BoolVar{
-		Name:    "dev-three-node",
-		Target:  &c.flagDevThreeNode,
-		Default: false,
-		Hidden:  true,
-	})
-
-	f.BoolVar(&BoolVar{
-		Name:    "dev-four-cluster",
-		Target:  &c.flagDevFourCluster,
-		Default: false,
-		Hidden:  true,
-	})
-
 	f.BoolVar(&BoolVar{
 		Name:   "dev-consul",
 		Target: &c.flagDevConsul,
@@ -1039,7 +1020,7 @@ func (c *ServerCommand) Run(args []string) int {
 	}
 
 	// Automatically enable dev mode if other dev flags are provided.
-	if c.flagDevConsul || c.flagDevHA || c.flagDevTransactional || c.flagDevLeasedKV || c.flagDevThreeNode || c.flagDevFourCluster || c.flagDevAutoSeal || c.flagDevKVV1 || c.flagDevNoKV || c.flagDevTLS {
+	if c.flagDevConsul || c.flagDevHA || c.flagDevTransactional || c.flagDevLeasedKV || c.flagDevAutoSeal || c.flagDevKVV1 || c.flagDevNoKV || c.flagDevTLS {
 		c.flagDev = true
 	}
 

@@ -1103,11 +1084,6 @@ func (c *ServerCommand) Run(args []string) int {
 
 	f.applyLogConfigOverrides(config.SharedConfig)
 
-	// Set 'trace' log level for the following 'dev' clusters
-	if c.flagDevThreeNode || c.flagDevFourCluster {
-		config.LogLevel = "trace"
-	}
-
 	l, err := c.configureLogging(config)
 	if err != nil {
 		c.UI.Error(err.Error())
@@ -1275,13 +1251,6 @@ func (c *ServerCommand) Run(args []string) int {
 	}()
 
 	coreConfig := createCoreConfig(c, config, backend, configSR, setSealResponse.barrierSeal, setSealResponse.unwrapSeal, metricsHelper, metricSink, secureRandomReader)
-	if c.flagDevThreeNode {
-		return c.enableThreeNodeDevCluster(&coreConfig, info, infoKeys, c.flagDevListenAddr, os.Getenv("VAULT_DEV_TEMP_DIR"))
-	}
-
-	if c.flagDevFourCluster {
-		return entEnableFourClusterDev(c, &coreConfig, info, infoKeys, os.Getenv("VAULT_DEV_TEMP_DIR"))
-	}
 
 	if allowPendingRemoval := os.Getenv(consts.EnvVaultAllowPendingRemovalMounts); allowPendingRemoval != "" {
 		var err error

@@ -1599,7 +1568,7 @@ func (c *ServerCommand) Run(args []string) int {
 		clusterJson.CACertPath = fmt.Sprintf("%s/%s", certDir, server.VaultDevCAFilename)
 	}
 
-	if c.flagDevClusterJson != "" && !c.flagDevThreeNode {
+	if c.flagDevClusterJson != "" {
 		b, err := jsonutil.EncodeJSON(clusterJson)
 		if err != nil {
 			c.UI.Error(fmt.Sprintf("Error encoding cluster.json: %s", err))
@@ -2115,245 +2084,6 @@ func (c *ServerCommand) enableDev(core *vault.Core, coreConfig *vault.CoreConfig
 	return init, nil
 }
 
-func (c *ServerCommand) enableThreeNodeDevCluster(base *vault.CoreConfig, info map[string]string, infoKeys []string, devListenAddress, tempDir string) int {
-	conf, opts := teststorage.ClusterSetup(base, &vault.TestClusterOptions{
-		HandlerFunc:       vaulthttp.Handler,
-		BaseListenAddress: c.flagDevListenAddr,
-		Logger:            c.logger,
-		TempDir:           tempDir,
-		DefaultHandlerProperties: vault.HandlerProperties{
-			ListenerConfig: &configutil.Listener{
-				Profiling: configutil.ListenerProfiling{
-					UnauthenticatedPProfAccess: true,
-				},
-				Telemetry: configutil.ListenerTelemetry{
-					UnauthenticatedMetricsAccess: true,
-				},
-			},
-		},
-	}, nil)
-	testCluster := vault.NewTestCluster(&testing.RuntimeT{}, conf, opts)
-	defer c.cleanupGuard.Do(testCluster.Cleanup)
-
-	if constants.IsEnterprise {
-		err := testcluster.WaitForActiveNodeAndPerfStandbys(context.Background(), testCluster)
-		if err != nil {
-			c.UI.Error(fmt.Sprintf("perf standbys didn't become ready: %v", err))
-			return 1
-		}
-	}
-
-	info["cluster parameters path"] = testCluster.TempDir
-	infoKeys = append(infoKeys, "cluster parameters path")
-
-	for i, core := range testCluster.Cores {
-		info[fmt.Sprintf("node %d api address", i)] = fmt.Sprintf("https://%s", core.Listeners[0].Address.String())
-		infoKeys = append(infoKeys, fmt.Sprintf("node %d api address", i))
-	}
-
-	infoKeys = append(infoKeys, "version")
-	verInfo := version.GetVersion()
-	info["version"] = verInfo.FullVersionNumber(false)
-	if verInfo.Revision != "" {
-		info["version sha"] = strings.Trim(verInfo.Revision, "'")
-		infoKeys = append(infoKeys, "version sha")
-	}
-
-	infoKeys = append(infoKeys, "cgo")
-	info["cgo"] = "disabled"
-	if version.CgoEnabled {
-		info["cgo"] = "enabled"
-	}
-
-	infoKeys = append(infoKeys, "go version")
-	info["go version"] = runtime.Version()
-
-	fipsStatus := entGetFIPSInfoKey()
-	if fipsStatus != "" {
-		infoKeys = append(infoKeys, "fips")
-		info["fips"] = fipsStatus
-	}
-
-	// Server configuration output
-	padding := 24
-
-	sort.Strings(infoKeys)
-	c.UI.Output("==> Vault server configuration:\n")
-
-	for _, k := range infoKeys {
-		c.UI.Output(fmt.Sprintf(
-			"%s%s: %s",
-			strings.Repeat(" ", padding-len(k)),
-			strings.Title(k),
-			info[k]))
-	}
-
-	c.UI.Output("")
-
-	for _, core := range testCluster.Cores {
-		core.Server.Handler = vaulthttp.Handler.Handler(&vault.HandlerProperties{
-			Core:           core.Core,
-			ListenerConfig: &configutil.Listener{},
-		})
-		core.SetClusterHandler(core.Server.Handler)
-	}
-
-	testCluster.Start()
-
-	ctx := namespace.ContextWithNamespace(context.Background(), namespace.RootNamespace)
-
-	if base.DevToken != "" {
-		req := &logical.Request{
-			ID:          "dev-gen-root",
-			Operation:   logical.UpdateOperation,
-			ClientToken: testCluster.RootToken,
-			Path:        "auth/token/create",
-			Data: map[string]interface{}{
-				"id":                base.DevToken,
-				"policies":          []string{"root"},
-				"no_parent":         true,
-				"no_default_policy": true,
-			},
-		}
-		resp, err := testCluster.Cores[0].HandleRequest(ctx, req)
-		if err != nil {
-			c.UI.Error(fmt.Sprintf("failed to create root token with ID %s: %s", base.DevToken, err))
-			return 1
-		}
-		if resp == nil {
-			c.UI.Error(fmt.Sprintf("nil response when creating root token with ID %s", base.DevToken))
-			return 1
-		}
-		if resp.Auth == nil {
-			c.UI.Error(fmt.Sprintf("nil auth when creating root token with ID %s", base.DevToken))
-			return 1
-		}
-
-		testCluster.RootToken = resp.Auth.ClientToken
-
-		req.ID = "dev-revoke-init-root"
-		req.Path = "auth/token/revoke-self"
-		req.Data = nil
-		_, err = testCluster.Cores[0].HandleRequest(ctx, req)
-		if err != nil {
-			c.UI.Output(fmt.Sprintf("failed to revoke initial root token: %s", err))
-			return 1
-		}
-	}
-
-	// Set the token
-	tokenHelper, err := c.TokenHelper()
-	if err != nil {
-		c.UI.Error(fmt.Sprintf("Error getting token helper: %s", err))
-		return 1
-	}
-	if err := tokenHelper.Store(testCluster.RootToken); err != nil {
-		c.UI.Error(fmt.Sprintf("Error storing in token helper: %s", err))
-		return 1
-	}
-
-	if err := ioutil.WriteFile(filepath.Join(testCluster.TempDir, "root_token"), []byte(testCluster.RootToken), 0o600); err != nil {
-		c.UI.Error(fmt.Sprintf("Error writing token to tempfile: %s", err))
-		return 1
-	}
-
-	c.UI.Output(fmt.Sprintf(
-		"==> Three node dev mode is enabled\n\n" +
-			"The unseal key and root token are reproduced below in case you\n" +
-			"want to seal/unseal the Vault or play with authentication.\n",
-	))
-
-	for i, key := range testCluster.BarrierKeys {
-		c.UI.Output(fmt.Sprintf(
-			"Unseal Key %d: %s",
-			i+1, base64.StdEncoding.EncodeToString(key),
-		))
-	}
-
-	c.UI.Output(fmt.Sprintf(
-		"\nRoot Token: %s\n", testCluster.RootToken,
-	))
-
-	c.UI.Output(fmt.Sprintf(
-		"\nUseful env vars:\n"+
-			"VAULT_TOKEN=%s\n"+
-			"VAULT_ADDR=%s\n"+
-			"VAULT_CACERT=%s/ca_cert.pem\n",
-		testCluster.RootToken,
-		testCluster.Cores[0].Client.Address(),
-		testCluster.TempDir,
-	))
-
-	if c.flagDevClusterJson != "" {
-		clusterJson := testcluster.ClusterJson{
-			Nodes:      []testcluster.ClusterNode{},
-			CACertPath: filepath.Join(testCluster.TempDir, "ca_cert.pem"),
-			RootToken:  testCluster.RootToken,
-		}
-		for _, core := range testCluster.Cores {
-			clusterJson.Nodes = append(clusterJson.Nodes, testcluster.ClusterNode{
-				APIAddress: core.Client.Address(),
-			})
-		}
-		b, err := jsonutil.EncodeJSON(clusterJson)
-		if err != nil {
-			c.UI.Error(fmt.Sprintf("Error encoding cluster.json: %s", err))
-			return 1
-		}
-		err = os.WriteFile(c.flagDevClusterJson, b, 0o600)
-		if err != nil {
-			c.UI.Error(fmt.Sprintf("Error writing cluster.json %q: %s", c.flagDevClusterJson, err))
-			return 1
-		}
-	}
-
-	// Output the header that the server has started
-	c.UI.Output("==> Vault server started! Log data will stream in below:\n")
-
-	// Inform any tests that the server is ready
-	select {
-	case c.startedCh <- struct{}{}:
-	default:
-	}
-
-	// Release the log gate.
-	c.flushLog()
-
-	// Wait for shutdown
-	shutdownTriggered := false
-
-	for !shutdownTriggered {
-		select {
-		case <-c.ShutdownCh:
-			c.UI.Output("==> Vault shutdown triggered")
-
-			// Stop the listeners so that we don't process further client requests.
-			c.cleanupGuard.Do(testCluster.Cleanup)
-
-			// Finalize will wait until after Vault is sealed, which means the
-			// request forwarding listeners will also be closed (and also
-			// waited for).
-			for _, core := range testCluster.Cores {
-				if err := core.Shutdown(); err != nil {
-					c.UI.Error(fmt.Sprintf("Error with core shutdown: %s", err))
-				}
-			}
-
-			shutdownTriggered = true
-
-		case <-c.SighupCh:
-			c.UI.Output("==> Vault reload triggered")
-			for _, core := range testCluster.Cores {
-				if err := c.Reload(core.ReloadFuncsLock, core.ReloadFuncs, nil, core.Core); err != nil {
-					c.UI.Error(fmt.Sprintf("Error(s) were encountered during reload: %s", err))
-				}
-			}
-		}
-	}
-
-	return 0
-}
-
 // addPlugin adds any plugins to the catalog
 func (c *ServerCommand) addPlugin(path, token string, core *vault.Core) error {
 	// Get the sha256 of the file at the given path.
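Worth noting why the block above is deleted outright rather than converted to `testing.TB`: it drove `vault.NewTestCluster` from a live server process by passing `&testing.RuntimeT{}`, go-testing-interface's runtime implementation of its `testing.T`. The standard library's `testing.TB` deliberately cannot be implemented outside the `testing` package, so no equivalent runtime shim exists. A sketch of that constraint (illustrative code, not from the diff):

```go
package example

import "testing"

// fakeTB tries to stand in for a test outside the testing framework, the way
// go-testing-interface's RuntimeT did.
type fakeTB struct{}

func (fakeTB) Fatal(args ...interface{}) {}

// Even with every exported method of testing.TB implemented, the following
// would not compile, because testing.TB embeds an unexported private()
// method that only the stdlib test types provide:
//
//	var _ testing.TB = fakeTB{} // compile error: fakeTB does not implement testing.TB
var _ testing.TB = (*testing.T)(nil) // *testing.T, *testing.B, and *testing.F are the only implementations
```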
@@ -11,6 +11,7 @@ import (
 	"os"
 	"path/filepath"
 	"strings"
+	"testing"
 	"time"
 
 	"github.com/hashicorp/go-hclog"

@@ -19,14 +20,13 @@ import (
 	"github.com/hashicorp/vault/sdk/framework"
 	"github.com/hashicorp/vault/sdk/helper/consts"
 	"github.com/hashicorp/vault/sdk/logical"
-	"github.com/mitchellh/go-testing-interface"
 )
 
 var externalPlugins = []string{"transform", "kmip", "keymgmt"}
 
 // RetryUntil runs f until it returns a nil result or the timeout is reached.
 // If a nil result hasn't been obtained by timeout, calls t.Fatal.
-func RetryUntil(t testing.T, timeout time.Duration, f func() error) {
+func RetryUntil(t testing.TB, timeout time.Duration, f func() error) {
 	t.Helper()
 	deadline := time.Now().Add(timeout)
 	var err error

@@ -41,7 +41,7 @@ func RetryUntil(t testing.T, timeout time.Duration, f func() error) {
 
 // MakeTestPluginDir creates a temporary directory suitable for holding plugins.
 // This helper also resolves symlinks to make tests happy on OS X.
-func MakeTestPluginDir(t testing.T) string {
+func MakeTestPluginDir(t testing.TB) string {
 	t.Helper()
 
 	dir, err := os.MkdirTemp("", "")

@@ -210,11 +210,11 @@ type TestLogger struct {
 	sink hclog.SinkAdapter
 }
 
-func NewTestLogger(t testing.T) *TestLogger {
+func NewTestLogger(t testing.TB) *TestLogger {
 	return NewTestLoggerWithSuffix(t, "")
 }
 
-func NewTestLoggerWithSuffix(t testing.T, logFileSuffix string) *TestLogger {
+func NewTestLoggerWithSuffix(t testing.TB, logFileSuffix string) *TestLogger {
 	var logFile *os.File
 	var logPath string
 	output := os.Stderr
@@ -4,6 +4,8 @@
 package minimal
 
 import (
+	"testing"
+
 	logicalKv "github.com/hashicorp/vault-plugin-secrets-kv"
 	"github.com/hashicorp/vault/audit"
 	logicalDb "github.com/hashicorp/vault/builtin/logical/database"

@@ -15,7 +17,6 @@ import (
 	"github.com/hashicorp/vault/sdk/physical/inmem"
 	"github.com/hashicorp/vault/vault"
 	"github.com/mitchellh/copystructure"
-	"github.com/mitchellh/go-testing-interface"
 )
 
 // NewTestSoloCluster is a simpler version of NewTestCluster that only creates

@@ -23,7 +24,7 @@ import (
 // from vault.TestClusterOptions, use NewTestCluster instead. It should work fine
 // with a nil config argument. There is no need to call Start or Cleanup or
 // TestWaitActive on the resulting cluster.
-func NewTestSoloCluster(t testing.T, config *vault.CoreConfig) *vault.TestCluster {
+func NewTestSoloCluster(t testing.TB, config *vault.CoreConfig) *vault.TestCluster {
 	logger := corehelpers.NewTestLogger(t)
 
 	mycfg := &vault.CoreConfig{}
@@ -14,9 +14,9 @@ import (
 	"path/filepath"
 	"strings"
 	"sync"
+	"testing"
 
 	"github.com/hashicorp/vault/sdk/helper/consts"
-	"github.com/mitchellh/go-testing-interface"
 )
 
 var (

@@ -34,7 +34,7 @@ type TestPlugin struct {
 	ImageSha256 string
 }
 
-func GetPlugin(t testing.T, typ consts.PluginType) (string, string, string, string) {
+func GetPlugin(t testing.TB, typ consts.PluginType) (string, string, string, string) {
 	t.Helper()
 	var pluginName string
 	var pluginType string

@@ -65,7 +65,7 @@ func GetPlugin(t testing.T, typ consts.PluginType) (string, string, string, stri
 
 // to mount a plugin, we need a working binary plugin, so we compile one here.
 // pluginVersion is used to override the plugin's self-reported version
-func CompilePlugin(t testing.T, typ consts.PluginType, pluginVersion string, pluginDir string) TestPlugin {
+func CompilePlugin(t testing.TB, typ consts.PluginType, pluginVersion string, pluginDir string) TestPlugin {
 	t.Helper()
 
 	pluginName, pluginType, pluginMain, pluginVersionLocation := GetPlugin(t, typ)

@@ -149,7 +149,7 @@ func CompilePlugin(t testing.T, typ consts.PluginType, pluginVersion string, plu
 	}
 }
 
-func BuildPluginContainerImage(t testing.T, plugin TestPlugin, pluginDir string) (image string, sha256 string) {
+func BuildPluginContainerImage(t testing.TB, plugin TestPlugin, pluginDir string) (image string, sha256 string) {
 	t.Helper()
 	ref := plugin.Name
 	if plugin.Version != "" {
@@ -6,6 +6,7 @@ package sealhelper
 import (
 	"path"
 	"strconv"
+	"testing"
 
 	"github.com/hashicorp/vault/api"
 	"github.com/hashicorp/vault/builtin/logical/transit"

@@ -16,14 +17,13 @@ import (
 	"github.com/hashicorp/vault/sdk/logical"
 	"github.com/hashicorp/vault/vault"
 	"github.com/hashicorp/vault/vault/seal"
-	"github.com/mitchellh/go-testing-interface"
 )
 
 type TransitSealServer struct {
 	*vault.TestCluster
 }
 
-func NewTransitSealServer(t testing.T, idx int) *TransitSealServer {
+func NewTransitSealServer(t testing.TB, idx int) *TransitSealServer {
 	conf := &vault.CoreConfig{
 		LogicalBackends: map[string]logical.Factory{
 			"transit": transit.Factory,

@@ -47,7 +47,7 @@ func NewTransitSealServer(t testing.T, idx int) *TransitSealServer {
 	return &TransitSealServer{cluster}
 }
 
-func (tss *TransitSealServer) MakeKey(t testing.T, key string) {
+func (tss *TransitSealServer) MakeKey(t testing.TB, key string) {
 	client := tss.Cores[0].Client
 	if _, err := client.Logical().Write(path.Join("transit", "keys", key), nil); err != nil {
 		t.Fatal(err)

@@ -59,7 +59,7 @@ func (tss *TransitSealServer) MakeKey(t testing.T, key string) {
 	}
 }
 
-func (tss *TransitSealServer) MakeSeal(t testing.T, key string) (vault.Seal, error) {
+func (tss *TransitSealServer) MakeSeal(t testing.TB, key string) (vault.Seal, error) {
 	client := tss.Cores[0].Client
 	wrapperConfig := map[string]string{
 		"address": client.Address(),
@@ -13,6 +13,7 @@ import (
 	"math/rand"
 	"os"
 	"strings"
+	"testing"
 	"time"
 
 	"github.com/armon/go-metrics"

@@ -23,7 +24,6 @@ import (
 	"github.com/hashicorp/vault/physical/raft"
 	"github.com/hashicorp/vault/sdk/helper/xor"
 	"github.com/hashicorp/vault/vault"
-	"github.com/mitchellh/go-testing-interface"
 )
 
 //go:generate enumer -type=GenerateRootKind -trimprefix=GenerateRoot

@@ -36,7 +36,7 @@ const (
 )
 
 // GenerateRoot generates a root token on the target cluster.
-func GenerateRoot(t testing.T, cluster *vault.TestCluster, kind GenerateRootKind) string {
+func GenerateRoot(t testing.TB, cluster *vault.TestCluster, kind GenerateRootKind) string {
 	t.Helper()
 	token, err := GenerateRootWithError(t, cluster, kind)
 	if err != nil {

@@ -45,7 +45,7 @@ func GenerateRoot(t testing.T, cluster *vault.TestCluster, kind GenerateRootKind
 	return token
 }
 
-func GenerateRootWithError(t testing.T, cluster *vault.TestCluster, kind GenerateRootKind) (string, error) {
+func GenerateRootWithError(t testing.TB, cluster *vault.TestCluster, kind GenerateRootKind) (string, error) {
 	t.Helper()
 	// If recovery keys supported, use those to perform root token generation instead
 	var keys [][]byte

@@ -118,14 +118,14 @@ func RandomWithPrefix(name string) string {
 	return fmt.Sprintf("%s-%d", name, rand.New(rand.NewSource(time.Now().UnixNano())).Int())
 }
 
-func EnsureCoresSealed(t testing.T, c *vault.TestCluster) {
+func EnsureCoresSealed(t testing.TB, c *vault.TestCluster) {
 	t.Helper()
 	for _, core := range c.Cores {
 		EnsureCoreSealed(t, core)
 	}
 }
 
-func EnsureCoreSealed(t testing.T, core *vault.TestClusterCore) {
+func EnsureCoreSealed(t testing.TB, core *vault.TestClusterCore) {
 	t.Helper()
 	core.Seal(t)
 	timeout := time.Now().Add(60 * time.Second)

@@ -140,7 +140,7 @@ func EnsureCoreSealed(t testing.T, core *vault.TestClusterCore) {
 	}
 }
 
-func EnsureCoresUnsealed(t testing.T, c *vault.TestCluster) {
+func EnsureCoresUnsealed(t testing.TB, c *vault.TestCluster) {
 	t.Helper()
 	for i, core := range c.Cores {
 		err := AttemptUnsealCore(c, core)

@@ -150,7 +150,7 @@ func EnsureCoresUnsealed(t testing.T, c *vault.TestCluster) {
 	}
 }
 
-func EnsureCoreUnsealed(t testing.T, c *vault.TestCluster, core *vault.TestClusterCore) {
+func EnsureCoreUnsealed(t testing.TB, c *vault.TestCluster, core *vault.TestClusterCore) {
 	t.Helper()
 	err := AttemptUnsealCore(c, core)
 	if err != nil {

@@ -208,17 +208,17 @@ func AttemptUnsealCore(c *vault.TestCluster, core *vault.TestClusterCore) error
 	return nil
 }
 
-func EnsureStableActiveNode(t testing.T, cluster *vault.TestCluster) {
+func EnsureStableActiveNode(t testing.TB, cluster *vault.TestCluster) {
 	t.Helper()
 	deriveStableActiveCore(t, cluster)
 }
 
-func DeriveStableActiveCore(t testing.T, cluster *vault.TestCluster) *vault.TestClusterCore {
+func DeriveStableActiveCore(t testing.TB, cluster *vault.TestCluster) *vault.TestClusterCore {
 	t.Helper()
 	return deriveStableActiveCore(t, cluster)
 }
 
-func deriveStableActiveCore(t testing.T, cluster *vault.TestCluster) *vault.TestClusterCore {
+func deriveStableActiveCore(t testing.TB, cluster *vault.TestCluster) *vault.TestClusterCore {
 	t.Helper()
 	activeCore := DeriveActiveCore(t, cluster)
 	minDuration := time.NewTimer(3 * time.Second)
@@ -247,7 +247,7 @@ func deriveStableActiveCore(t testing.T, cluster *vault.TestCluster) *vault.Test
 	return activeCore
 }
 
-func DeriveActiveCore(t testing.T, cluster *vault.TestCluster) *vault.TestClusterCore {
+func DeriveActiveCore(t testing.TB, cluster *vault.TestCluster) *vault.TestClusterCore {
 	t.Helper()
 	for i := 0; i < 60; i++ {
 		for _, core := range cluster.Cores {

@@ -268,7 +268,7 @@ func DeriveActiveCore(t testing.T, cluster *vault.TestCluster) *vault.TestCluste
 	return nil
 }
 
-func DeriveStandbyCores(t testing.T, cluster *vault.TestCluster) []*vault.TestClusterCore {
+func DeriveStandbyCores(t testing.TB, cluster *vault.TestCluster) []*vault.TestClusterCore {
 	t.Helper()
 	cores := make([]*vault.TestClusterCore, 0, 2)
 	for _, core := range cluster.Cores {

@@ -287,7 +287,7 @@ func DeriveStandbyCores(t testing.T, cluster *vault.TestCluster) []*vault.TestCl
 	return cores
 }
 
-func WaitForNCoresUnsealed(t testing.T, cluster *vault.TestCluster, n int) {
+func WaitForNCoresUnsealed(t testing.TB, cluster *vault.TestCluster, n int) {
 	t.Helper()
 	for i := 0; i < 30; i++ {
 		unsealed := 0

@@ -306,7 +306,7 @@ func WaitForNCoresUnsealed(t testing.T, cluster *vault.TestCluster, n int) {
 	t.Fatalf("%d cores were not unsealed", n)
 }
 
-func SealCores(t testing.T, cluster *vault.TestCluster) {
+func SealCores(t testing.TB, cluster *vault.TestCluster) {
 	t.Helper()
 	for _, core := range cluster.Cores {
 		if err := core.Shutdown(); err != nil {

@@ -325,7 +325,7 @@ func SealCores(t testing.T, cluster *vault.TestCluster) {
 	}
 }
 
-func WaitForNCoresSealed(t testing.T, cluster *vault.TestCluster, n int) {
+func WaitForNCoresSealed(t testing.TB, cluster *vault.TestCluster, n int) {
 	t.Helper()
 	for i := 0; i < 60; i++ {
 		sealed := 0

@@ -344,7 +344,7 @@ func WaitForNCoresSealed(t testing.T, cluster *vault.TestCluster, n int) {
 	t.Fatalf("%d cores were not sealed", n)
 }
 
-func WaitForActiveNode(t testing.T, cluster *vault.TestCluster) *vault.TestClusterCore {
+func WaitForActiveNode(t testing.TB, cluster *vault.TestCluster) *vault.TestClusterCore {
 	t.Helper()
 	for i := 0; i < 60; i++ {
 		for _, core := range cluster.Cores {

@@ -360,7 +360,7 @@ func WaitForActiveNode(t testing.T, cluster *vault.TestCluster) *vault.TestClust
 	return nil
 }
 
-func WaitForStandbyNode(t testing.T, core *vault.TestClusterCore) {
+func WaitForStandbyNode(t testing.TB, core *vault.TestClusterCore) {
 	t.Helper()
 	for i := 0; i < 30; i++ {
 		if isLeader, _, clusterAddr, _ := core.Core.Leader(); isLeader != true && clusterAddr != "" {

@@ -376,7 +376,7 @@ func WaitForStandbyNode(t testing.T, core *vault.TestClusterCore) {
 	t.Fatalf("node did not become standby")
 }
 
-func RekeyCluster(t testing.T, cluster *vault.TestCluster, recovery bool) [][]byte {
+func RekeyCluster(t testing.TB, cluster *vault.TestCluster, recovery bool) [][]byte {
 	t.Helper()
 	cluster.Logger.Info("rekeying cluster", "recovery", recovery)
 	client := cluster.Cores[0].Client

@@ -505,7 +505,7 @@ func RaftAppliedIndex(core *vault.TestClusterCore) uint64 {
 	return core.UnderlyingRawStorage.(*raft.RaftBackend).AppliedIndex()
 }
 
-func WaitForRaftApply(t testing.T, core *vault.TestClusterCore, index uint64) {
+func WaitForRaftApply(t testing.TB, core *vault.TestClusterCore, index uint64) {
 	t.Helper()
 
 	backend := core.UnderlyingRawStorage.(*raft.RaftBackend)

@@ -521,7 +521,7 @@ func WaitForRaftApply(t testing.T, core *vault.TestClusterCore, index uint64) {
 }
 
 // AwaitLeader waits for one of the cluster's nodes to become leader.
-func AwaitLeader(t testing.T, cluster *vault.TestCluster) (int, error) {
+func AwaitLeader(t testing.TB, cluster *vault.TestCluster) (int, error) {
 	timeout := time.Now().Add(60 * time.Second)
 	for {
 		if time.Now().After(timeout) {

@@ -545,7 +545,7 @@ func AwaitLeader(t testing.T, cluster *vault.TestCluster) (int, error) {
 	return 0, fmt.Errorf("timeout waiting leader")
 }
 
-func GenerateDebugLogs(t testing.T, client *api.Client) chan struct{} {
+func GenerateDebugLogs(t testing.TB, client *api.Client) chan struct{} {
 	t.Helper()
 
 	stopCh := make(chan struct{})

@@ -584,7 +584,7 @@ func GenerateDebugLogs(t testing.T, client *api.Client) chan struct{} {
 // from the map by removing entries whose keys are in the raft configuration.
 // Remaining entries result in an error return so that the caller can poll for
 // an expected configuration.
-func VerifyRaftPeers(t testing.T, client *api.Client, expected map[string]bool) error {
+func VerifyRaftPeers(t testing.TB, client *api.Client, expected map[string]bool) error {
 	t.Helper()
 
 	resp, err := client.Logical().Read("sys/storage/raft/configuration")

@@ -720,7 +720,7 @@ func SetNonRootToken(client *api.Client) error {
 
 // RetryUntilAtCadence runs f until it returns a nil result or the timeout is reached.
 // If a nil result hasn't been obtained by timeout, calls t.Fatal.
-func RetryUntilAtCadence(t testing.T, timeout, sleepTime time.Duration, f func() error) {
+func RetryUntilAtCadence(t testing.TB, timeout, sleepTime time.Duration, f func() error) {
 	t.Helper()
 	fail := func(err error) {
 		t.Fatalf("did not complete before deadline, err: %v", err)

@@ -730,7 +730,7 @@ func RetryUntilAtCadence(t testing.T, timeout, sleepTime time.Duration, f func()
 
 // RetryUntilAtCadenceWithHandler runs f until it returns a nil result or the timeout is reached.
 // If a nil result hasn't been obtained by timeout, onFailure is called.
-func RetryUntilAtCadenceWithHandler(t testing.T, timeout, sleepTime time.Duration, onFailure func(error), f func() error) {
+func RetryUntilAtCadenceWithHandler(t testing.TB, timeout, sleepTime time.Duration, onFailure func(error), f func() error) {
 	t.Helper()
 	deadline := time.Now().Add(timeout)
 	var err error
@@ -748,20 +748,20 @@ func RetryUntilAtCadenceWithHandler(t testing.T, timeout, sleepTime time.Duratio
 // If a nil result hasn't been obtained by timeout, calls t.Fatal.
 // NOTE: See RetryUntilAtCadence if you want to specify a different wait/sleep
 // duration between calls.
-func RetryUntil(t testing.T, timeout time.Duration, f func() error) {
+func RetryUntil(t testing.TB, timeout time.Duration, f func() error) {
 	t.Helper()
 	RetryUntilAtCadence(t, timeout, 100*time.Millisecond, f)
 }
 
 // CreateEntityAndAlias clones an existing client and creates an entity/alias, uses userpass mount path
 // It returns the cloned client, entityID, and aliasID.
-func CreateEntityAndAlias(t testing.T, client *api.Client, mountAccessor, entityName, aliasName string) (*api.Client, string, string) {
+func CreateEntityAndAlias(t testing.TB, client *api.Client, mountAccessor, entityName, aliasName string) (*api.Client, string, string) {
 	return CreateEntityAndAliasWithinMount(t, client, mountAccessor, "userpass", entityName, aliasName)
 }
 
 // CreateEntityAndAliasWithinMount clones an existing client and creates an entity/alias, within the specified mountPath
 // It returns the cloned client, entityID, and aliasID.
-func CreateEntityAndAliasWithinMount(t testing.T, client *api.Client, mountAccessor, mountPath, entityName, aliasName string) (*api.Client, string, string) {
+func CreateEntityAndAliasWithinMount(t testing.TB, client *api.Client, mountAccessor, mountPath, entityName, aliasName string) (*api.Client, string, string) {
 	t.Helper()
 	userClient, err := client.Clone()
 	if err != nil {

@@ -802,7 +802,7 @@ func CreateEntityAndAliasWithinMount(t testing.T, client *api.Client, mountAcces
 
 // SetupTOTPMount enables the totp secrets engine by mounting it. This requires
 // that the test cluster has a totp backend available.
-func SetupTOTPMount(t testing.T, client *api.Client) {
+func SetupTOTPMount(t testing.TB, client *api.Client) {
 	t.Helper()
 	// Mount the TOTP backend
 	mountInfo := &api.MountInput{

@@ -814,7 +814,7 @@ func SetupTOTPMount(t testing.T, client *api.Client) {
 }
 
 // SetupTOTPMethod configures the TOTP secrets engine with a provided config map.
-func SetupTOTPMethod(t testing.T, client *api.Client, config map[string]interface{}) string {
+func SetupTOTPMethod(t testing.TB, client *api.Client, config map[string]interface{}) string {
 	t.Helper()
 
 	resp1, err := client.Logical().Write("identity/mfa/method/totp", config)

@@ -833,7 +833,7 @@ func SetupTOTPMethod(t testing.T, client *api.Client, config map[string]interfac
 
 // SetupMFALoginEnforcement configures a single enforcement method using the
 // provided config map. "name" field is required in the config map.
-func SetupMFALoginEnforcement(t testing.T, client *api.Client, config map[string]interface{}) {
+func SetupMFALoginEnforcement(t testing.TB, client *api.Client, config map[string]interface{}) {
 	t.Helper()
 	enfName, ok := config["name"]
 	if !ok {

@@ -848,7 +848,7 @@ func SetupMFALoginEnforcement(t testing.T, client *api.Client, config map[string
 // SetupUserpassMountAccessor sets up userpass auth and returns its mount
 // accessor. This requires that the test cluster has a "userpass" auth method
 // available.
-func SetupUserpassMountAccessor(t testing.T, client *api.Client) string {
+func SetupUserpassMountAccessor(t testing.TB, client *api.Client) string {
 	t.Helper()
 	// Enable Userpass authentication
 	err := client.Sys().EnableAuthWithOptions("userpass", &api.EnableAuthOptions{

@@ -871,7 +871,7 @@ func SetupUserpassMountAccessor(t testing.T, client *api.Client) string {
 
 // RegisterEntityInTOTPEngine registers an entity with a methodID and returns
 // the generated name.
-func RegisterEntityInTOTPEngine(t testing.T, client *api.Client, entityID, methodID string) string {
+func RegisterEntityInTOTPEngine(t testing.TB, client *api.Client, entityID, methodID string) string {
 	t.Helper()
 	totpGenName := fmt.Sprintf("%s-%s", entityID, methodID)
 	secret, err := client.Logical().WriteWithContext(context.Background(), "identity/mfa/method/totp/admin-generate", map[string]interface{}{

@@ -905,7 +905,7 @@ func RegisterEntityInTOTPEngine(t testing.T, client *api.Client, entityID, metho
 }
 
 // GetTOTPCodeFromEngine requests a TOTP code from the specified enginePath.
-func GetTOTPCodeFromEngine(t testing.T, client *api.Client, enginePath string) string {
+func GetTOTPCodeFromEngine(t testing.TB, client *api.Client, enginePath string) string {
 	t.Helper()
 	totpPath := fmt.Sprintf("totp/code/%s", enginePath)
 	secret, err := client.Logical().ReadWithContext(context.Background(), totpPath)

@@ -920,7 +920,7 @@ func GetTOTPCodeFromEngine(t testing.T, client *api.Client, enginePath string) s
 
 // SetupLoginMFATOTP setups up a TOTP MFA using some basic configuration and
 // returns all relevant information to the client.
-func SetupLoginMFATOTP(t testing.T, client *api.Client, methodName string, waitPeriod int) (*api.Client, string, string) {
+func SetupLoginMFATOTP(t testing.TB, client *api.Client, methodName string, waitPeriod int) (*api.Client, string, string) {
 	t.Helper()
 	// Mount the totp secrets engine
 	SetupTOTPMount(t, client)

@@ -956,7 +956,7 @@ func SetupLoginMFATOTP(t testing.T, client *api.Client, methodName string, waitP
 	return entityClient, entityID, methodID
 }
 
-func SkipUnlessEnvVarsSet(t testing.T, envVars []string) {
+func SkipUnlessEnvVarsSet(t testing.TB, envVars []string) {
 	t.Helper()
 
 	for _, i := range envVars {

@@ -974,7 +974,7 @@ func SkipUnlessEnvVarsSet(t testing.T, envVars []string) {
 // The intention/use case for this function is to allow a cluster to start and become active with one
 // or more nodes not joined, so that we can test scenarios where a node joins later.
 // e.g. 4 nodes in the cluster, only 3 nodes in cluster 'active', 1 node can be joined later in tests.
-func WaitForNodesExcludingSelectedStandbys(t testing.T, cluster *vault.TestCluster, indexesToSkip ...int) {
+func WaitForNodesExcludingSelectedStandbys(t testing.TB, cluster *vault.TestCluster, indexesToSkip ...int) {
 	WaitForActiveNode(t, cluster)
 
 	contains := func(elems []int, e int) bool {
@@ -6,13 +6,14 @@
 package testhelpers
 
 import (
+	"testing"
+
 	"github.com/hashicorp/vault/vault"
-	"github.com/mitchellh/go-testing-interface"
 )
 
 // WaitForActiveNodeAndStandbys does nothing more than wait for the active node
 // on OSS. On enterprise it waits for perf standbys to be healthy too.
-func WaitForActiveNodeAndStandbys(t testing.T, cluster *vault.TestCluster) {
+func WaitForActiveNodeAndStandbys(t testing.TB, cluster *vault.TestCluster) {
 	WaitForActiveNode(t, cluster)
 	for _, core := range cluster.Cores {
 		if standby, _ := core.Core.Standby(); standby {
@@ -5,16 +5,16 @@ package consul
 
 import (
 	"sync"
+	"testing"
 	realtesting "testing"
 
 	"github.com/hashicorp/go-hclog"
 	"github.com/hashicorp/vault/helper/testhelpers/consul"
 	physConsul "github.com/hashicorp/vault/physical/consul"
 	"github.com/hashicorp/vault/vault"
-	"github.com/mitchellh/go-testing-interface"
 )
 
-func MakeConsulBackend(t testing.T, logger hclog.Logger) *vault.PhysicalBackendBundle {
+func MakeConsulBackend(t testing.TB, logger hclog.Logger) *vault.PhysicalBackendBundle {
 	cleanup, config := consul.PrepareTestContainer(t.(*realtesting.T), "", false, true)
 
 	consulConf := map[string]string{
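The hunk above keeps the `t.(*realtesting.T)` assertion: `consul.PrepareTestContainer` still wants a concrete `*testing.T`, so the broad `testing.TB` parameter is narrowed at the call site. A sketch of that pattern (names below are illustrative); the implicit trade-off is that the unchecked assertion panics if a benchmark's `*testing.B` is passed.

```go
package example

import "testing"

// needsConcreteT stands in for a dependency that insists on *testing.T,
// as consul.PrepareTestContainer does in the hunk above.
func needsConcreteT(t *testing.T) {}

func helper(t testing.TB) {
	// Checked form of the t.(*realtesting.T) assertion: skip instead of
	// panicking when a *testing.B reaches a helper that can only serve tests.
	tt, ok := t.(*testing.T)
	if !ok {
		t.Skip("helper requires a *testing.T")
	}
	needsConcreteT(tt)
}
```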
@@ -56,7 +56,7 @@ type consulContainerManager struct {
 	current *consulContainerBackendFactory
 }
 
-func (m *consulContainerManager) Backend(t testing.T, coreIdx int,
+func (m *consulContainerManager) Backend(t testing.TB, coreIdx int,
 	logger hclog.Logger, conf map[string]interface{},
 ) *vault.PhysicalBackendBundle {
 	m.mu.Lock()

@@ -77,7 +77,7 @@ type consulContainerBackendFactory struct {
 	config map[string]string
 }
 
-func (f *consulContainerBackendFactory) Backend(t testing.T, coreIdx int,
+func (f *consulContainerBackendFactory) Backend(t testing.TB, coreIdx int,
 	logger hclog.Logger, conf map[string]interface{},
 ) *vault.PhysicalBackendBundle {
 	f.mu.Lock()

@@ -100,7 +100,7 @@ func (f *consulContainerBackendFactory) Backend(t testing.T, coreIdx int,
 	}
 }
 
-func (f *consulContainerBackendFactory) startContainerLocked(t testing.T) {
+func (f *consulContainerBackendFactory) startContainerLocked(t testing.TB) {
 	cleanup, config := consul.PrepareTestContainer(t.(*realtesting.T), "", false, true)
 	f.config = map[string]string{
 		"address": config.Address(),
@@ -9,6 +9,7 @@ import (
 	"io/ioutil"
 	"math/rand"
 	"os"
+	"testing"
 	"time"
 
 	"github.com/hashicorp/go-hclog"

@@ -25,10 +26,9 @@ import (
 	physFile "github.com/hashicorp/vault/sdk/physical/file"
 	"github.com/hashicorp/vault/sdk/physical/inmem"
 	"github.com/hashicorp/vault/vault"
-	"github.com/mitchellh/go-testing-interface"
 )
 
-func MakeInmemBackend(t testing.T, logger hclog.Logger) *vault.PhysicalBackendBundle {
+func MakeInmemBackend(t testing.TB, logger hclog.Logger) *vault.PhysicalBackendBundle {
 	inm, err := inmem.NewTransactionalInmem(nil, logger)
 	if err != nil {
 		t.Fatal(err)

@@ -44,7 +44,7 @@ func MakeInmemBackend(t testing.T, logger hclog.Logger) *vault.PhysicalBackendBu
 	}
 }
 
-func MakeLatentInmemBackend(t testing.T, logger hclog.Logger) *vault.PhysicalBackendBundle {
+func MakeLatentInmemBackend(t testing.TB, logger hclog.Logger) *vault.PhysicalBackendBundle {
 	r := rand.New(rand.NewSource(time.Now().UnixNano()))
 	jitter := r.Intn(20)
 	latency := time.Duration(r.Intn(15)) * time.Millisecond

@@ -55,7 +55,7 @@ func MakeLatentInmemBackend(t testing.T, logger hclog.Logger) *vault.PhysicalBac
 	return pbb
 }
 
-func MakeInmemNonTransactionalBackend(t testing.T, logger hclog.Logger) *vault.PhysicalBackendBundle {
+func MakeInmemNonTransactionalBackend(t testing.TB, logger hclog.Logger) *vault.PhysicalBackendBundle {
 	inm, err := inmem.NewInmem(nil, logger)
 	if err != nil {
 		t.Fatal(err)

@@ -71,7 +71,7 @@ func MakeInmemNonTransactionalBackend(t testing.T, logger hclog.Logger) *vault.P
 	}
 }
 
-func MakeFileBackend(t testing.T, logger hclog.Logger) *vault.PhysicalBackendBundle {
+func MakeFileBackend(t testing.TB, logger hclog.Logger) *vault.PhysicalBackendBundle {
 	path, err := ioutil.TempDir("", "vault-integ-file-")
 	if err != nil {
 		t.Fatal(err)

@@ -101,7 +101,7 @@ func MakeFileBackend(t testing.T, logger hclog.Logger) *vault.PhysicalBackendBun
 	}
 }
 
-func MakeRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, extraConf map[string]interface{}, bridge *raft.ClusterAddrBridge) *vault.PhysicalBackendBundle {
+func MakeRaftBackend(t testing.TB, coreIdx int, logger hclog.Logger, extraConf map[string]interface{}, bridge *raft.ClusterAddrBridge) *vault.PhysicalBackendBundle {
 	nodeID := fmt.Sprintf("core-%d", coreIdx)
 	raftDir, err := ioutil.TempDir("", "vault-raft-")
 	if err != nil {
@@ -155,8 +155,8 @@ func makeRaftBackend(logger hclog.Logger, nodeID, raftDir string, extraConf map[
 // RaftHAFactory returns a PhysicalBackendBundle with raft set as the HABackend
 // and the physical.Backend provided in PhysicalBackendBundler as the storage
 // backend.
-func RaftHAFactory(f PhysicalBackendBundler) func(t testing.T, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle {
-	return func(t testing.T, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle {
+func RaftHAFactory(f PhysicalBackendBundler) func(t testing.TB, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle {
+	return func(t testing.TB, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle {
 		// Call the factory func to create the storage backend
 		physFactory := SharedPhysicalFactory(f)
 		bundle := physFactory(t, coreIdx, logger, nil)

@@ -201,10 +201,10 @@ func RaftHAFactory(f PhysicalBackendBundler) func(t testing.T, coreIdx int, logg
 	}
 }
 
-type PhysicalBackendBundler func(t testing.T, logger hclog.Logger) *vault.PhysicalBackendBundle
+type PhysicalBackendBundler func(t testing.TB, logger hclog.Logger) *vault.PhysicalBackendBundle
 
-func SharedPhysicalFactory(f PhysicalBackendBundler) func(t testing.T, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle {
-	return func(t testing.T, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle {
+func SharedPhysicalFactory(f PhysicalBackendBundler) func(t testing.TB, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle {
+	return func(t testing.TB, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle {
 		if coreIdx == 0 {
 			return f(t, logger)
 		}

@@ -230,7 +230,7 @@ func FileBackendSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions) {
 	opts.PhysicalFactory = SharedPhysicalFactory(MakeFileBackend)
 }
 
-func RaftClusterJoinNodes(t testing.T, cluster *vault.TestCluster) {
+func RaftClusterJoinNodes(t testing.TB, cluster *vault.TestCluster) {
 	leader := cluster.Cores[0]
 
 	leaderInfos := []*raft.LeaderJoinInfo{

@@ -255,7 +255,7 @@ func RaftClusterJoinNodes(t testing.T, cluster *vault.TestCluster) {
 func RaftBackendSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions) {
 	opts.KeepStandbysSealed = true
 	var bridge *raft.ClusterAddrBridge
-	opts.PhysicalFactory = func(t testing.T, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle {
+	opts.PhysicalFactory = func(t testing.TB, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle {
 		// The same PhysicalFactory can be shared across multiple clusters.
 		// The coreIdx == 0 check ensures that each time a new cluster is setup,
 		// when setting up its first node we create a new ClusterAddrBridge.

@@ -269,7 +269,7 @@ func RaftBackendSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions) {
 		}
 		return bundle
 	}
-	opts.SetupFunc = func(t testing.T, c *vault.TestCluster) {
+	opts.SetupFunc = func(t testing.TB, c *vault.TestCluster) {
 		if opts.NumCores != 1 {
 			RaftClusterJoinNodes(t, c)
 			time.Sleep(15 * time.Second)
@@ -7,12 +7,12 @@ import (
 	"fmt"
 	"io/ioutil"
 	"os"
+	"testing"
 
 	hclog "github.com/hashicorp/go-hclog"
 	"github.com/hashicorp/vault/physical/raft"
 	"github.com/hashicorp/vault/sdk/physical"
 	"github.com/hashicorp/vault/vault"
-	"github.com/mitchellh/go-testing-interface"
 )
 
 // ReusableStorage is a physical backend that can be re-used across

@@ -29,7 +29,7 @@ type ReusableStorage struct {
 	// Cleanup should be called after a TestCluster is no longer
 	// needed -- generally in a defer, just before the call to
 	// cluster.Cleanup().
-	Cleanup func(t testing.T, cluster *vault.TestCluster)
+	Cleanup func(t testing.TB, cluster *vault.TestCluster)
 }
 
 // StorageCleanup is a function that should be called once -- at the very end

@@ -39,12 +39,12 @@ type StorageCleanup func()
 
 // MakeReusableStorage makes a physical backend that can be re-used across
 // multiple test clusters in sequence.
-func MakeReusableStorage(t testing.T, logger hclog.Logger, bundle *vault.PhysicalBackendBundle) (ReusableStorage, StorageCleanup) {
+func MakeReusableStorage(t testing.TB, logger hclog.Logger, bundle *vault.PhysicalBackendBundle) (ReusableStorage, StorageCleanup) {
 	storage := ReusableStorage{
 		IsRaft: false,
 
 		Setup: func(conf *vault.CoreConfig, opts *vault.TestClusterOptions) {
-			opts.PhysicalFactory = func(t testing.T, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle {
+			opts.PhysicalFactory = func(t testing.TB, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle {
 				if coreIdx == 0 {
 					// We intentionally do not clone the backend's Cleanup func,
 					// because we don't want it to be run until the entire test has

@@ -59,7 +59,7 @@ func MakeReusableStorage(t testing.T, logger hclog.Logger, bundle *vault.Physica
 		},
 
 		// No-op
-		Cleanup: func(t testing.T, cluster *vault.TestCluster) {},
+		Cleanup: func(t testing.TB, cluster *vault.TestCluster) {},
 	}
 
 	cleanup := func() {

@@ -73,7 +73,7 @@ func MakeReusableStorage(t testing.T, logger hclog.Logger, bundle *vault.Physica
 
 // MakeReusableRaftStorage makes a physical raft backend that can be re-used
 // across multiple test clusters in sequence.
-func MakeReusableRaftStorage(t testing.T, logger hclog.Logger, numCores int) (ReusableStorage, StorageCleanup) {
+func MakeReusableRaftStorage(t testing.TB, logger hclog.Logger, numCores int) (ReusableStorage, StorageCleanup) {
 	raftDirs := make([]string, numCores)
 	for i := 0; i < numCores; i++ {
 		raftDirs[i] = makeRaftDir(t)
@@ -85,13 +85,13 @@ func MakeReusableRaftStorage(t testing.T, logger hclog.Logger, numCores int) (Re
 		Setup: func(conf *vault.CoreConfig, opts *vault.TestClusterOptions) {
 			conf.DisablePerformanceStandby = true
 			opts.KeepStandbysSealed = true
-			opts.PhysicalFactory = func(t testing.T, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle {
+			opts.PhysicalFactory = func(t testing.TB, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle {
 				return makeReusableRaftBackend(t, coreIdx, logger, raftDirs[coreIdx], false)
 			}
 		},
 
 		// Close open files being used by raft.
-		Cleanup: func(t testing.T, cluster *vault.TestCluster) {
+		Cleanup: func(t testing.TB, cluster *vault.TestCluster) {
 			for i := 0; i < len(cluster.Cores); i++ {
 				CloseRaftStorage(t, cluster, i)
 			}

@@ -108,14 +108,14 @@ func MakeReusableRaftStorage(t testing.T, logger hclog.Logger, numCores int) (Re
 }
 
 // CloseRaftStorage closes open files being used by raft.
-func CloseRaftStorage(t testing.T, cluster *vault.TestCluster, idx int) {
+func CloseRaftStorage(t testing.TB, cluster *vault.TestCluster, idx int) {
 	raftStorage := cluster.Cores[idx].UnderlyingRawStorage.(*raft.RaftBackend)
 	if err := raftStorage.Close(); err != nil {
 		t.Fatal(err)
 	}
 }
 
-func MakeReusableRaftHAStorage(t testing.T, logger hclog.Logger, numCores int, bundle *vault.PhysicalBackendBundle) (ReusableStorage, StorageCleanup) {
+func MakeReusableRaftHAStorage(t testing.TB, logger hclog.Logger, numCores int, bundle *vault.PhysicalBackendBundle) (ReusableStorage, StorageCleanup) {
 	raftDirs := make([]string, numCores)
 	for i := 0; i < numCores; i++ {
 		raftDirs[i] = makeRaftDir(t)

@@ -125,7 +125,7 @@ func MakeReusableRaftHAStorage(t testing.T, logger hclog.Logger, numCores int, b
 		Setup: func(conf *vault.CoreConfig, opts *vault.TestClusterOptions) {
 			opts.InmemClusterLayers = true
 			opts.KeepStandbysSealed = true
-			opts.PhysicalFactory = func(t testing.T, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle {
+			opts.PhysicalFactory = func(t testing.TB, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle {
 				haBundle := makeReusableRaftBackend(t, coreIdx, logger, raftDirs[coreIdx], true)
 
 				return &vault.PhysicalBackendBundle{

@@ -136,7 +136,7 @@ func MakeReusableRaftHAStorage(t testing.T, logger hclog.Logger, numCores int, b
 		},
 
 		// Close open files being used by raft.
-		Cleanup: func(t testing.T, cluster *vault.TestCluster) {
+		Cleanup: func(t testing.TB, cluster *vault.TestCluster) {
 			for _, core := range cluster.Cores {
 				raftStorage := core.UnderlyingHAStorage.(*raft.RaftBackend)
 				if err := raftStorage.Close(); err != nil {

@@ -159,7 +159,7 @@ func MakeReusableRaftHAStorage(t testing.T, logger hclog.Logger, numCores int, b
 	return storage, cleanup
 }
 
-func makeRaftDir(t testing.T) string {
+func makeRaftDir(t testing.TB) string {
 	raftDir, err := ioutil.TempDir("", "vault-raft-")
 	if err != nil {
 		t.Fatal(err)

@@ -168,7 +168,7 @@ func makeRaftDir(t testing.T) string {
 	return raftDir
 }
 
-func makeReusableRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, raftDir string, ha bool) *vault.PhysicalBackendBundle {
+func makeReusableRaftBackend(t testing.TB, coreIdx int, logger hclog.Logger, raftDir string, ha bool) *vault.PhysicalBackendBundle {
 	nodeID := fmt.Sprintf("core-%d", coreIdx)
 	backend, err := makeRaftBackend(logger, nodeID, raftDir, nil, nil)
 	if err != nil {
@@ -14,7 +14,6 @@ import (
 
 	log "github.com/hashicorp/go-hclog"
 	"github.com/hashicorp/vault/builtin/logical/transit"
-	"github.com/hashicorp/vault/helper/benchhelpers"
 	"github.com/hashicorp/vault/helper/forwarding"
 	"github.com/hashicorp/vault/sdk/helper/consts"
 	"github.com/hashicorp/vault/sdk/helper/logging"

@@ -32,7 +31,7 @@ func BenchmarkHTTP_Forwarding_Stress(b *testing.B) {
 		},
 	}
 
-	cluster := vault.NewTestCluster(benchhelpers.TBtoT(b), coreConfig, &vault.TestClusterOptions{
+	cluster := vault.NewTestCluster(b, coreConfig, &vault.TestClusterOptions{
 		HandlerFunc: Handler,
 		Logger:      logging.NewVaultLoggerWithWriter(ioutil.Discard, log.Error),
 	})

@@ -42,7 +41,7 @@ func BenchmarkHTTP_Forwarding_Stress(b *testing.B) {
 
 	// make it easy to get access to the active
 	core := cores[0].Core
-	vault.TestWaitActive(benchhelpers.TBtoT(b), core)
+	vault.TestWaitActive(b, core)
 
 	handler := cores[0].Handler
 	host := fmt.Sprintf("https://127.0.0.1:%d/v1/transit/", cores[0].Listeners[0].Address.Port)
@@ -15,7 +15,6 @@ import (
 	log "github.com/hashicorp/go-hclog"
 	"github.com/hashicorp/vault/api"
 	bplugin "github.com/hashicorp/vault/builtin/plugin"
-	"github.com/hashicorp/vault/helper/benchhelpers"
 	"github.com/hashicorp/vault/helper/testhelpers/corehelpers"
 	"github.com/hashicorp/vault/sdk/helper/consts"
 	"github.com/hashicorp/vault/sdk/helper/pluginutil"

@@ -48,7 +47,7 @@ func getPluginClusterAndCore(t *testing.T, logger log.Logger) (*vault.TestCluste
 		PluginDirectory: pluginDir,
 	}
 
-	cluster := vault.NewTestCluster(benchhelpers.TBtoT(t), coreConfig, &vault.TestClusterOptions{
+	cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
 		HandlerFunc: Handler,
 	})
 	cluster.Start()

@@ -56,8 +55,8 @@ func getPluginClusterAndCore(t *testing.T, logger log.Logger) (*vault.TestCluste
 	cores := cluster.Cores
 	core := cores[0]
 
-	vault.TestWaitActive(benchhelpers.TBtoT(t), core.Core)
-	vault.TestAddTestPlugin(benchhelpers.TBtoT(t), core.Core, "mock-plugin", consts.PluginTypeSecrets, "", "TestPlugin_PluginMain",
+	vault.TestWaitActive(t, core.Core)
+	vault.TestAddTestPlugin(t, core.Core, "mock-plugin", consts.PluginTypeSecrets, "", "TestPlugin_PluginMain",
 		[]string{fmt.Sprintf("%s=%s", pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile)})
 
 	// Mount the mock plugin
@@ -53,7 +53,7 @@ type ExecDevClusterOptions struct {
BaseListenAddress string
}

func NewTestExecDevCluster(t *testing.T, opts *ExecDevClusterOptions) *ExecDevCluster {
func NewTestExecDevServer(t *testing.T, opts *ExecDevClusterOptions) *ExecDevCluster {
if opts == nil {
opts = &ExecDevClusterOptions{}
}
@@ -141,12 +141,10 @@ func (dc *ExecDevCluster) setupExecDevCluster(ctx context.Context, opts *ExecDev
clusterJsonPath := filepath.Join(dc.tmpDir, "cluster.json")
args := []string{"server", "-dev", "-dev-cluster-json", clusterJsonPath}
switch {
case opts.NumCores == 3:
args = append(args, "-dev-three-node")
case opts.NumCores == 1:
args = append(args, "-dev-tls")
default:
return fmt.Errorf("NumCores=1 and NumCores=3 are the only supported options right now")
return fmt.Errorf("NumCores=1 is the only supported option right now")
}
if opts.BaseListenAddress != "" {
args = append(args, "-dev-listen-address", opts.BaseListenAddress)
@@ -223,6 +221,7 @@ func (dc *ExecDevCluster) setupExecDevCluster(ctx context.Context, opts *ExecDev
}
time.Sleep(500 * time.Millisecond)
}

return ctx.Err()
}

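With `-dev-three-node` gone, the exec-driven dev cluster can only spawn a single node, so the three-way switch collapses to a guard. A standalone sketch of the narrowed logic (the function and parameter names are illustrative; the real code lives in the unexported `setupExecDevCluster`):

```go
package example

import "fmt"

// buildDevArgs mirrors the hunk above: after the removal of -dev-three-node,
// NumCores == 1 is the only configuration the exec dev cluster accepts.
func buildDevArgs(numCores int, clusterJSONPath string) ([]string, error) {
	args := []string{"server", "-dev", "-dev-cluster-json", clusterJSONPath}
	if numCores != 1 {
		return nil, fmt.Errorf("NumCores=1 is the only supported option right now")
	}
	return append(args, "-dev-tls"), nil
}
```
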
@@ -7,8 +7,8 @@ import (
"crypto/sha256"
"fmt"
"reflect"
"testing"

"github.com/mitchellh/go-testing-interface"
"github.com/mitchellh/mapstructure"
)

@@ -74,7 +74,7 @@ func ToString(in any) string {

// StringOrDie renders its input using ToMap, and returns a string containing the
// result. If rendering yields an error, calls t.Fatal.
func StringOrDie(t testing.T, in any) string {
func StringOrDie(t testing.TB, in any) string {
t.Helper()
m, err := ToMap(in)
if err != nil {

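`StringOrDie` keeps its `t.Helper()` call through the signature change, and it matters: `Helper` marks the function as a test helper so that a `Fatal` inside it is reported at the caller's line, which is where the bad input actually lives. A small self-contained illustration:

```go
package example

import "testing"

// mustPositive fails the calling test, not itself: t.Helper() tells the
// testing package to skip this frame when reporting file and line.
func mustPositive(t testing.TB, n int) {
	t.Helper()
	if n <= 0 {
		t.Fatalf("expected a positive value, got %d", n)
	}
}

func TestHelperReporting(t *testing.T) {
	mustPositive(t, 1) // a failure here would be attributed to this line
}
```
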
@@ -6,15 +6,15 @@ package logical
import (
"context"
"reflect"
"testing"
"time"

log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/sdk/helper/logging"
testing "github.com/mitchellh/go-testing-interface"
)

// TestRequest is a helper to create a purely in-memory Request struct.
func TestRequest(t testing.T, op Operation, path string) *Request {
func TestRequest(t testing.TB, op Operation, path string) *Request {
return &Request{
Operation: op,
Path: path,
@@ -26,7 +26,7 @@ func TestRequest(t testing.T, op Operation, path string) *Request {

// TestStorage is a helper that can be used from unit tests to verify
// the behavior of a Storage impl.
func TestStorage(t testing.T, s Storage) {
func TestStorage(t testing.TB, s Storage) {
keys, err := s.List(context.Background(), "")
if err != nil {
t.Fatalf("list error: %s", err)

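For callers the widened sdk signatures are drop-in, since `*testing.T` already satisfies `testing.TB`. A usage sketch against the two helpers changed above (`InmemStorage` is the sdk's in-memory `Storage` implementation; the test itself is hypothetical):

```go
package example_test

import (
	"testing"

	"github.com/hashicorp/vault/sdk/logical"
)

func TestStorageContract(t *testing.T) {
	// Build a purely in-memory request; no client or server involved.
	req := logical.TestRequest(t, logical.UpdateOperation, "secret/foo")
	req.Data = map[string]interface{}{"value": "bar"}

	// Exercise the generic Storage contract against the in-memory impl.
	logical.TestStorage(t, &logical.InmemStorage{})
}
```
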
@@ -20,7 +20,6 @@ import (
"github.com/armon/go-metrics"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/helper/benchhelpers"
"github.com/hashicorp/vault/helper/fairshare"
"github.com/hashicorp/vault/helper/metricsutil"
"github.com/hashicorp/vault/helper/namespace"
@@ -35,7 +34,7 @@ var testImagePull sync.Once

// mockExpiration returns a mock expiration manager
func mockExpiration(t testing.TB) *ExpirationManager {
c, _, _ := TestCoreUnsealed(benchhelpers.TBtoT(t))
c, _, _ := TestCoreUnsealed(t)

// Wait until the expiration manager is out of restore mode.
// This was added to prevent sporadic failures of TestExpiration_unrecoverableErrorMakesIrrevocable.
@@ -51,7 +50,7 @@ func mockExpiration(t testing.TB) *ExpirationManager {
}

func mockBackendExpiration(t testing.TB, backend physical.Backend) (*Core, *ExpirationManager) {
c, _, _ := TestCoreUnsealedBackend(benchhelpers.TBtoT(t), backend)
c, _, _ := TestCoreUnsealedBackend(t, backend)
return c, c.expiration
}

@@ -699,7 +698,7 @@ func BenchmarkExpiration_Restore_InMem(b *testing.B) {
}

func benchmarkExpirationBackend(b *testing.B, physicalBackend physical.Backend, numLeases int) {
c, _, _ := TestCoreUnsealedBackend(benchhelpers.TBtoT(b), physicalBackend)
c, _, _ := TestCoreUnsealedBackend(b, physicalBackend)
exp := c.expiration
noop := &NoopBackend{}
view := NewBarrierView(c.barrier, "logical/")
@@ -768,7 +767,7 @@ func BenchmarkExpiration_Create_Leases(b *testing.B) {
b.Fatal(err)
}

c, _, _ := TestCoreUnsealedBackend(benchhelpers.TBtoT(b), inm)
c, _, _ := TestCoreUnsealedBackend(b, inm)
exp := c.expiration
noop := &NoopBackend{}
view := NewBarrierView(c.barrier, "logical/")

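Benchmarks are the main beneficiary: `TestCoreUnsealedBackend` can now be fed a `*testing.B` directly. A skeleton under the same pattern as `benchmarkExpirationBackend` (the measured body is elided; `NewNullLogger` is hclog's no-op logger):

```go
package example

import (
	"testing"

	log "github.com/hashicorp/go-hclog"
	"github.com/hashicorp/vault/sdk/physical/inmem"
	"github.com/hashicorp/vault/vault"
)

func BenchmarkWithInmemCore(b *testing.B) {
	inm, err := inmem.NewInmem(nil, log.NewNullLogger())
	if err != nil {
		b.Fatal(err)
	}
	// No TBtoT adapter: *testing.B satisfies testing.TB.
	core, _, _ := vault.TestCoreUnsealedBackend(b, inm)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = core // exercise the code under measurement here
	}
}
```
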
@@ -13,7 +13,6 @@ import (
"github.com/hashicorp/vault/builtin/logical/database"
"github.com/hashicorp/vault/builtin/logical/pki"
"github.com/hashicorp/vault/builtin/logical/transit"
"github.com/hashicorp/vault/helper/benchhelpers"
"github.com/hashicorp/vault/helper/builtinplugins"
"github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/sdk/logical"
@@ -56,7 +55,7 @@ func testVaultServerUnseal(t testing.TB) (*api.Client, []string, func()) {
func testVaultServerCoreConfig(t testing.TB, coreConfig *vault.CoreConfig) (*api.Client, []string, func()) {
t.Helper()

cluster := vault.NewTestCluster(benchhelpers.TBtoT(t), coreConfig, &vault.TestClusterOptions{
cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
HandlerFunc: http.Handler,
NumCores: 1,
})
@@ -64,7 +63,7 @@ func testVaultServerCoreConfig(t testing.TB, coreConfig *vault.CoreConfig) (*api

// Make it easy to get access to the active
core := cluster.Cores[0].Core
vault.TestWaitActive(benchhelpers.TBtoT(t), core)
vault.TestWaitActive(t, core)

// Get the client already setup for us!
client := cluster.Cores[0].Client

@@ -4,21 +4,21 @@
package pprof

import (
"context"
"encoding/json"
"io/ioutil"
"io"
"net/http"
"testing"

"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/sdk/helper/testcluster"
"github.com/stretchr/testify/require"
"golang.org/x/net/http2"
)

func SysPprof_Test(t *testing.T, cluster testcluster.VaultCluster) {
nodes := cluster.Nodes()
if len(nodes) == 0 {
t.Fatal("no nodes returned")
}
client := nodes[0].APIClient()

transport := cleanhttp.DefaultPooledTransport()
@@ -87,7 +87,7 @@ func SysPprof_Test(t *testing.T, cluster testcluster.VaultCluster) {
}
defer resp.Body.Close()

httpRespBody, err := ioutil.ReadAll(resp.Body)
httpRespBody, err := io.ReadAll(resp.Body)
if err != nil {
t.Fatal(err)
}
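
The `io/ioutil` package has been deprecated since Go 1.16, and `ioutil.ReadAll` is now just a wrapper that calls `io.ReadAll`, so this substitution is purely mechanical. For reference:

```go
package example

import (
	"io"
	"net/http"
)

// readBody drains a response body with io.ReadAll. Since Go 1.16,
// ioutil.ReadAll has been a deprecated wrapper around exactly this call,
// so the swap in the hunk above preserves behavior.
func readBody(resp *http.Response) (string, error) {
	defer resp.Body.Close()
	data, err := io.ReadAll(resp.Body)
	return string(data), err
}
```
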
@@ -114,27 +114,3 @@ func SysPprof_Test(t *testing.T, cluster testcluster.VaultCluster) {
})
}
}

func SysPprof_Standby_Test(t *testing.T, cluster testcluster.VaultCluster) {
pprof := func(client *api.Client) (string, error) {
req := client.NewRequest("GET", "/v1/sys/pprof/cmdline")
resp, err := client.RawRequestWithContext(context.Background(), req)
if err != nil {
return "", err
}
defer resp.Body.Close()

data, err := ioutil.ReadAll(resp.Body)
return string(data), err
}

cmdline, err := pprof(cluster.Nodes()[0].APIClient())
require.Nil(t, err)
require.NotEmpty(t, cmdline)
t.Log(cmdline)

cmdline, err = pprof(cluster.Nodes()[1].APIClient())
require.Nil(t, err)
require.NotEmpty(t, cmdline)
t.Log(cmdline)
}

@@ -21,7 +21,7 @@ func TestSysPprof_Exec(t *testing.T) {
if binary == "" {
t.Skip("only running exec test when $VAULT_BINARY present")
}
cluster := testcluster.NewTestExecDevCluster(t, &testcluster.ExecDevClusterOptions{
cluster := testcluster.NewTestExecDevServer(t, &testcluster.ExecDevClusterOptions{
ClusterOptions: testcluster.ClusterOptions{
NumCores: 1,
},
@@ -32,27 +32,3 @@ func TestSysPprof_Exec(t *testing.T) {

pprof.SysPprof_Test(t, cluster)
}

// TestSysPprof_Standby_Exec is the same as TestSysPprof_Standby, but using a Vault binary
// running as -dev-three-node instead of a fake single node TestCluster. There's
// no particular reason why TestSysPprof was chosen to validate that mechanism,
// other than that it was fast and simple.
func TestSysPprof_Standby_Exec(t *testing.T) {
t.Parallel()
binary := os.Getenv("VAULT_BINARY")
if binary == "" {
t.Skip("only running exec test when $VAULT_BINARY present")
}
cluster := testcluster.NewTestExecDevCluster(t, &testcluster.ExecDevClusterOptions{
ClusterOptions: testcluster.ClusterOptions{
VaultNodeConfig: &testcluster.VaultNodeConfig{
DisablePerformanceStandby: true,
},
},
BinaryPath: binary,
BaseListenAddress: "127.0.0.1:8210",
})
defer cluster.Cleanup()

pprof.SysPprof_Standby_Test(t, cluster)
}

@@ -13,7 +13,6 @@ import (

"github.com/hashicorp/go-cleanhttp"
vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/internalshared/configutil"
"github.com/hashicorp/vault/sdk/helper/testhelpers/schema"
"github.com/hashicorp/vault/vault"
"golang.org/x/net/http2"
@@ -86,22 +85,3 @@ func TestSysPprof_MaxRequestDuration(t *testing.T) {
t.Fatalf("unexpected error returned: %v", errs)
}
}

func TestSysPprof_Standby(t *testing.T) {
t.Parallel()
cluster := vault.NewTestCluster(t, &vault.CoreConfig{
DisablePerformanceStandby: true,
}, &vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
DefaultHandlerProperties: vault.HandlerProperties{
ListenerConfig: &configutil.Listener{
Profiling: configutil.ListenerProfiling{
UnauthenticatedPProfAccess: true,
},
},
},
})
defer cluster.Cleanup()

SysPprof_Standby_Test(t, cluster)
}

@@ -21,7 +21,6 @@ import (
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/api"
credUserpass "github.com/hashicorp/vault/builtin/credential/userpass"
"github.com/hashicorp/vault/helper/benchhelpers"
"github.com/hashicorp/vault/helper/constants"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/helper/testhelpers"
@@ -105,8 +104,8 @@ func raftClusterBuilder(t testing.TB, ropts *RaftClusterOpts) (*vault.CoreConfig

func raftCluster(t testing.TB, ropts *RaftClusterOpts) (*vault.TestCluster, *vault.TestClusterOptions) {
conf, opts := raftClusterBuilder(t, ropts)
cluster := vault.NewTestCluster(benchhelpers.TBtoT(t), conf, &opts)
vault.TestWaitActive(benchhelpers.TBtoT(t), cluster.Cores[0].Core)
cluster := vault.NewTestCluster(t, conf, &opts)
vault.TestWaitActive(t, cluster.Cores[0].Core)
return cluster, &opts
}

@@ -10,10 +10,10 @@ import (
"io"
"os"
"path/filepath"
"testing"

"github.com/hashicorp/vault/sdk/helper/consts"
"github.com/hashicorp/vault/sdk/helper/pluginutil"
"github.com/mitchellh/go-testing-interface"
)

// TestAddTestPlugin registers the testFunc as part of the plugin command to the
@@ -23,7 +23,7 @@ import (
// will be something like:
// stderr (ignored by go-plugin): "testing: warning: no tests to run"
// stdout: "PASS"
func TestAddTestPlugin(t testing.T, pluginCatalog *PluginCatalog, name string, pluginType consts.PluginType, version string, testFunc string, env []string) {
func TestAddTestPlugin(t testing.TB, pluginCatalog *PluginCatalog, name string, pluginType consts.PluginType, version string, testFunc string, env []string) {
t.Helper()
if pluginCatalog.directory == "" {
t.Fatal("plugin catalog must have a plugin directory set to add plugins")

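The doc comment's mention of "PASS" and "no tests to run" is worth unpacking: `TestAddTestPlugin` registers the running test binary itself as the plugin executable, re-invoked with `-test.run=<testFunc>`. The named function must therefore be a no-op under a plain `go test` run and only serve the plugin when Vault launches it. A hedged sketch of that guard (the exact environment check Vault's helpers use may differ; `PluginCACertPEMEnv` is the variable visibly set in this diff):

```go
package example

import (
	"os"
	"testing"

	"github.com/hashicorp/vault/sdk/helper/pluginutil"
)

// TestPlugin_PluginMain doubles as the plugin's entry point. Under `go test`
// the guard trips and the "test" passes vacuously; when Vault re-executes the
// binary with plugin environment variables set, it serves the plugin instead.
func TestPlugin_PluginMain(t *testing.T) {
	if os.Getenv(pluginutil.PluginCACertPEMEnv) == "" {
		return // ordinary test run: do nothing
	}
	servePlugin() // hypothetical: start the plugin server here
}

func servePlugin() {}
```
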
@@ -5,13 +5,13 @@ package vault

import (
"context"
testing "testing"

"github.com/hashicorp/vault/vault/seal"
vaultseal "github.com/hashicorp/vault/vault/seal"
testing "github.com/mitchellh/go-testing-interface"
)

func TestCoreUnsealedWithConfigs(t testing.T, barrierConf, recoveryConf *SealConfig) (*Core, [][]byte, [][]byte, string) {
func TestCoreUnsealedWithConfigs(t testing.TB, barrierConf, recoveryConf *SealConfig) (*Core, [][]byte, [][]byte, string) {
t.Helper()
opts := &seal.TestSealOpts{}
if recoveryConf == nil {
@@ -20,7 +20,7 @@ func TestCoreUnsealedWithConfigs(t testing.T, barrierConf, recoveryConf *SealCon
return TestCoreUnsealedWithConfigSealOpts(t, barrierConf, recoveryConf, opts)
}

func TestCoreUnsealedWithConfigSealOpts(t testing.T, barrierConf, recoveryConf *SealConfig, sealOpts *seal.TestSealOpts) (*Core, [][]byte, [][]byte, string) {
func TestCoreUnsealedWithConfigSealOpts(t testing.TB, barrierConf, recoveryConf *SealConfig, sealOpts *seal.TestSealOpts) (*Core, [][]byte, [][]byte, string) {
t.Helper()
seal := NewTestSeal(t, sealOpts)
core := TestCoreWithSeal(t, seal, false)

@@ -4,15 +4,16 @@
package vault

import (
testing "testing"

"github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2"
"github.com/hashicorp/vault/helper/testhelpers/corehelpers"
"github.com/hashicorp/vault/vault/seal"
testing "github.com/mitchellh/go-testing-interface"
)

// NewTestSeal creates a new seal for testing. If you want to use the same seal multiple times, such as for
// a cluster, use NewTestSealFunc instead.
func NewTestSeal(t testing.T, opts *seal.TestSealOpts) Seal {
func NewTestSeal(t testing.TB, opts *seal.TestSealOpts) Seal {
t.Helper()
opts = seal.NewTestSealOpts(opts)
logger := corehelpers.NewTestLogger(t).Named("sealAccess")
@@ -55,7 +56,7 @@ func NewTestSeal(t testing.T, opts *seal.TestSealOpts) Seal {

// NewTestSealFunc returns a function that creates seals. All such seals will have TestWrappers that
// share the same secret, thus making them equivalent.
func NewTestSealFunc(t testing.T, opts *seal.TestSealOpts) func() Seal {
func NewTestSealFunc(t testing.TB, opts *seal.TestSealOpts) func() Seal {
testSeal := NewTestSeal(t, opts)

return func() Seal {
@@ -64,12 +65,12 @@ func NewTestSealFunc(t testing.T, opts *seal.TestSealOpts) func() Seal {
}

// CloneTestSeal creates a new test seal that shares the same seal wrappers as `testSeal`.
func cloneTestSeal(t testing.T, testSeal Seal) Seal {
func cloneTestSeal(t testing.TB, testSeal Seal) Seal {
logger := corehelpers.NewTestLogger(t).Named("sealAccess")

access, err := seal.NewAccessFromSealWrappers(logger, testSeal.GetAccess().Generation(), testSeal.GetAccess().GetSealGenerationInfo().IsRewrapped(), testSeal.GetAccess().GetAllSealWrappersByPriority())
if err != nil {
t.Fatal("error cloning seal %v", err)
t.Fatalf("error cloning seal %v", err)
}
if testSeal.StoredKeysSupported() == seal.StoredKeysNotSupported {
return NewDefaultSeal(access)

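The last hunk also fixes a latent bug rather than just a signature: `t.Fatal` formats its arguments like `Println`, so the `%v` verb was printed literally with the error appended after it. `t.Fatalf` is the `Printf`-style variant, and `go vet`'s printf check flags the old form. Side by side:

```go
package example

import (
	"errors"
	"testing"
)

func TestFatalVersusFatalf(t *testing.T) {
	t.Skip("illustration only; both calls below would end the test")
	err := errors.New("boom")

	// Println-style: prints `error cloning seal %v boom`, verb untouched.
	t.Fatal("error cloning seal %v", err)

	// Printf-style: prints `error cloning seal boom`, as intended.
	t.Fatalf("error cloning seal %v", err)
}
```
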
@@ -27,6 +27,7 @@ import (
"runtime"
"sync"
"sync/atomic"
"testing"
"time"

"github.com/armon/go-metrics"
@@ -54,7 +55,6 @@ import (
"github.com/hashicorp/vault/vault/plugincatalog"
"github.com/hashicorp/vault/vault/seal"
"github.com/mitchellh/copystructure"
"github.com/mitchellh/go-testing-interface"
"golang.org/x/crypto/ed25519"
"golang.org/x/net/http2"
)
@@ -98,32 +98,32 @@ oOyBJU/HMVvBfv4g+OVFLVgSwwm6owwsouZ0+D/LasbuHqYyqYqdyPJQYzWA2Y+F
)

// TestCore returns a pure in-memory, uninitialized core for testing.
func TestCore(t testing.T) *Core {
func TestCore(t testing.TB) *Core {
return TestCoreWithSeal(t, nil, false)
}

// TestCoreRaw returns a pure in-memory, uninitialized core for testing. The raw
// storage endpoints are enabled with this core.
func TestCoreRaw(t testing.T) *Core {
func TestCoreRaw(t testing.TB) *Core {
return TestCoreWithSeal(t, nil, true)
}

// TestCoreNewSeal returns a pure in-memory, uninitialized core with
// the new seal configuration.
func TestCoreNewSeal(t testing.T) *Core {
func TestCoreNewSeal(t testing.TB) *Core {
seal := NewTestSeal(t, nil)
return TestCoreWithSeal(t, seal, false)
}

// TestCoreWithConfig returns a pure in-memory, uninitialized core with the
// specified core configurations overridden for testing.
func TestCoreWithConfig(t testing.T, conf *CoreConfig) *Core {
func TestCoreWithConfig(t testing.TB, conf *CoreConfig) *Core {
return TestCoreWithSealAndUI(t, conf)
}

// TestCoreWithSeal returns a pure in-memory, uninitialized core with the
// specified seal for testing.
func TestCoreWithSeal(t testing.T, testSeal Seal, enableRaw bool) *Core {
func TestCoreWithSeal(t testing.TB, testSeal Seal, enableRaw bool) *Core {
conf := &CoreConfig{
Seal: testSeal,
EnableUI: false,
@@ -138,7 +138,7 @@ func TestCoreWithSeal(t testing.T, testSeal Seal, enableRaw bool) *Core {
return TestCoreWithSealAndUI(t, conf)
}

func TestCoreWithDeadlockDetection(t testing.T, testSeal Seal, enableRaw bool) *Core {
func TestCoreWithDeadlockDetection(t testing.TB, testSeal Seal, enableRaw bool) *Core {
conf := &CoreConfig{
Seal: testSeal,
EnableUI: false,
@@ -152,7 +152,7 @@ func TestCoreWithDeadlockDetection(t testing.T, testSeal Seal, enableRaw bool) *
return TestCoreWithSealAndUI(t, conf)
}

func TestCoreWithCustomResponseHeaderAndUI(t testing.T, CustomResponseHeaders map[string]map[string]string, enableUI bool) (*Core, [][]byte, string) {
func TestCoreWithCustomResponseHeaderAndUI(t testing.TB, CustomResponseHeaders map[string]map[string]string, enableUI bool) (*Core, [][]byte, string) {
confRaw := &server.Config{
SharedConfig: &configutil.SharedConfig{
Listeners: []*configutil.Listener{
@@ -175,7 +175,7 @@ func TestCoreWithCustomResponseHeaderAndUI(t testing.T, CustomResponseHeaders ma
return testCoreUnsealed(t, core)
}

func TestCoreUI(t testing.T, enableUI bool) *Core {
func TestCoreUI(t testing.TB, enableUI bool) *Core {
conf := &CoreConfig{
EnableUI: enableUI,
EnableRaw: true,
@@ -184,7 +184,7 @@ func TestCoreUI(t testing.T, enableUI bool) *Core {
return TestCoreWithSealAndUI(t, conf)
}

func TestCoreWithSealAndUI(t testing.T, opts *CoreConfig) *Core {
func TestCoreWithSealAndUI(t testing.TB, opts *CoreConfig) *Core {
c := TestCoreWithSealAndUINoCleanup(t, opts)

t.Cleanup(func() {
@@ -204,7 +204,7 @@ func TestCoreWithSealAndUI(t testing.T, opts *CoreConfig) *Core {
return c
}

func TestCoreWithSealAndUINoCleanup(t testing.T, opts *CoreConfig) *Core {
func TestCoreWithSealAndUINoCleanup(t testing.TB, opts *CoreConfig) *Core {
logger := corehelpers.NewTestLogger(t)
physicalBackend, err := physInmem.NewInmem(nil, logger)
if err != nil {
@@ -271,7 +271,7 @@ func TestCoreWithSealAndUINoCleanup(t testing.T, opts *CoreConfig) *Core {
return c
}

func testCoreConfig(t testing.T, physicalBackend physical.Backend, logger log.Logger) *CoreConfig {
func testCoreConfig(t testing.TB, physicalBackend physical.Backend, logger log.Logger) *CoreConfig {
t.Helper()
noopAudits := map[string]audit.Factory{
"noop": audit.NoopAuditFactory(nil),
@@ -322,13 +322,13 @@ func testCoreConfig(t testing.T, physicalBackend physical.Backend, logger log.Lo

// TestCoreInit initializes the core with a single key, and returns
// the key that must be used to unseal the core and a root token.
func TestCoreInit(t testing.T, core *Core) ([][]byte, string) {
func TestCoreInit(t testing.TB, core *Core) ([][]byte, string) {
t.Helper()
secretShares, _, root := TestCoreInitClusterWrapperSetup(t, core, nil)
return secretShares, root
}

func TestCoreInitClusterWrapperSetup(t testing.T, core *Core, handler http.Handler) ([][]byte, [][]byte, string) {
func TestCoreInitClusterWrapperSetup(t testing.TB, core *Core, handler http.Handler) ([][]byte, [][]byte, string) {
t.Helper()
core.SetClusterHandler(handler)

@@ -377,7 +377,7 @@ func TestCoreSeal(core *Core) error {

// TestCoreUnsealed returns a pure in-memory core that is already
// initialized and unsealed.
func TestCoreUnsealed(t testing.T) (*Core, [][]byte, string) {
func TestCoreUnsealed(t testing.TB) (*Core, [][]byte, string) {
t.Helper()
core := TestCore(t)
return testCoreUnsealed(t, core)
@@ -390,7 +390,7 @@ func SetupMetrics(conf *CoreConfig) *metrics.InmemSink {
return inmemSink
}

func TestCoreUnsealedWithMetrics(t testing.T) (*Core, [][]byte, string, *metrics.InmemSink) {
func TestCoreUnsealedWithMetrics(t testing.TB) (*Core, [][]byte, string, *metrics.InmemSink) {
t.Helper()
conf := &CoreConfig{
BuiltinRegistry: corehelpers.NewMockBuiltinRegistry(),
@@ -400,7 +400,7 @@ func TestCoreUnsealedWithMetrics(t testing.T) (*Core, [][]byte, string, *metrics
return core, keys, root, sink
}

func TestCoreUnsealedWithMetricsAndConfig(t testing.T, conf *CoreConfig) (*Core, [][]byte, string, *metrics.InmemSink) {
func TestCoreUnsealedWithMetricsAndConfig(t testing.TB, conf *CoreConfig) (*Core, [][]byte, string, *metrics.InmemSink) {
t.Helper()
conf.BuiltinRegistry = corehelpers.NewMockBuiltinRegistry()
sink := SetupMetrics(conf)
@@ -410,7 +410,7 @@ func TestCoreUnsealedWithMetricsAndConfig(t testing.T, conf *CoreConfig) (*Core,

// TestCoreUnsealedRaw returns a pure in-memory core that is already
// initialized, unsealed, and with raw endpoints enabled.
func TestCoreUnsealedRaw(t testing.T) (*Core, [][]byte, string) {
func TestCoreUnsealedRaw(t testing.TB) (*Core, [][]byte, string) {
t.Helper()
core := TestCoreRaw(t)
return testCoreUnsealed(t, core)
@@ -418,13 +418,13 @@ func TestCoreUnsealedRaw(t testing.T) (*Core, [][]byte, string) {

// TestCoreUnsealedWithConfig returns a pure in-memory core that is already
// initialized, unsealed, with the any provided core config values overridden.
func TestCoreUnsealedWithConfig(t testing.T, conf *CoreConfig) (*Core, [][]byte, string) {
func TestCoreUnsealedWithConfig(t testing.TB, conf *CoreConfig) (*Core, [][]byte, string) {
t.Helper()
core := TestCoreWithConfig(t, conf)
return testCoreUnsealed(t, core)
}

func testCoreUnsealed(t testing.T, core *Core) (*Core, [][]byte, string) {
func testCoreUnsealed(t testing.TB, core *Core) (*Core, [][]byte, string) {
t.Helper()
token, keys := TestInitUnsealCore(t, core)

@@ -432,7 +432,7 @@ func testCoreUnsealed(t testing.T, core *Core) (*Core, [][]byte, string) {
return core, keys, token
}

func TestInitUnsealCore(t testing.T, core *Core) (string, [][]byte) {
func TestInitUnsealCore(t testing.TB, core *Core) (string, [][]byte) {
keys, token := TestCoreInit(t, core)
for _, key := range keys {
if _, err := TestCoreUnseal(core, TestKeyCopy(key)); err != nil {
@@ -446,7 +446,7 @@ func TestInitUnsealCore(t testing.T, core *Core) (string, [][]byte) {
return token, keys
}

func testCoreAddSecretMount(t testing.T, core *Core, token, kvVersion string) {
func testCoreAddSecretMount(t testing.TB, core *Core, token, kvVersion string) {
kvReq := &logical.Request{
Operation: logical.UpdateOperation,
ClientToken: token,
@@ -469,7 +469,7 @@ func testCoreAddSecretMount(t testing.T, core *Core, token, kvVersion string) {
}
}

func TestCoreUnsealedBackend(t testing.T, backend physical.Backend) (*Core, [][]byte, string) {
func TestCoreUnsealedBackend(t testing.TB, backend physical.Backend) (*Core, [][]byte, string) {
t.Helper()
logger := corehelpers.NewTestLogger(t)
conf := testCoreConfig(t, backend, logger)
@@ -539,7 +539,7 @@ func TestDynamicSystemView(c *Core, ns *namespace.Namespace) logical.SystemView
}
}

func TestAddTestPlugin(t testing.T, core *Core, name string, pluginType consts.PluginType, version string, testFunc string, env []string) {
func TestAddTestPlugin(t testing.TB, core *Core, name string, pluginType consts.PluginType, version string, testFunc string, env []string) {
t.Helper()
plugincatalog.TestAddTestPlugin(t, core.pluginCatalog, name, pluginType, version, testFunc, env)
}
@@ -648,14 +648,14 @@ func GenerateRandBytes(length int) ([]byte, error) {
return buf, nil
}

func TestWaitActive(t testing.T, core *Core) {
func TestWaitActive(t testing.TB, core *Core) {
t.Helper()
if err := TestWaitActiveWithError(core); err != nil {
t.Fatal(err)
}
}

func TestWaitActiveForwardingReady(t testing.T, core *Core) {
func TestWaitActiveForwardingReady(t testing.TB, core *Core) {
t.Helper()
TestWaitActive(t, core)

@@ -724,7 +724,7 @@ func (c *TestCluster) SetRootToken(token string) {
func (c *TestCluster) Start() {
}

func (c *TestCluster) start(t testing.T) {
func (c *TestCluster) start(t testing.TB) {
t.Helper()
for i, core := range c.Cores {
if core.Server != nil {
@@ -788,14 +788,14 @@ WAITACTIVE:
}

// UnsealCores uses the cluster barrier keys to unseal the test cluster cores
func (c *TestCluster) UnsealCores(t testing.T) {
func (c *TestCluster) UnsealCores(t testing.TB) {
t.Helper()
if err := c.UnsealCoresWithError(t, false); err != nil {
t.Fatal(err)
}
}

func (c *TestCluster) UnsealCoresWithError(t testing.T, useStoredKeys bool) error {
func (c *TestCluster) UnsealCoresWithError(t testing.TB, useStoredKeys bool) error {
unseal := func(core *Core) error {
for _, key := range c.BarrierKeys {
if _, err := core.Unseal(TestKeyCopy(key)); err != nil {
@@ -851,7 +851,7 @@ func (c *TestCluster) UnsealCoresWithError(t testing.T, useStoredKeys bool) erro
return nil
}

func (c *TestCluster) UnsealCore(t testing.T, core *TestClusterCore) {
func (c *TestCluster) UnsealCore(t testing.TB, core *TestClusterCore) {
err := c.AttemptUnsealCore(core)
if err != nil {
t.Fatal(err)
@@ -873,21 +873,21 @@ func (c *TestCluster) AttemptUnsealCore(core *TestClusterCore) error {
return nil
}

func (c *TestCluster) UnsealCoreWithStoredKeys(t testing.T, core *TestClusterCore) {
func (c *TestCluster) UnsealCoreWithStoredKeys(t testing.TB, core *TestClusterCore) {
t.Helper()
if err := core.UnsealWithStoredKeys(context.Background()); err != nil {
t.Fatal(err)
}
}

func (c *TestCluster) EnsureCoresSealed(t testing.T) {
func (c *TestCluster) EnsureCoresSealed(t testing.TB) {
t.Helper()
if err := c.ensureCoresSealed(); err != nil {
t.Fatal(err)
}
}

func (c *TestClusterCore) Seal(t testing.T) {
func (c *TestClusterCore) Seal(t testing.TB) {
t.Helper()
if err := c.Core.sealInternal(); err != nil {
t.Fatal(err)
@@ -1098,13 +1098,13 @@ type TestClusterOptions struct {
// core in cluster will have 0, second 1, etc.
// If the backend is shared across the cluster (i.e. is not Raft) then it
// should return nil when coreIdx != 0.
PhysicalFactory func(t testing.T, coreIdx int, logger log.Logger, conf map[string]interface{}) *PhysicalBackendBundle
PhysicalFactory func(t testing.TB, coreIdx int, logger log.Logger, conf map[string]interface{}) *PhysicalBackendBundle
// FirstCoreNumber is used to assign a unique number to each core within
// a multi-cluster setup.
FirstCoreNumber int
RequireClientAuth bool
// SetupFunc is called after the cluster is started.
SetupFunc func(t testing.T, c *TestCluster)
SetupFunc func(t testing.TB, c *TestCluster)
PR1103Disabled bool

// ClusterLayers are used to override the default cluster connection layer
@@ -1164,7 +1164,7 @@ type certInfo struct {
// logger and will be the basis for each core's logger. If no opts.Logger is
// given, one will be generated based on t.Name() for the cluster logger, and if
// no base.Logger is given will also be used as the basis for each core's logger.
func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *TestCluster {
func NewTestCluster(t testing.TB, base *CoreConfig, opts *TestClusterOptions) *TestCluster {
var err error

if opts == nil {
@@ -1698,7 +1698,7 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te
}

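`PhysicalFactory` and `SetupFunc` are the two function-typed options whose parameter types change here, and `NewTestCluster` itself is the entry point nearly every call site in this commit touches. A hedged sketch of wiring them together under the new signatures (the inmem factory honors the comment above: for a shared, non-Raft backend only core 0 supplies a bundle):

```go
package example

import (
	"testing"

	log "github.com/hashicorp/go-hclog"
	vaulthttp "github.com/hashicorp/vault/http"
	"github.com/hashicorp/vault/sdk/physical/inmem"
	"github.com/hashicorp/vault/vault"
)

// newInmemCluster works for tests and benchmarks alike now that every
// function-typed field, and NewTestCluster itself, accepts testing.TB.
func newInmemCluster(tb testing.TB) *vault.TestCluster {
	tb.Helper()
	opts := &vault.TestClusterOptions{
		HandlerFunc: vaulthttp.Handler,
		PhysicalFactory: func(t testing.TB, coreIdx int, logger log.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle {
			if coreIdx != 0 {
				return nil // shared backend: only core 0 provides it
			}
			b, err := inmem.NewInmem(nil, logger)
			if err != nil {
				t.Fatal(err)
			}
			return &vault.PhysicalBackendBundle{Backend: b}
		},
		SetupFunc: func(t testing.TB, c *vault.TestCluster) {
			t.Log("cluster is up") // runs after the cluster has started
		},
	}
	cluster := vault.NewTestCluster(tb, nil, opts)
	cluster.Start()
	return cluster
}
```
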
// StopCore performs an orderly shutdown of a core.
func (cluster *TestCluster) StopCore(t testing.T, idx int) {
func (cluster *TestCluster) StopCore(t testing.TB, idx int) {
t.Helper()

if idx < 0 || idx > len(cluster.Cores) {
@@ -1716,7 +1716,7 @@ func (cluster *TestCluster) StopCore(t testing.T, idx int) {
cluster.cleanupFuncs[idx]()
}

func GenerateListenerAddr(t testing.T, opts *TestClusterOptions, certIPs []net.IP) (*net.TCPAddr, []net.IP) {
func GenerateListenerAddr(t testing.TB, opts *TestClusterOptions, certIPs []net.IP) (*net.TCPAddr, []net.IP) {
var baseAddr *net.TCPAddr
var err error

@@ -1738,7 +1738,7 @@ func GenerateListenerAddr(t testing.T, opts *TestClusterOptions, certIPs []net.I

// StartCore restarts a TestClusterCore that was stopped, by replacing the
// underlying Core.
func (cluster *TestCluster) StartCore(t testing.T, idx int, opts *TestClusterOptions) {
func (cluster *TestCluster) StartCore(t testing.TB, idx int, opts *TestClusterOptions) {
t.Helper()

if idx < 0 || idx > len(cluster.Cores) {
@@ -1795,7 +1795,7 @@ func (cluster *TestCluster) StartCore(t testing.T, idx int, opts *TestClusterOpt
tcc.Logger().Info("restarted test core", "core", idx)
}

func (testCluster *TestCluster) newCore(t testing.T, idx int, coreConfig *CoreConfig, opts *TestClusterOptions, listeners []*TestListener, pubKey ed25519.PublicKey) (func(), *Core, CoreConfig, http.Handler) {
func (testCluster *TestCluster) newCore(t testing.TB, idx int, coreConfig *CoreConfig, opts *TestClusterOptions, listeners []*TestListener, pubKey ed25519.PublicKey) (func(), *Core, CoreConfig, http.Handler) {
localConfig := *coreConfig
cleanupFunc := func() {}
var handler http.Handler
@@ -1933,7 +1933,7 @@ func (testCluster *TestCluster) newCore(t testing.T, idx int, coreConfig *CoreCo
}

func (testCluster *TestCluster) setupClusterListener(
t testing.T, idx int, core *Core, coreConfig *CoreConfig,
t testing.TB, idx int, core *Core, coreConfig *CoreConfig,
opts *TestClusterOptions, listeners []*TestListener, handler http.Handler,
) {
if coreConfig.ClusterAddr == "" {
@@ -1970,7 +1970,7 @@ func (testCluster *TestCluster) setupClusterListener(

// initCores attempts to initialize a core for a test cluster using the supplied
// options.
func (tc *TestCluster) initCores(t testing.T, opts *TestClusterOptions) {
func (tc *TestCluster) initCores(t testing.TB, opts *TestClusterOptions) {
leader := tc.Cores[0]

bKeys, rKeys, root := TestCoreInitClusterWrapperSetup(t, leader.Core, leader.Handler)
@@ -2087,7 +2087,7 @@ func (tc *TestCluster) initCores(t testing.T, opts *TestClusterOptions) {
}

func (testCluster *TestCluster) getAPIClient(
t testing.T, opts *TestClusterOptions,
t testing.TB, opts *TestClusterOptions,
port int, tlsConfig *tls.Config,
) *api.Client {
transport := cleanhttp.DefaultPooledTransport()

@@ -7,13 +7,12 @@ package vault

import (
"crypto/ed25519"

testing "github.com/mitchellh/go-testing-interface"
"testing"
)

func GenerateTestLicenseKeys() (ed25519.PublicKey, ed25519.PrivateKey, error) { return nil, nil, nil }
func testGetLicensingConfig(key ed25519.PublicKey) *LicensingConfig { return &LicensingConfig{} }
func testExtraTestCoreSetup(testing.T, ed25519.PrivateKey, *TestClusterCore) {}
func testExtraTestCoreSetup(testing.TB, ed25519.PrivateKey, *TestClusterCore) {}
func testAdjustUnderlyingStorage(tcc *TestClusterCore) {
tcc.UnderlyingStorage = tcc.physical
}

@@ -25,7 +25,6 @@ import (
"github.com/hashicorp/go-secure-stdlib/parseutil"
"github.com/hashicorp/go-sockaddr"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/helper/benchhelpers"
"github.com/hashicorp/vault/helper/identity"
"github.com/hashicorp/vault/helper/metricsutil"
"github.com/hashicorp/vault/helper/namespace"
@@ -544,7 +543,7 @@ func testMakeServiceTokenViaBackend(t testing.TB, ts *TokenStore, root, client,

func testMakeTokenViaBackend(t testing.TB, ts *TokenStore, root, client, ttl string, policy []string, batch bool) {
t.Helper()
req := logical.TestRequest(benchhelpers.TBtoT(t), logical.UpdateOperation, "create")
req := logical.TestRequest(t, logical.UpdateOperation, "create")
req.ClientToken = root
if batch {
req.Data["type"] = "batch"
@@ -654,7 +653,7 @@ func testMakeServiceTokenViaCore(t testing.TB, c *Core, root, client, ttl string
}

func testMakeTokenViaCore(t testing.TB, c *Core, root, client, ttl, period string, policy []string, batch bool, outAuth *logical.Auth) {
req := logical.TestRequest(benchhelpers.TBtoT(t), logical.UpdateOperation, "auth/token/create")
req := logical.TestRequest(t, logical.UpdateOperation, "auth/token/create")
req.ClientToken = root
if batch {
req.Data["type"] = "batch"
@@ -1413,7 +1412,7 @@ func TestTokenStore_RevokeTree(t *testing.T) {
// Revokes a given Token Store tree non recursively.
// The second parameter refers to the depth of the tree.
func testTokenStore_RevokeTree_NonRecursive(t testing.TB, depth uint64, injectCycles bool) {
c, _, _ := TestCoreUnsealed(benchhelpers.TBtoT(t))
c, _, _ := TestCoreUnsealed(t)
ts := c.tokenStore
root, children := buildTokenTree(t, ts, depth)
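
One further consequence of standardizing on `testing.TB`: `*testing.F` satisfies it too, so fuzz targets can drive the same helpers without any shim. A hypothetical example (not a test from this commit) built on `logical.TestRequest`:

```go
package example

import (
	"testing"

	"github.com/hashicorp/vault/sdk/logical"
)

func FuzzTokenRequestPaths(f *testing.F) {
	f.Add("auth/token/create")
	f.Fuzz(func(t *testing.T, path string) {
		// TestRequest builds a purely in-memory request for the fuzzed path.
		req := logical.TestRequest(t, logical.UpdateOperation, path)
		if req.Path != path {
			t.Fatalf("unexpected path: %q", req.Path)
		}
	})
}
```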