Add a -dev-three-node option for devs. (#3081)

Jeff Mitchell
2017-07-31 11:28:06 -04:00
committed by GitHub
parent dd72c96dc8
commit c6615e1b51
18 changed files with 878 additions and 551 deletions

View File

@@ -33,31 +33,27 @@ func testVaultServerBackends(t testing.TB, backends map[string]logical.Factory)
LogicalBackends: backends,
}
cluster := vault.NewTestCluster(t, coreConfig, true)
cluster.StartListeners()
for _, core := range cluster.Cores {
core.Handler.Handle("/", vaulthttp.Handler(core.Core))
}
cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
})
cluster.Start()
// make it easy to get access to the active
core := cluster.Cores[0].Core
vault.TestWaitActive(t, core)
// Grab the root token
rootToken := cluster.Cores[0].Root
client := cluster.Cores[0].Client
client.SetToken(rootToken)
client.SetToken(cluster.RootToken)
// Sanity check
secret, err := client.Auth().Token().LookupSelf()
if err != nil {
t.Fatal(err)
}
if secret == nil || secret.Data["id"].(string) != rootToken {
t.Fatalf("token mismatch: %#v vs %q", secret, rootToken)
if secret == nil || secret.Data["id"].(string) != cluster.RootToken {
t.Fatalf("token mismatch: %#v vs %q", secret, cluster.RootToken)
}
return client, func() { defer cluster.CloseListeners() }
return client, func() { defer cluster.Cleanup() }
}
// testPostgresDB creates a testing postgres database in a Docker container,
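For reference, the consolidated shape of the new test-cluster setup that this commit applies across all of these files; a minimal sketch assuming only the API visible in these hunks:

package example

import (
    "testing"

    vaulthttp "github.com/hashicorp/vault/http"
    "github.com/hashicorp/vault/vault"
)

func TestClusterSetup(t *testing.T) {
    // NewTestCluster now takes an options struct; HandlerFunc replaces the
    // old per-core core.Handler.Handle("/", ...) wiring.
    cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{
        HandlerFunc: vaulthttp.Handler,
    })
    cluster.Start()         // formerly StartListeners()
    defer cluster.Cleanup() // formerly CloseListeners(); also removes the temp dir

    // The root token moved from Cores[0].Root to the cluster itself.
    client := cluster.Cores[0].Client
    client.SetToken(cluster.RootToken)
}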

View File

@@ -12,7 +12,7 @@ import (
"github.com/hashicorp/vault/builtin/logical/database/dbplugin"
"github.com/hashicorp/vault/helper/pluginutil"
"github.com/hashicorp/vault/http"
vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/plugins/database/postgresql"
"github.com/hashicorp/vault/vault"
@@ -84,12 +84,13 @@ func getCluster(t *testing.T) (*vault.TestCluster, logical.SystemView) {
},
}
cluster := vault.NewTestCluster(t, coreConfig, false)
cluster.StartListeners()
cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
})
cluster.Start()
cores := cluster.Cores
cores[0].Handler.Handle("/", http.Handler(cores[0].Core))
cores[1].Handler.Handle("/", http.Handler(cores[1].Core))
cores[2].Handler.Handle("/", http.Handler(cores[2].Core))
os.Setenv(pluginutil.PluginCACertPEMEnv, string(cluster.CACertPEM))
sys := vault.TestDynamicSystemView(cores[0].Core)
vault.TestAddTestPlugin(t, cores[0].Core, "postgresql-database-plugin", "TestBackend_PluginMain")
@@ -102,7 +103,12 @@ func TestBackend_PluginMain(t *testing.T) {
return
}
content := []byte(vault.TestClusterCACert)
caPem := os.Getenv(pluginutil.PluginCACertPEMEnv)
if caPem == "" {
t.Fatal("CA cert not passed in")
}
content := []byte(caPem)
tmpfile, err := ioutil.TempFile("", "example")
if err != nil {
t.Fatal(err)
@@ -131,7 +137,7 @@ func TestBackend_config_connection(t *testing.T) {
var err error
cluster, sys := getCluster(t)
defer cluster.CloseListeners()
defer cluster.Cleanup()
config := logical.TestBackendConfig()
config.StorageView = &logical.InmemStorage{}
@@ -194,7 +200,7 @@ func TestBackend_config_connection(t *testing.T) {
func TestBackend_basic(t *testing.T) {
cluster, sys := getCluster(t)
defer cluster.CloseListeners()
defer cluster.Cleanup()
config := logical.TestBackendConfig()
config.StorageView = &logical.InmemStorage{}
@@ -285,7 +291,7 @@ func TestBackend_basic(t *testing.T) {
func TestBackend_connectionCrud(t *testing.T) {
cluster, sys := getCluster(t)
defer cluster.CloseListeners()
defer cluster.Cleanup()
config := logical.TestBackendConfig()
config.StorageView = &logical.InmemStorage{}
@@ -430,7 +436,7 @@ func TestBackend_connectionCrud(t *testing.T) {
func TestBackend_roleCrud(t *testing.T) {
cluster, sys := getCluster(t)
defer cluster.CloseListeners()
defer cluster.Cleanup()
config := logical.TestBackendConfig()
config.StorageView = &logical.InmemStorage{}
@@ -540,7 +546,7 @@ func TestBackend_roleCrud(t *testing.T) {
}
func TestBackend_allowedRoles(t *testing.T) {
cluster, sys := getCluster(t)
defer cluster.CloseListeners()
defer cluster.Cleanup()
config := logical.TestBackendConfig()
config.StorageView = &logical.InmemStorage{}
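The CA handoff that several of these test files repeat is worth seeing end to end: the test process exports the cluster's freshly generated CA PEM, and the re-exec'd plugin binary reads it back to trust the test listeners. A sketch of both ends, using only names from these hunks:

// Parent test process, after building the cluster:
os.Setenv(pluginutil.PluginCACertPEMEnv, string(cluster.CACertPEM))

// Plugin subprocess (the TestBackend_PluginMain re-exec): recover the CA and
// write it to a temp file for the plugin's API client.
caPem := os.Getenv(pluginutil.PluginCACertPEMEnv)
if caPem == "" {
    t.Fatal("CA cert not passed in")
}
tmpfile, err := ioutil.TempFile("", "example")
if err != nil {
    t.Fatal(err)
}
if _, err := tmpfile.Write([]byte(caPem)); err != nil {
    t.Fatal(err)
}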

View File

@@ -8,7 +8,7 @@ import (
"github.com/hashicorp/vault/builtin/logical/database/dbplugin"
"github.com/hashicorp/vault/helper/pluginutil"
"github.com/hashicorp/vault/http"
vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/plugins"
"github.com/hashicorp/vault/vault"
@@ -73,14 +73,11 @@ func (m *mockPlugin) Close() error {
}
func getCluster(t *testing.T) (*vault.TestCluster, logical.SystemView) {
coreConfig := &vault.CoreConfig{}
cluster := vault.NewTestCluster(t, coreConfig, false)
cluster.StartListeners()
cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
})
cluster.Start()
cores := cluster.Cores
cores[0].Handler.Handle("/", http.Handler(cores[0].Core))
cores[1].Handler.Handle("/", http.Handler(cores[1].Core))
cores[2].Handler.Handle("/", http.Handler(cores[2].Core))
sys := vault.TestDynamicSystemView(cores[0].Core)
vault.TestAddTestPlugin(t, cores[0].Core, "test-plugin", "TestPlugin_Main")
@@ -110,7 +107,7 @@ func TestPlugin_Main(t *testing.T) {
func TestPlugin_Initialize(t *testing.T) {
cluster, sys := getCluster(t)
defer cluster.CloseListeners()
defer cluster.Cleanup()
dbRaw, err := dbplugin.PluginFactory("test-plugin", sys, &log.NullLogger{})
if err != nil {
@@ -134,7 +131,7 @@ func TestPlugin_Initialize(t *testing.T) {
func TestPlugin_CreateUser(t *testing.T) {
cluster, sys := getCluster(t)
defer cluster.CloseListeners()
defer cluster.Cleanup()
db, err := dbplugin.PluginFactory("test-plugin", sys, &log.NullLogger{})
if err != nil {
@@ -174,7 +171,7 @@ func TestPlugin_CreateUser(t *testing.T) {
func TestPlugin_RenewUser(t *testing.T) {
cluster, sys := getCluster(t)
defer cluster.CloseListeners()
defer cluster.Cleanup()
db, err := dbplugin.PluginFactory("test-plugin", sys, &log.NullLogger{})
if err != nil {
@@ -208,7 +205,7 @@ func TestPlugin_RenewUser(t *testing.T) {
func TestPlugin_RevokeUser(t *testing.T) {
cluster, sys := getCluster(t)
defer cluster.CloseListeners()
defer cluster.Cleanup()
db, err := dbplugin.PluginFactory("test-plugin", sys, &log.NullLogger{})
if err != nil {

View File

@@ -22,16 +22,14 @@ func TestTransit_Issue_2958(t *testing.T) {
},
}
cluster := vault.NewTestCluster(t, coreConfig, true)
cluster.StartListeners()
defer cluster.CloseListeners()
cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
})
cluster.Start()
defer cluster.Cleanup()
cores := cluster.Cores
cores[0].Handler.Handle("/", vaulthttp.Handler(cores[0].Core))
cores[1].Handler.Handle("/", vaulthttp.Handler(cores[1].Core))
cores[2].Handler.Handle("/", vaulthttp.Handler(cores[2].Core))
vault.TestWaitActive(t, cores[0].Core)
client := cores[0].Client

View File

@@ -6,7 +6,7 @@ import (
"testing"
"github.com/hashicorp/vault/helper/pluginutil"
"github.com/hashicorp/vault/http"
vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/plugin"
"github.com/hashicorp/vault/logical/plugin/mock"
@@ -38,7 +38,12 @@ func TestBackend_PluginMain(t *testing.T) {
return
}
content := []byte(vault.TestClusterCACert)
caPem := os.Getenv(pluginutil.PluginCACertPEMEnv)
if caPem == "" {
t.Fatal("CA cert not passed in")
}
content := []byte(caPem)
tmpfile, err := ioutil.TempFile("", "test-cacert")
if err != nil {
t.Fatal(err)
@@ -71,16 +76,12 @@ func TestBackend_PluginMain(t *testing.T) {
}
func testConfig(t *testing.T) (*logical.BackendConfig, func()) {
coreConfig := &vault.CoreConfig{}
cluster := vault.NewTestCluster(t, coreConfig, true)
cluster.StartListeners()
cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
})
cluster.Start()
cores := cluster.Cores
cores[0].Handler.Handle("/", http.Handler(cores[0].Core))
cores[1].Handler.Handle("/", http.Handler(cores[1].Core))
cores[2].Handler.Handle("/", http.Handler(cores[2].Core))
core := cores[0]
sys := vault.TestDynamicSystemView(core.Core)
@@ -93,9 +94,11 @@ func testConfig(t *testing.T) (*logical.BackendConfig, func()) {
},
}
os.Setenv(pluginutil.PluginCACertPEMEnv, string(cluster.CACertPEM))
vault.TestAddTestPlugin(t, core.Core, "mock-plugin", "TestBackend_PluginMain")
return config, func() {
cluster.CloseListeners()
cluster.Cleanup()
}
}

View File

@@ -3,17 +3,20 @@ package command
import (
"encoding/base64"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"os/signal"
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"syscall"
"testing"
"time"
"golang.org/x/net/http2"
@@ -35,6 +38,7 @@ import (
"github.com/hashicorp/vault/helper/logformat"
"github.com/hashicorp/vault/helper/mlock"
"github.com/hashicorp/vault/helper/parseutil"
"github.com/hashicorp/vault/helper/reload"
vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/meta"
@@ -56,16 +60,17 @@ type ServerCommand struct {
meta.Meta
logGate *gatedwriter.Writer
logger log.Logger
cleanupGuard sync.Once
reloadFuncsLock *sync.RWMutex
reloadFuncs *map[string][]vault.ReloadFunc
reloadFuncs *map[string][]reload.ReloadFunc
}
func (c *ServerCommand) Run(args []string) int {
var dev, verifyOnly, devHA, devTransactional, devLeasedGeneric bool
var dev, verifyOnly, devHA, devTransactional, devLeasedGeneric, devThreeNode bool
var configPath []string
var logLevel, devRootTokenID, devListenAddress string
flags := c.Meta.FlagSet("server", meta.FlagSetDefault)
@@ -77,6 +82,7 @@ func (c *ServerCommand) Run(args []string) int {
flags.BoolVar(&devHA, "dev-ha", false, "")
flags.BoolVar(&devTransactional, "dev-transactional", false, "")
flags.BoolVar(&devLeasedGeneric, "dev-leased-generic", false, "")
flags.BoolVar(&devThreeNode, "dev-three-node", false, "")
flags.Usage = func() { c.Ui.Output(c.Help()) }
flags.Var((*sliceflag.StringFlag)(&configPath), "config", "config")
if err := flags.Parse(args); err != nil {
@@ -85,7 +91,7 @@ func (c *ServerCommand) Run(args []string) int {
// Create a logger. We wrap it in a gated writer so that it doesn't
// start logging too early.
logGate := &gatedwriter.Writer{Writer: colorable.NewColorable(os.Stderr)}
c.logGate = &gatedwriter.Writer{Writer: colorable.NewColorable(os.Stderr)}
var level int
logLevel = strings.ToLower(strings.TrimSpace(logLevel))
switch logLevel {
@@ -112,9 +118,9 @@ func (c *ServerCommand) Run(args []string) int {
}
switch strings.ToLower(logFormat) {
case "vault", "vault_json", "vault-json", "vaultjson", "json", "":
c.logger = logformat.NewVaultLoggerWithWriter(logGate, level)
c.logger = logformat.NewVaultLoggerWithWriter(c.logGate, level)
default:
c.logger = log.NewLogger(logGate, "vault")
c.logger = log.NewLogger(c.logGate, "vault")
c.logger.SetLevel(level)
}
grpclog.SetLogger(&grpclogFaker{
@@ -129,7 +135,7 @@ func (c *ServerCommand) Run(args []string) int {
devListenAddress = os.Getenv("VAULT_DEV_LISTEN_ADDRESS")
}
if devHA || devTransactional || devLeasedGeneric {
if devHA || devTransactional || devLeasedGeneric || devThreeNode {
dev = true
}
@@ -250,6 +256,10 @@ func (c *ServerCommand) Run(args []string) int {
}
}
if devThreeNode {
return c.enableThreeNodeDevCluster(coreConfig, info, infoKeys, devListenAddress)
}
var disableClustering bool
// Initialize the separate HA storage backend, if it exists
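With this branch, `vault server -dev-three-node` bypasses the single-core startup entirely; enableThreeNodeDevCluster (added below) builds the cluster through vault.NewTestCluster. When -dev-listen-address is also given it becomes the cluster's BaseListenAddress, and per the testing.go changes later in this commit the three nodes bind consecutive ports; sketched:

baseAddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:8200") // from -dev-listen-address
if err != nil {
    panic(err) // the real code fails startup instead
}
ports := []int{baseAddr.Port, baseAddr.Port + 1, baseAddr.Port + 2} // one per core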
@@ -422,7 +432,7 @@ CLUSTER_SYNTHESIS_COMPLETE:
c.reloadFuncsLock.Lock()
lns := make([]net.Listener, 0, len(config.Listeners))
for i, lnConfig := range config.Listeners {
ln, props, reloadFunc, err := server.NewListener(lnConfig.Type, lnConfig.Config, logGate)
ln, props, reloadFunc, err := server.NewListener(lnConfig.Type, lnConfig.Config, c.logGate)
if err != nil {
c.Ui.Output(fmt.Sprintf(
"Error initializing listener of type %s: %s",
@@ -567,7 +577,7 @@ CLUSTER_SYNTHESIS_COMPLETE:
// If we're in Dev mode, then initialize the core
if dev {
init, err := c.enableDev(core, devRootTokenID)
init, err := c.enableDev(core, coreConfig)
if err != nil {
c.Ui.Output(fmt.Sprintf(
"Error initializing Dev mode: %s", err))
@@ -618,7 +628,7 @@ CLUSTER_SYNTHESIS_COMPLETE:
c.Ui.Output("==> Vault server started! Log data will stream in below:\n")
// Release the log gate.
logGate.Flush()
c.logGate.Flush()
// Wait for shutdown
shutdownTriggered := false
@@ -642,7 +652,7 @@ CLUSTER_SYNTHESIS_COMPLETE:
case <-c.SighupCh:
c.Ui.Output("==> Vault reload triggered")
if err := c.Reload(configPath); err != nil {
if err := c.Reload(c.reloadFuncsLock, c.reloadFuncs, configPath); err != nil {
c.Ui.Output(fmt.Sprintf("Error(s) were encountered during reload: %s", err))
}
}
@@ -653,7 +663,7 @@ CLUSTER_SYNTHESIS_COMPLETE:
return 0
}
func (c *ServerCommand) enableDev(core *vault.Core, rootTokenID string) (*vault.InitResult, error) {
func (c *ServerCommand) enableDev(core *vault.Core, coreConfig *vault.CoreConfig) (*vault.InitResult, error) {
// Initialize it with a basic single key
init, err := core.Initialize(&vault.InitParams{
BarrierConfig: &vault.SealConfig{
@@ -700,14 +710,14 @@ func (c *ServerCommand) enableDev(core *vault.Core, rootTokenID string) (*vault.
}
}
if rootTokenID != "" {
if coreConfig.DevToken != "" {
req := &logical.Request{
ID: "dev-gen-root",
Operation: logical.UpdateOperation,
ClientToken: init.RootToken,
Path: "auth/token/create",
Data: map[string]interface{}{
"id": rootTokenID,
"id": coreConfig.DevToken,
"policies": []string{"root"},
"no_parent": true,
"no_default_policy": true,
@@ -715,13 +725,13 @@ func (c *ServerCommand) enableDev(core *vault.Core, rootTokenID string) (*vault.
}
resp, err := core.HandleRequest(req)
if err != nil {
return nil, fmt.Errorf("failed to create root token with ID %s: %s", rootTokenID, err)
return nil, fmt.Errorf("failed to create root token with ID %s: %s", coreConfig.DevToken, err)
}
if resp == nil {
return nil, fmt.Errorf("nil response when creating root token with ID %s", rootTokenID)
return nil, fmt.Errorf("nil response when creating root token with ID %s", coreConfig.DevToken)
}
if resp.Auth == nil {
return nil, fmt.Errorf("nil auth when creating root token with ID %s", rootTokenID)
return nil, fmt.Errorf("nil auth when creating root token with ID %s", coreConfig.DevToken)
}
init.RootToken = resp.Auth.ClientToken
@@ -747,6 +757,168 @@ func (c *ServerCommand) enableDev(core *vault.Core, rootTokenID string) (*vault.
return init, nil
}
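For orientation, the request both dev paths now use to mint a root token with a caller-chosen ID, assembled from the hunks above (the three-node variant below differs only in where the client token and error reporting come from):

req := &logical.Request{
    ID:          "dev-gen-root",
    Operation:   logical.UpdateOperation,
    ClientToken: init.RootToken, // testCluster.RootToken in the three-node path
    Path:        "auth/token/create",
    Data: map[string]interface{}{
        "id":                coreConfig.DevToken,
        "policies":          []string{"root"},
        "no_parent":         true,
        "no_default_policy": true,
    },
}
resp, err := core.HandleRequest(req)
// err, nil resp, and nil resp.Auth are each fatal; on success the caller adopts:
init.RootToken = resp.Auth.ClientToken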
func (c *ServerCommand) enableThreeNodeDevCluster(base *vault.CoreConfig, info map[string]string, infoKeys []string, devListenAddress string) int {
testCluster := vault.NewTestCluster(&testing.T{}, base, &vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
BaseListenAddress: devListenAddress,
})
defer c.cleanupGuard.Do(testCluster.Cleanup)
info["cluster parameters path"] = testCluster.TempDir
info["log level"] = "trace"
infoKeys = append(infoKeys, "cluster parameters path", "log level")
for i, core := range testCluster.Cores {
info[fmt.Sprintf("node %d redirect address", i)] = fmt.Sprintf("https://%s", core.Listeners[0].Address.String())
infoKeys = append(infoKeys, fmt.Sprintf("node %d redirect address", i))
}
infoKeys = append(infoKeys, "version")
verInfo := version.GetVersion()
info["version"] = verInfo.FullVersionNumber(false)
if verInfo.Revision != "" {
info["version sha"] = strings.Trim(verInfo.Revision, "'")
infoKeys = append(infoKeys, "version sha")
}
infoKeys = append(infoKeys, "cgo")
info["cgo"] = "disabled"
if version.CgoEnabled {
info["cgo"] = "enabled"
}
// Server configuration output
padding := 24
sort.Strings(infoKeys)
c.Ui.Output("==> Vault server configuration:\n")
for _, k := range infoKeys {
c.Ui.Output(fmt.Sprintf(
"%s%s: %s",
strings.Repeat(" ", padding-len(k)),
strings.Title(k),
info[k]))
}
c.Ui.Output("")
for _, core := range testCluster.Cores {
core.Server.Handler = vaulthttp.Handler(core.Core)
core.SetClusterHandler(core.Server.Handler)
}
testCluster.Start()
if base.DevToken != "" {
req := &logical.Request{
ID: "dev-gen-root",
Operation: logical.UpdateOperation,
ClientToken: testCluster.RootToken,
Path: "auth/token/create",
Data: map[string]interface{}{
"id": base.DevToken,
"policies": []string{"root"},
"no_parent": true,
"no_default_policy": true,
},
}
resp, err := testCluster.Cores[0].HandleRequest(req)
if err != nil {
c.Ui.Output(fmt.Sprintf("failed to create root token with ID %s: %s", base.DevToken, err))
return 1
}
if resp == nil {
c.Ui.Output(fmt.Sprintf("nil response when creating root token with ID %s", base.DevToken))
return 1
}
if resp.Auth == nil {
c.Ui.Output(fmt.Sprintf("nil auth when creating root token with ID %s", base.DevToken))
return 1
}
testCluster.RootToken = resp.Auth.ClientToken
req.ID = "dev-revoke-init-root"
req.Path = "auth/token/revoke-self"
req.Data = nil
resp, err = testCluster.Cores[0].HandleRequest(req)
if err != nil {
c.Ui.Output(fmt.Sprintf("failed to revoke initial root token: %s", err))
return 1
}
}
// Set the token
tokenHelper, err := c.TokenHelper()
if err != nil {
c.Ui.Output(fmt.Sprintf("%v", err))
return 1
}
if err := tokenHelper.Store(testCluster.RootToken); err != nil {
c.Ui.Output(fmt.Sprintf("%v", err))
return 1
}
if err := ioutil.WriteFile(filepath.Join(testCluster.TempDir, "root_token"), []byte(testCluster.RootToken), 0755); err != nil {
c.Ui.Output(fmt.Sprintf("%v", err))
return 1
}
c.Ui.Output(fmt.Sprintf(
"==> Three node dev mode is enabled\n\n" +
"The unseal key and root token are reproduced below in case you\n" +
"want to seal/unseal the Vault or play with authentication.\n",
))
for i, key := range testCluster.BarrierKeys {
c.Ui.Output(fmt.Sprintf(
"Unseal Key %d: %s",
i, base64.StdEncoding.EncodeToString(key),
))
}
c.Ui.Output(fmt.Sprintf(
"\nRoot Token: %s\n", testCluster.RootToken,
))
// Output the header that the server has started
c.Ui.Output("==> Vault server started! Log data will stream in below:\n")
// Release the log gate.
c.logGate.Flush()
// Wait for shutdown
shutdownTriggered := false
for !shutdownTriggered {
select {
case <-c.ShutdownCh:
c.Ui.Output("==> Vault shutdown triggered")
// Stop the listeners so that we don't process further client requests.
c.cleanupGuard.Do(testCluster.Cleanup)
// Shutdown will wait until after Vault is sealed, which means the
// request forwarding listeners will also be closed (and also
// waited for).
for _, core := range testCluster.Cores {
if err := core.Shutdown(); err != nil {
c.Ui.Output(fmt.Sprintf("Error with core shutdown: %s", err))
}
}
shutdownTriggered = true
case <-c.SighupCh:
c.Ui.Output("==> Vault reload triggered")
for _, core := range testCluster.Cores {
if err := c.Reload(core.ReloadFuncsLock, core.ReloadFuncs, nil); err != nil {
c.Ui.Output(fmt.Sprintf("Error(s) were encountered during reload: %s", err))
}
}
}
}
return 0
}
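Operationally the function above behaves like a long-running `vault server`: it prints the unseal keys and root token, persists the token via the token helper and as root_token inside TempDir (surfaced as "cluster parameters path"), and then blocks on the same shutdown/SIGHUP loop as the regular server. cleanupGuard is a sync.Once, so the deferred Cleanup and the explicit one in the shutdown path cannot run twice. A sketch of picking the token back up from disk in a test or helper script (file name from the code above; client and tempDir are assumed):

tokenBytes, err := ioutil.ReadFile(filepath.Join(tempDir, "root_token"))
if err != nil {
    panic(err)
}
client.SetToken(string(tokenBytes)) // client is an *api.Client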
// detectRedirect is used to attempt redirect address detection
func (c *ServerCommand) detectRedirect(detect physical.RedirectDetect,
config *server.Config) (string, error) {
@@ -921,51 +1093,24 @@ func (c *ServerCommand) setupTelemetry(config *server.Config) error {
return nil
}
func (c *ServerCommand) Reload(configPath []string) error {
c.reloadFuncsLock.RLock()
defer c.reloadFuncsLock.RUnlock()
func (c *ServerCommand) Reload(lock *sync.RWMutex, reloadFuncs *map[string][]reload.ReloadFunc, configPath []string) error {
lock.RLock()
defer lock.RUnlock()
var reloadErrors *multierror.Error
// Read the new config
var config *server.Config
for _, path := range configPath {
current, err := server.LoadConfig(path, c.logger)
if err != nil {
reloadErrors = multierror.Append(reloadErrors, fmt.Errorf("Error loading configuration from %s: %s", path, err))
goto audit
}
if config == nil {
config = current
} else {
config = config.Merge(current)
}
}
// Ensure at least one config was found.
if config == nil {
reloadErrors = multierror.Append(reloadErrors, fmt.Errorf("No configuration files found"))
goto audit
}
// Call reload on the listeners. This will call each listener with each
// config block, but they verify the address.
for _, lnConfig := range config.Listeners {
for _, relFunc := range (*c.reloadFuncs)["listener|"+lnConfig.Type] {
if err := relFunc(lnConfig.Config); err != nil {
reloadErrors = multierror.Append(reloadErrors, fmt.Errorf("Error encountered reloading configuration: %s", err))
goto audit
for k, relFuncs := range *reloadFuncs {
switch {
case strings.HasPrefix(k, "listener|"):
for _, relFunc := range relFuncs {
if relFunc != nil {
if err := relFunc(nil); err != nil {
reloadErrors = multierror.Append(reloadErrors, fmt.Errorf("Error encountered reloading listener: %v", err))
}
}
}
audit:
// file audit reload funcs
for k, relFuncs := range *c.reloadFuncs {
if !strings.HasPrefix(k, "audit_file|") {
continue
}
case strings.HasPrefix(k, "audit_file|"):
for _, relFunc := range relFuncs {
if relFunc != nil {
if err := relFunc(nil); err != nil {
@@ -974,6 +1119,7 @@ audit:
}
}
}
}
return reloadErrors.ErrorOrNil()
}
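The reload path no longer re-parses the config to find listener blocks: CertificateGetter (added below in helper/reload) captures its cert and key paths at construction, so Reload can simply walk the registered funcs by key prefix. Passing the lock and map explicitly lets the three-node dev cluster reuse the same entry point per core; the two call sites, from the hunks above:

// Config-file server path:
if err := c.Reload(c.reloadFuncsLock, c.reloadFuncs, configPath); err != nil {
    c.Ui.Output(fmt.Sprintf("Error(s) were encountered during reload: %s", err))
}

// Three-node dev cluster path, once per core:
for _, core := range testCluster.Cores {
    if err := c.Reload(core.ReloadFuncsLock, core.ReloadFuncs, nil); err != nil {
        c.Ui.Output(fmt.Sprintf("Error(s) were encountered during reload: %s", err))
    }
}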

View File

@@ -69,9 +69,6 @@ func DevConfig(ha, transactional bool) *Config {
EnableUI: true,
Telemetry: &Telemetry{},
MaxLeaseTTL: 32 * 24 * time.Hour,
DefaultLeaseTTL: 32 * 24 * time.Hour,
}
switch {

View File

@@ -8,15 +8,14 @@ import (
"fmt"
"io"
"net"
"sync"
"github.com/hashicorp/vault/helper/parseutil"
"github.com/hashicorp/vault/helper/reload"
"github.com/hashicorp/vault/helper/tlsutil"
"github.com/hashicorp/vault/vault"
)
// ListenerFactory is the factory function to create a listener.
type ListenerFactory func(map[string]interface{}, io.Writer) (net.Listener, map[string]string, vault.ReloadFunc, error)
type ListenerFactory func(map[string]interface{}, io.Writer) (net.Listener, map[string]string, reload.ReloadFunc, error)
// BuiltinListeners is the list of built-in listener types.
var BuiltinListeners = map[string]ListenerFactory{
@@ -25,7 +24,7 @@ var BuiltinListeners = map[string]ListenerFactory{
// NewListener creates a new listener of the given type with the given
// configuration. The type is looked up in the BuiltinListeners map.
func NewListener(t string, config map[string]interface{}, logger io.Writer) (net.Listener, map[string]string, vault.ReloadFunc, error) {
func NewListener(t string, config map[string]interface{}, logger io.Writer) (net.Listener, map[string]string, reload.ReloadFunc, error) {
f, ok := BuiltinListeners[t]
if !ok {
return nil, nil, nil, fmt.Errorf("unknown listener type: %s", t)
@@ -37,7 +36,7 @@ func NewListener(t string, config map[string]interface{}, logger io.Writer) (net
func listenerWrapTLS(
ln net.Listener,
props map[string]string,
config map[string]interface{}) (net.Listener, map[string]string, vault.ReloadFunc, error) {
config map[string]interface{}) (net.Listener, map[string]string, reload.ReloadFunc, error) {
props["tls"] = "disabled"
if v, ok := config["tls_disable"]; ok {
@@ -60,16 +59,9 @@ func listenerWrapTLS(
return nil, nil, nil, fmt.Errorf("'tls_key_file' must be set")
}
addrRaw, ok := config["address"]
if !ok {
return nil, nil, nil, fmt.Errorf("'address' must be set")
}
addr := addrRaw.(string)
cg := &certificateGetter{
id: addr,
}
cg := reload.NewCertificateGetter(config["tls_cert_file"].(string), config["tls_key_file"].(string))
if err := cg.reload(config); err != nil {
if err := cg.Reload(config); err != nil {
return nil, nil, nil, fmt.Errorf("error loading TLS cert: %s", err)
}
@@ -82,7 +74,7 @@ func listenerWrapTLS(
}
tlsConf := &tls.Config{}
tlsConf.GetCertificate = cg.getCertificate
tlsConf.GetCertificate = cg.GetCertificate
tlsConf.NextProtos = []string{"h2", "http/1.1"}
tlsConf.MinVersion, ok = tlsutil.TLSLookup[tlsvers]
if !ok {
@@ -116,42 +108,5 @@ func listenerWrapTLS(
ln = tls.NewListener(ln, tlsConf)
props["tls"] = "enabled"
return ln, props, cg.reload, nil
}
type certificateGetter struct {
sync.RWMutex
cert *tls.Certificate
id string
}
func (cg *certificateGetter) reload(config map[string]interface{}) error {
if config["address"].(string) != cg.id {
return nil
}
cert, err := tls.LoadX509KeyPair(config["tls_cert_file"].(string), config["tls_key_file"].(string))
if err != nil {
return err
}
cg.Lock()
defer cg.Unlock()
cg.cert = &cert
return nil
}
func (cg *certificateGetter) getCertificate(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
cg.RLock()
defer cg.RUnlock()
if cg.cert == nil {
return nil, fmt.Errorf("nil certificate")
}
return cg.cert, nil
return ln, props, cg.Reload, nil
}

View File

@@ -6,10 +6,10 @@ import (
"strings"
"time"
"github.com/hashicorp/vault/vault"
"github.com/hashicorp/vault/helper/reload"
)
func tcpListenerFactory(config map[string]interface{}, _ io.Writer) (net.Listener, map[string]string, vault.ReloadFunc, error) {
func tcpListenerFactory(config map[string]interface{}, _ io.Writer) (net.Listener, map[string]string, reload.ReloadFunc, error) {
bind_proto := "tcp"
var addr string
addrRaw, ok := config["address"]

View File

@@ -58,8 +58,8 @@ disable_mlock = true
listener "tcp" {
address = "127.0.0.1:8203"
tls_cert_file = "TMPDIR/reload_FILE.pem"
tls_key_file = "TMPDIR/reload_FILE.key"
tls_cert_file = "TMPDIR/reload_cert.pem"
tls_key_file = "TMPDIR/reload_key.pem"
}
`
)
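Since CertificateGetter pins its paths at construction, the test below no longer rewrites the config with a different FILE placeholder; it overwrites the fixed-path cert and key in place and signals a reload:

// Swap certificate bytes under the fixed paths, then trigger SIGHUP handling.
inBytes, _ = ioutil.ReadFile(wd + "reload_bar.pem")
ioutil.WriteFile(td+"/reload_cert.pem", inBytes, 0777)
inBytes, _ = ioutil.ReadFile(wd + "reload_bar.key")
ioutil.WriteFile(td+"/reload_key.pem", inBytes, 0777)
c.SighupCh <- struct{}{}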
@@ -79,15 +79,11 @@ func TestServer_ReloadListener(t *testing.T) {
// Setup initial certs
inBytes, _ := ioutil.ReadFile(wd + "reload_foo.pem")
ioutil.WriteFile(td+"/reload_foo.pem", inBytes, 0777)
ioutil.WriteFile(td+"/reload_cert.pem", inBytes, 0777)
inBytes, _ = ioutil.ReadFile(wd + "reload_foo.key")
ioutil.WriteFile(td+"/reload_foo.key", inBytes, 0777)
inBytes, _ = ioutil.ReadFile(wd + "reload_bar.pem")
ioutil.WriteFile(td+"/reload_bar.pem", inBytes, 0777)
inBytes, _ = ioutil.ReadFile(wd + "reload_bar.key")
ioutil.WriteFile(td+"/reload_bar.key", inBytes, 0777)
ioutil.WriteFile(td+"/reload_key.pem", inBytes, 0777)
relhcl := strings.Replace(strings.Replace(reloadhcl, "TMPDIR", td, -1), "FILE", "foo", -1)
relhcl := strings.Replace(reloadhcl, "TMPDIR", td, -1)
ioutil.WriteFile(td+"/reload.hcl", []byte(relhcl), 0777)
inBytes, _ = ioutil.ReadFile(wd + "reload_ca.pem")
@@ -155,7 +151,11 @@ func TestServer_ReloadListener(t *testing.T) {
t.Fatalf("certificate name didn't check out: %s", err)
}
relhcl = strings.Replace(strings.Replace(reloadhcl, "TMPDIR", td, -1), "FILE", "bar", -1)
relhcl = strings.Replace(reloadhcl, "TMPDIR", td, -1)
inBytes, _ = ioutil.ReadFile(wd + "reload_bar.pem")
ioutil.WriteFile(td+"/reload_cert.pem", inBytes, 0777)
inBytes, _ = ioutil.ReadFile(wd + "reload_bar.key")
ioutil.WriteFile(td+"/reload_key.pem", inBytes, 0777)
ioutil.WriteFile(td+"/reload.hcl", []byte(relhcl), 0777)
c.SighupCh <- struct{}{}

View File

@@ -25,6 +25,10 @@ var (
// PluginUnwrapTokenEnv is the ENV name used to pass unwrap tokens to the
// plugin.
PluginUnwrapTokenEnv = "VAULT_UNWRAP_TOKEN"
// PluginCACertPEMEnv is an ENV name used for holding a CA PEM-encoded
// string. Used for testing.
PluginCACertPEMEnv = "VAULT_TESTING_PLUGIN_CA_PEM"
)
// generateCert is used internally to create certificates for the plugin

helper/reload/reload.go (new file, 54 lines)
View File

@@ -0,0 +1,54 @@
package reload
import (
"crypto/tls"
"fmt"
"sync"
)
// ReloadFunc are functions that are called when a reload is requested
type ReloadFunc func(map[string]interface{}) error
// CertificateGetter satisfies ReloadFunc and its GetCertificate method
// satisfies the tls.GetCertificate function signature. Currently it does not
// allow changing paths after the fact.
type CertificateGetter struct {
sync.RWMutex
cert *tls.Certificate
certFile string
keyFile string
}
func NewCertificateGetter(certFile, keyFile string) *CertificateGetter {
return &CertificateGetter{
certFile: certFile,
keyFile: keyFile,
}
}
func (cg *CertificateGetter) Reload(_ map[string]interface{}) error {
cert, err := tls.LoadX509KeyPair(cg.certFile, cg.keyFile)
if err != nil {
return err
}
cg.Lock()
defer cg.Unlock()
cg.cert = &cert
return nil
}
func (cg *CertificateGetter) GetCertificate(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
cg.RLock()
defer cg.RUnlock()
if cg.cert == nil {
return nil, fmt.Errorf("nil certificate")
}
return cg.cert, nil
}
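A sketch of how a listener wires this up, mirroring the listenerWrapTLS change earlier in this commit (addr, certFile, and keyFile are placeholders):

package example

import (
    "crypto/tls"
    "net"

    "github.com/hashicorp/vault/helper/reload"
)

func tlsListener(addr, certFile, keyFile string) (net.Listener, reload.ReloadFunc, error) {
    cg := reload.NewCertificateGetter(certFile, keyFile)
    if err := cg.Reload(nil); err != nil { // load the initial keypair
        return nil, nil, err
    }
    ln, err := net.Listen("tcp", addr)
    if err != nil {
        return nil, nil, err
    }
    tlsConf := &tls.Config{
        GetCertificate: cg.GetCertificate, // swapped atomically by Reload
        NextProtos:     []string{"h2", "http/1.1"},
    }
    // The returned ReloadFunc (cg.Reload) is what gets registered under the
    // "listener|tcp" key and re-reads certFile/keyFile on SIGHUP.
    return tls.NewListener(ln, tlsConf), cg.Reload, nil
}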

View File

@@ -33,21 +33,17 @@ func TestHTTP_Fallback_Bad_Address(t *testing.T) {
ClusterAddr: "https://127.3.4.1:8382",
}
cluster := vault.NewTestCluster(t, coreConfig, true)
cluster.StartListeners()
defer cluster.CloseListeners()
cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
HandlerFunc: Handler,
})
cluster.Start()
defer cluster.Cleanup()
cores := cluster.Cores
cores[0].Handler.Handle("/", Handler(cores[0].Core))
cores[1].Handler.Handle("/", Handler(cores[1].Core))
cores[2].Handler.Handle("/", Handler(cores[2].Core))
// make it easy to get access to the active
core := cores[0].Core
vault.TestWaitActive(t, core)
root := cores[0].Root
addrs := []string{
fmt.Sprintf("https://127.0.0.1:%d", cores[1].Listeners[0].Address.Port),
fmt.Sprintf("https://127.0.0.1:%d", cores[2].Listeners[0].Address.Port),
@@ -62,7 +58,7 @@ func TestHTTP_Fallback_Bad_Address(t *testing.T) {
if err != nil {
t.Fatal(err)
}
client.SetToken(root)
client.SetToken(cluster.RootToken)
secret, err := client.Auth().Token().LookupSelf()
if err != nil {
@@ -71,7 +67,7 @@ func TestHTTP_Fallback_Bad_Address(t *testing.T) {
if secret == nil {
t.Fatal("secret is nil")
}
if secret.Data["id"].(string) != root {
if secret.Data["id"].(string) != cluster.RootToken {
t.Fatal("token mismatch")
}
}
@@ -85,21 +81,17 @@ func TestHTTP_Fallback_Disabled(t *testing.T) {
ClusterAddr: "empty",
}
cluster := vault.NewTestCluster(t, coreConfig, true)
cluster.StartListeners()
defer cluster.CloseListeners()
cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
HandlerFunc: Handler,
})
cluster.Start()
defer cluster.Cleanup()
cores := cluster.Cores
cores[0].Handler.Handle("/", Handler(cores[0].Core))
cores[1].Handler.Handle("/", Handler(cores[1].Core))
cores[2].Handler.Handle("/", Handler(cores[2].Core))
// make it easy to get access to the active
core := cores[0].Core
vault.TestWaitActive(t, core)
root := cores[0].Root
addrs := []string{
fmt.Sprintf("https://127.0.0.1:%d", cores[1].Listeners[0].Address.Port),
fmt.Sprintf("https://127.0.0.1:%d", cores[2].Listeners[0].Address.Port),
@@ -114,7 +106,7 @@ func TestHTTP_Fallback_Disabled(t *testing.T) {
if err != nil {
t.Fatal(err)
}
client.SetToken(root)
client.SetToken(cluster.RootToken)
secret, err := client.Auth().Token().LookupSelf()
if err != nil {
@@ -123,7 +115,7 @@ func TestHTTP_Fallback_Disabled(t *testing.T) {
if secret == nil {
t.Fatal("secret is nil")
}
if secret.Data["id"].(string) != root {
if secret.Data["id"].(string) != cluster.RootToken {
t.Fatal("token mismatch")
}
}
@@ -146,21 +138,17 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint64)
},
}
cluster := vault.NewTestCluster(t, coreConfig, true)
cluster.StartListeners()
defer cluster.CloseListeners()
cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
HandlerFunc: Handler,
})
cluster.Start()
defer cluster.Cleanup()
cores := cluster.Cores
cores[0].Handler.Handle("/", Handler(cores[0].Core))
cores[1].Handler.Handle("/", Handler(cores[1].Core))
cores[2].Handler.Handle("/", Handler(cores[2].Core))
// make it easy to get access to the active
core := cores[0].Core
vault.TestWaitActive(t, core)
root := cores[0].Root
wg := sync.WaitGroup{}
funcs := []string{"encrypt", "decrypt", "rotate", "change_min_version"}
@@ -191,7 +179,7 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint64)
if err != nil {
t.Fatal(err)
}
req.Header.Set(AuthHeaderName, root)
req.Header.Set(AuthHeaderName, cluster.RootToken)
_, err = client.Do(req)
if err != nil {
t.Fatal(err)
@@ -240,7 +228,7 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint64)
if err != nil {
return nil, err
}
req.Header.Set(AuthHeaderName, root)
req.Header.Set(AuthHeaderName, cluster.RootToken)
resp, err := client.Do(req)
if err != nil {
return nil, err
@@ -454,21 +442,17 @@ func TestHTTP_Forwarding_ClientTLS(t *testing.T) {
},
}
cluster := vault.NewTestCluster(t, coreConfig, true)
cluster.StartListeners()
defer cluster.CloseListeners()
cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
HandlerFunc: Handler,
})
cluster.Start()
defer cluster.Cleanup()
cores := cluster.Cores
cores[0].Handler.Handle("/", Handler(cores[0].Core))
cores[1].Handler.Handle("/", Handler(cores[1].Core))
cores[2].Handler.Handle("/", Handler(cores[2].Core))
// make it easy to get access to the active
core := cores[0].Core
vault.TestWaitActive(t, core)
root := cores[0].Root
transport := cleanhttp.DefaultTransport()
transport.TLSClientConfig = cores[0].TLSConfig
if err := http2.ConfigureTransport(transport); err != nil {
@@ -484,7 +468,7 @@ func TestHTTP_Forwarding_ClientTLS(t *testing.T) {
if err != nil {
t.Fatal(err)
}
req.Header.Set(AuthHeaderName, root)
req.Header.Set(AuthHeaderName, cluster.RootToken)
_, err = client.Do(req)
if err != nil {
t.Fatal(err)
@@ -495,7 +479,7 @@ func TestHTTP_Forwarding_ClientTLS(t *testing.T) {
Policies string `json:"policies"`
}
encodedCertConfig, err := json.Marshal(&certConfig{
Certificate: vault.TestClusterCACert,
Certificate: string(cluster.CACertPEM),
Policies: "default",
})
if err != nil {
@@ -506,7 +490,7 @@ func TestHTTP_Forwarding_ClientTLS(t *testing.T) {
if err != nil {
t.Fatal(err)
}
req.Header.Set(AuthHeaderName, root)
req.Header.Set(AuthHeaderName, cluster.RootToken)
_, err = client.Do(req)
if err != nil {
t.Fatal(err)
@@ -529,7 +513,7 @@ func TestHTTP_Forwarding_ClientTLS(t *testing.T) {
},
}
//cores[0].Logger().Printf("root token is %s", root)
//cores[0].Logger().Printf("cluster.RootToken token is %s", cluster.RootToken)
//time.Sleep(4 * time.Hour)
for _, addr := range addrs {
@@ -567,15 +551,13 @@ func TestHTTP_Forwarding_ClientTLS(t *testing.T) {
}
func TestHTTP_Forwarding_HelpOperation(t *testing.T) {
cluster := vault.NewTestCluster(t, &vault.CoreConfig{}, true)
defer cluster.CloseListeners()
cluster.StartListeners()
cluster := vault.NewTestCluster(t, &vault.CoreConfig{}, &vault.TestClusterOptions{
HandlerFunc: Handler,
})
cluster.Start()
defer cluster.Cleanup()
cores := cluster.Cores
cores[0].Handler.Handle("/", Handler(cores[0].Core))
cores[1].Handler.Handle("/", Handler(cores[1].Core))
cores[2].Handler.Handle("/", Handler(cores[2].Core))
vault.TestWaitActive(t, cores[0].Core)
testHelp := func(client *api.Client) {

View File

@@ -13,25 +13,20 @@ import (
// Test wrapping functionality
func TestHTTP_Wrapping(t *testing.T) {
coreConfig := &vault.CoreConfig{}
cluster := vault.NewTestCluster(t, &vault.CoreConfig{}, &vault.TestClusterOptions{
HandlerFunc: Handler,
})
cluster.Start()
defer cluster.Cleanup()
// Chicken-and-egg: Handler needs a core. So we create handlers first, then
// add routes chained to a Handler-created handler.
cluster := vault.NewTestCluster(t, coreConfig, true)
defer cluster.CloseListeners()
cluster.StartListeners()
cores := cluster.Cores
cores[0].Handler.Handle("/", Handler(cores[0].Core))
cores[1].Handler.Handle("/", Handler(cores[1].Core))
cores[2].Handler.Handle("/", Handler(cores[2].Core))
// make it easy to get access to the active
core := cores[0].Core
vault.TestWaitActive(t, core)
root := cores[0].Root
client := cores[0].Client
client.SetToken(root)
client.SetToken(cluster.RootToken)
// Write a value that we will use with wrapping for lookup
_, err := client.Logical().Write("secret/foo", map[string]interface{}{
@@ -73,7 +68,7 @@ func TestHTTP_Wrapping(t *testing.T) {
// Second: basic things that should fail, unwrap edition
// Root token isn't a wrapping token
_, err = client.Logical().Unwrap(root)
_, err = client.Logical().Unwrap(cluster.RootToken)
if err == nil {
t.Fatal("expected error")
}
@@ -162,7 +157,7 @@ func TestHTTP_Wrapping(t *testing.T) {
}
// Create a wrapping token
client.SetToken(root)
client.SetToken(cluster.RootToken)
secret, err = client.Logical().Read("secret/foo")
if err != nil {
t.Fatal(err)
@@ -212,7 +207,7 @@ func TestHTTP_Wrapping(t *testing.T) {
}
// Create a wrapping token
client.SetToken(root)
client.SetToken(cluster.RootToken)
secret, err = client.Logical().Read("secret/foo")
if err != nil {
t.Fatal(err)
@@ -264,7 +259,7 @@ func TestHTTP_Wrapping(t *testing.T) {
// Custom wrapping
//
client.SetToken(root)
client.SetToken(cluster.RootToken)
data := map[string]interface{}{
"zip": "zap",
"three": json.Number("2"),

View File

@@ -85,13 +85,13 @@ func TestCluster_ListenForRequests(t *testing.T) {
// Make this nicer for tests
manualStepDownSleepPeriod = 5 * time.Second
cluster := NewTestCluster(t, nil, false)
cluster.StartListeners()
defer cluster.CloseListeners()
cluster := NewTestCluster(t, nil, &TestClusterOptions{
KeepStandbysSealed: true,
})
cluster.Start()
defer cluster.Cleanup()
cores := cluster.Cores
root := cores[0].Root
// Wait for core to become active
TestWaitActive(t, cores[0].Core)
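Taken together with the testing.go hunks below, the options struct now has three knobs. In-package tests like this one omit HandlerFunc (the vault package cannot import the http package without a cycle), but external callers can set all three; values here are illustrative:

cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{
    HandlerFunc:        vaulthttp.Handler, // per-core HTTP handler constructor
    BaseListenAddress:  "127.0.0.1:8200",  // nodes bind port, port+1, port+2
    KeepStandbysSealed: true,              // leave the standby cores sealed
})
cluster.Start()
defer cluster.Cleanup()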
@@ -115,16 +115,16 @@ func TestCluster_ListenForRequests(t *testing.T) {
t.Fatalf("%s not a TCP port", tcpAddr.String())
}
conn, err := tls.Dial("tcp", fmt.Sprintf("%s:%d", tcpAddr.IP.String(), tcpAddr.Port+100), tlsConfig)
conn, err := tls.Dial("tcp", fmt.Sprintf("%s:%d", tcpAddr.IP.String(), tcpAddr.Port+105), tlsConfig)
if err != nil {
if expectFail {
t.Logf("testing %s:%d unsuccessful as expected", tcpAddr.IP.String(), tcpAddr.Port+100)
t.Logf("testing %s:%d unsuccessful as expected", tcpAddr.IP.String(), tcpAddr.Port+105)
continue
}
t.Fatalf("error: %v\nlisteners are\n%#v\n%#v\n", err, cores[0].Listeners[0], cores[0].Listeners[1])
}
if expectFail {
t.Fatalf("testing %s:%d not unsuccessful as expected", tcpAddr.IP.String(), tcpAddr.Port+100)
t.Fatalf("testing %s:%d not unsuccessful as expected", tcpAddr.IP.String(), tcpAddr.Port+105)
}
err = conn.Handshake()
if err != nil {
@@ -137,7 +137,7 @@ func TestCluster_ListenForRequests(t *testing.T) {
case connState.NegotiatedProtocol != "h2" || !connState.NegotiatedProtocolIsMutual:
t.Fatal("bad protocol negotiation")
}
t.Logf("testing %s:%d successful", tcpAddr.IP.String(), tcpAddr.Port+100)
t.Logf("testing %s:%d successful", tcpAddr.IP.String(), tcpAddr.Port+105)
}
}
@@ -147,7 +147,7 @@ func TestCluster_ListenForRequests(t *testing.T) {
err := cores[0].StepDown(&logical.Request{
Operation: logical.UpdateOperation,
Path: "sys/step-down",
ClientToken: root,
ClientToken: cluster.RootToken,
})
if err != nil {
t.Fatal(err)
@@ -162,7 +162,7 @@ func TestCluster_ListenForRequests(t *testing.T) {
time.Sleep(manualStepDownSleepPeriod)
checkListenersFunc(false)
err = cores[0].Seal(root)
err = cores[0].Seal(cluster.RootToken)
if err != nil {
t.Fatal(err)
}
@@ -179,54 +179,35 @@ func TestCluster_ForwardRequests(t *testing.T) {
}
func testCluster_ForwardRequestsCommon(t *testing.T) {
handler1 := http.NewServeMux()
handler1.HandleFunc("/core1", func(w http.ResponseWriter, req *http.Request) {
w.Header().Add("Content-Type", "application/json")
w.WriteHeader(201)
w.Write([]byte("core1"))
})
handler2 := http.NewServeMux()
handler2.HandleFunc("/core2", func(w http.ResponseWriter, req *http.Request) {
w.Header().Add("Content-Type", "application/json")
w.WriteHeader(202)
w.Write([]byte("core2"))
})
handler3 := http.NewServeMux()
handler3.HandleFunc("/core3", func(w http.ResponseWriter, req *http.Request) {
w.Header().Add("Content-Type", "application/json")
w.WriteHeader(203)
w.Write([]byte("core3"))
})
cluster := NewTestCluster(t, nil, true)
cluster.StartListeners()
defer cluster.CloseListeners()
cluster := NewTestCluster(t, nil, nil)
cores := cluster.Cores
cores[0].Handler.HandleFunc("/core1", func(w http.ResponseWriter, req *http.Request) {
cores[0].Handler.(*http.ServeMux).HandleFunc("/core1", func(w http.ResponseWriter, req *http.Request) {
w.Header().Add("Content-Type", "application/json")
w.WriteHeader(201)
w.Write([]byte("core1"))
})
cores[1].Handler.HandleFunc("/core2", func(w http.ResponseWriter, req *http.Request) {
cores[1].Handler.(*http.ServeMux).HandleFunc("/core2", func(w http.ResponseWriter, req *http.Request) {
w.Header().Add("Content-Type", "application/json")
w.WriteHeader(202)
w.Write([]byte("core2"))
})
cores[2].Handler.HandleFunc("/core3", func(w http.ResponseWriter, req *http.Request) {
cores[2].Handler.(*http.ServeMux).HandleFunc("/core3", func(w http.ResponseWriter, req *http.Request) {
w.Header().Add("Content-Type", "application/json")
w.WriteHeader(203)
w.Write([]byte("core3"))
})
cluster.Start()
defer cluster.Cleanup()
root := cores[0].Root
root := cluster.RootToken
// Wait for core to become active
TestWaitActive(t, cores[0].Core)
// Test forwarding a request. Since we're going directly from core to core
// with no fallback we know that if it worked, request handling is working
testCluster_ForwardRequests(t, cores[1], "core1")
testCluster_ForwardRequests(t, cores[2], "core1")
testCluster_ForwardRequests(t, cores[1], root, "core1")
testCluster_ForwardRequests(t, cores[2], root, "core1")
//
// Now we do a bunch of round-robining. The point is to make sure that as
@@ -251,8 +232,8 @@ func testCluster_ForwardRequestsCommon(t *testing.T) {
})
time.Sleep(clusterTestPausePeriod)
TestWaitActive(t, cores[1].Core)
testCluster_ForwardRequests(t, cores[0], "core2")
testCluster_ForwardRequests(t, cores[2], "core2")
testCluster_ForwardRequests(t, cores[0], root, "core2")
testCluster_ForwardRequests(t, cores[2], root, "core2")
// Ensure active core is cores[2] and test
err = cores[1].StepDown(&logical.Request{
@@ -271,8 +252,8 @@ func testCluster_ForwardRequestsCommon(t *testing.T) {
})
time.Sleep(clusterTestPausePeriod)
TestWaitActive(t, cores[2].Core)
testCluster_ForwardRequests(t, cores[0], "core3")
testCluster_ForwardRequests(t, cores[1], "core3")
testCluster_ForwardRequests(t, cores[0], root, "core3")
testCluster_ForwardRequests(t, cores[1], root, "core3")
// Ensure active core is cores[0] and test
err = cores[2].StepDown(&logical.Request{
@@ -291,8 +272,8 @@ func testCluster_ForwardRequestsCommon(t *testing.T) {
})
time.Sleep(clusterTestPausePeriod)
TestWaitActive(t, cores[0].Core)
testCluster_ForwardRequests(t, cores[1], "core1")
testCluster_ForwardRequests(t, cores[2], "core1")
testCluster_ForwardRequests(t, cores[1], root, "core1")
testCluster_ForwardRequests(t, cores[2], root, "core1")
// Ensure active core is cores[1] and test
err = cores[0].StepDown(&logical.Request{
@@ -311,8 +292,8 @@ func testCluster_ForwardRequestsCommon(t *testing.T) {
})
time.Sleep(clusterTestPausePeriod)
TestWaitActive(t, cores[1].Core)
testCluster_ForwardRequests(t, cores[0], "core2")
testCluster_ForwardRequests(t, cores[2], "core2")
testCluster_ForwardRequests(t, cores[0], root, "core2")
testCluster_ForwardRequests(t, cores[2], root, "core2")
// Ensure active core is cores[2] and test
err = cores[1].StepDown(&logical.Request{
@@ -331,11 +312,11 @@ func testCluster_ForwardRequestsCommon(t *testing.T) {
})
time.Sleep(clusterTestPausePeriod)
TestWaitActive(t, cores[2].Core)
testCluster_ForwardRequests(t, cores[0], "core3")
testCluster_ForwardRequests(t, cores[1], "core3")
testCluster_ForwardRequests(t, cores[0], root, "core3")
testCluster_ForwardRequests(t, cores[1], root, "core3")
}
func testCluster_ForwardRequests(t *testing.T, c *TestClusterCore, remoteCoreID string) {
func testCluster_ForwardRequests(t *testing.T, c *TestClusterCore, rootToken, remoteCoreID string) {
standby, err := c.Standby()
if err != nil {
t.Fatal(err)
@@ -347,6 +328,7 @@ func testCluster_ForwardRequests(t *testing.T, c *TestClusterCore, remoteCoreID
// We need to call Leader as that refreshes the connection info
isLeader, _, err := c.Leader()
if err != nil {
panic(err.Error())
t.Fatal(err)
}
if isLeader {
@@ -358,7 +340,7 @@ func testCluster_ForwardRequests(t *testing.T, c *TestClusterCore, remoteCoreID
if err != nil {
t.Fatal(err)
}
req.Header.Add("X-Vault-Token", c.Root)
req.Header.Add("X-Vault-Token", rootToken)
statusCode, header, respBytes, err := c.ForwardRequest(req)
if err != nil {

View File

@@ -29,6 +29,7 @@ import (
"github.com/hashicorp/vault/helper/jsonutil"
"github.com/hashicorp/vault/helper/logformat"
"github.com/hashicorp/vault/helper/mlock"
"github.com/hashicorp/vault/helper/reload"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/physical"
"github.com/hashicorp/vault/shamir"
@@ -103,9 +104,6 @@ var (
LastRemoteWAL = lastRemoteWALImpl
)
// ReloadFunc are functions that are called when a reload is requested.
type ReloadFunc func(map[string]interface{}) error
// NonFatalError is an error that can be returned during NewCore that should be
// displayed but not cause a program exit
type NonFatalError struct {
@@ -273,7 +271,7 @@ type Core struct {
cachingDisabled bool
// reloadFuncs is a map containing reload functions
reloadFuncs map[string][]ReloadFunc
reloadFuncs map[string][]reload.ReloadFunc
// reloadFuncsLock controls access to the funcs
reloadFuncsLock sync.RWMutex
@@ -394,7 +392,7 @@ type CoreConfig struct {
PluginDirectory string `json:"plugin_directory" structs:"plugin_directory" mapstructure:"plugin_directory"`
ReloadFuncs *map[string][]ReloadFunc
ReloadFuncs *map[string][]reload.ReloadFunc
ReloadFuncsLock *sync.RWMutex
}
@@ -500,7 +498,7 @@ func NewCore(conf *CoreConfig) (*Core, error) {
// the caller can share state
conf.ReloadFuncsLock = &c.reloadFuncsLock
c.reloadFuncsLock.Lock()
c.reloadFuncs = make(map[string][]ReloadFunc)
c.reloadFuncs = make(map[string][]reload.ReloadFunc)
c.reloadFuncsLock.Unlock()
conf.ReloadFuncs = &c.reloadFuncs

View File

@@ -9,7 +9,7 @@ import (
"github.com/hashicorp/vault/builtin/plugin"
"github.com/hashicorp/vault/helper/logformat"
"github.com/hashicorp/vault/helper/pluginutil"
"github.com/hashicorp/vault/http"
vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/logical"
lplugin "github.com/hashicorp/vault/logical/plugin"
"github.com/hashicorp/vault/logical/plugin/mock"
@@ -24,15 +24,13 @@ func TestSystemBackend_enableAuth_plugin(t *testing.T) {
},
}
cluster := vault.NewTestCluster(t, coreConfig, true)
cluster.StartListeners()
defer cluster.CloseListeners()
cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
})
cluster.Start()
defer cluster.Cleanup()
cores := cluster.Cores
cores[0].Handler.Handle("/", http.Handler(cores[0].Core))
cores[1].Handler.Handle("/", http.Handler(cores[1].Core))
cores[2].Handler.Handle("/", http.Handler(cores[2].Core))
core := cores[0]
b := vault.NewSystemBackend(core.Core)
@@ -50,6 +48,8 @@ func TestSystemBackend_enableAuth_plugin(t *testing.T) {
t.Fatal(err)
}
os.Setenv(pluginutil.PluginCACertPEMEnv, string(cluster.CACertPEM))
vault.TestAddTestPlugin(t, core.Core, "mock-plugin", "TestBackend_PluginMain")
req := logical.TestRequest(t, logical.UpdateOperation, "auth/mock-plugin")
@@ -70,7 +70,12 @@ func TestBackend_PluginMain(t *testing.T) {
return
}
content := []byte(vault.TestClusterCACert)
caPem := os.Getenv(pluginutil.PluginCACertPEMEnv)
if caPem == "" {
t.Fatal("CA cert not passed in")
}
content := []byte(caPem)
tmpfile, err := ioutil.TempFile("", "test-cacert")
if err != nil {
t.Fatal(err)

View File

@@ -2,13 +2,20 @@ package vault
import (
"bytes"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/sha256"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/base64"
"encoding/pem"
"fmt"
"io"
"io/ioutil"
"math/big"
mathrand "math/rand"
"net"
"net/http"
"os"
@@ -29,6 +36,7 @@ import (
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/audit"
"github.com/hashicorp/vault/helper/logformat"
"github.com/hashicorp/vault/helper/reload"
"github.com/hashicorp/vault/helper/salt"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
@@ -590,10 +598,20 @@ func TestWaitActive(t testing.TB, core *Core) {
}
type TestCluster struct {
BarrierKeys [][]byte
CACert *x509.Certificate
CACertBytes []byte
CACertPEM []byte
CAKey *ecdsa.PrivateKey
CAKeyPEM []byte
Cores []*TestClusterCore
ID string
RootToken string
RootCAs *x509.CertPool
TempDir string
}
func (t *TestCluster) StartListeners() {
func (t *TestCluster) Start() {
for _, core := range t.Cores {
if core.Server != nil {
for _, ln := range core.Listeners {
@@ -603,7 +621,7 @@ func (t *TestCluster) StartListeners() {
}
}
func (t *TestCluster) CloseListeners() {
func (t *TestCluster) Cleanup() {
for _, core := range t.Cores {
if core.Listeners != nil {
for _, ln := range core.Listeners {
@@ -611,6 +629,11 @@ func (t *TestCluster) CloseListeners() {
}
}
}
if t.TempDir != "" {
os.RemoveAll(t.TempDir)
}
// Give time to actually shut down/clean up before the next test
time.Sleep(time.Second)
}
@@ -622,108 +645,285 @@ type TestListener struct {
type TestClusterCore struct {
*Core
Listeners []*TestListener
Handler *http.ServeMux
Server *http.Server
Root string
BarrierKeys [][]byte
CACertBytes []byte
CACert *x509.Certificate
TLSConfig *tls.Config
ClusterID string
Client *api.Client
Handler http.Handler
Listeners []*TestListener
ReloadFuncs *map[string][]reload.ReloadFunc
ReloadFuncsLock *sync.RWMutex
Server *http.Server
ServerCert *x509.Certificate
ServerCertBytes []byte
ServerCertPEM []byte
ServerKey *ecdsa.PrivateKey
ServerKeyPEM []byte
TLSConfig *tls.Config
}
func NewTestCluster(t testing.TB, base *CoreConfig, unsealStandbys bool) *TestCluster {
//
// TLS setup
//
block, _ := pem.Decode([]byte(TestClusterCACert))
if block == nil {
t.Fatal("error decoding cluster CA cert")
type TestClusterOptions struct {
KeepStandbysSealed bool
HandlerFunc func(*Core) http.Handler
BaseListenAddress string
}
func NewTestCluster(t testing.TB, base *CoreConfig, opts *TestClusterOptions) *TestCluster {
certIPs := []net.IP{
net.IPv6loopback,
net.ParseIP("127.0.0.1"),
}
var baseAddr *net.TCPAddr
if opts.BaseListenAddress != "" {
var err error
baseAddr, err = net.ResolveTCPAddr("tcp", opts.BaseListenAddress)
if err != nil {
t.Fatal("could not parse given base IP")
}
certIPs = append(certIPs, baseAddr.IP)
}
var testCluster TestCluster
tempDir, err := ioutil.TempDir("", "vault-test-cluster-")
if err != nil {
t.Fatal(err)
}
testCluster.TempDir = tempDir
caKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
t.Fatal(err)
}
testCluster.CAKey = caKey
caCertTemplate := &x509.Certificate{
Subject: pkix.Name{
CommonName: "localhost",
},
DNSNames: []string{"localhost"},
IPAddresses: certIPs,
KeyUsage: x509.KeyUsage(x509.KeyUsageCertSign | x509.KeyUsageCRLSign),
SerialNumber: big.NewInt(mathrand.Int63()),
NotBefore: time.Now().Add(-30 * time.Second),
NotAfter: time.Now().Add(262980 * time.Hour),
BasicConstraintsValid: true,
IsCA: true,
}
caBytes, err := x509.CreateCertificate(rand.Reader, caCertTemplate, caCertTemplate, caKey.Public(), caKey)
if err != nil {
t.Fatal(err)
}
caBytes := block.Bytes
caCert, err := x509.ParseCertificate(caBytes)
if err != nil {
t.Fatal(err)
}
serverCert, err := tls.X509KeyPair([]byte(TestClusterServerCert), []byte(TestClusterServerKey))
testCluster.CACert = caCert
testCluster.CACertBytes = caBytes
testCluster.RootCAs = x509.NewCertPool()
testCluster.RootCAs.AddCert(caCert)
caCertPEMBlock := &pem.Block{
Type: "CERTIFICATE",
Bytes: caBytes,
}
testCluster.CACertPEM = pem.EncodeToMemory(caCertPEMBlock)
err = ioutil.WriteFile(filepath.Join(testCluster.TempDir, "ca_cert.pem"), testCluster.CACertPEM, 0755)
if err != nil {
t.Fatal(err)
}
marshaledCAKey, err := x509.MarshalECPrivateKey(caKey)
if err != nil {
t.Fatal(err)
}
caKeyPEMBlock := &pem.Block{
Type: "EC PRIVATE KEY",
Bytes: marshaledCAKey,
}
testCluster.CAKeyPEM = pem.EncodeToMemory(caKeyPEMBlock)
err = ioutil.WriteFile(filepath.Join(testCluster.TempDir, "ca_key.pem"), testCluster.CAKeyPEM, 0755)
if err != nil {
t.Fatal(err)
}
rootCAs := x509.NewCertPool()
rootCAs.AppendCertsFromPEM([]byte(TestClusterCACert))
tlsConfig := &tls.Config{
Certificates: []tls.Certificate{serverCert},
RootCAs: rootCAs,
ClientCAs: rootCAs,
ClientAuth: tls.VerifyClientCertIfGiven,
s1Key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
t.Fatal(err)
}
tlsConfig.BuildNameToCertificate()
s1CertTemplate := &x509.Certificate{
Subject: pkix.Name{
CommonName: "localhost",
},
DNSNames: []string{"localhost"},
IPAddresses: []net.IP{
net.IPv6loopback,
net.ParseIP("127.0.0.1"),
},
ExtKeyUsage: []x509.ExtKeyUsage{
x509.ExtKeyUsageServerAuth,
x509.ExtKeyUsageClientAuth,
},
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement,
SerialNumber: big.NewInt(mathrand.Int63()),
NotBefore: time.Now().Add(-30 * time.Second),
NotAfter: time.Now().Add(262980 * time.Hour),
}
s1CertBytes, err := x509.CreateCertificate(rand.Reader, s1CertTemplate, caCert, s1Key.Public(), caKey)
if err != nil {
t.Fatal(err)
}
s1Cert, err := x509.ParseCertificate(s1CertBytes)
if err != nil {
t.Fatal(err)
}
s1CertPEMBlock := &pem.Block{
Type: "CERTIFICATE",
Bytes: s1CertBytes,
}
s1CertPEM := pem.EncodeToMemory(s1CertPEMBlock)
s1MarshaledKey, err := x509.MarshalECPrivateKey(s1Key)
if err != nil {
t.Fatal(err)
}
s1KeyPEMBlock := &pem.Block{
Type: "EC PRIVATE KEY",
Bytes: s1MarshaledKey,
}
s1KeyPEM := pem.EncodeToMemory(s1KeyPEMBlock)
// Sanity checking
block, _ = pem.Decode([]byte(TestClusterServerCert))
if block == nil {
t.Fatal(err)
}
parsedServerCert, err := x509.ParseCertificate(block.Bytes)
s2Key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
t.Fatal(err)
}
chains, err := parsedServerCert.Verify(x509.VerifyOptions{
DNSName: "127.0.0.1",
Roots: rootCAs,
KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
})
s2CertTemplate := &x509.Certificate{
Subject: pkix.Name{
CommonName: "localhost",
},
DNSNames: []string{"localhost"},
IPAddresses: []net.IP{
net.IPv6loopback,
net.ParseIP("127.0.0.1"),
},
ExtKeyUsage: []x509.ExtKeyUsage{
x509.ExtKeyUsageServerAuth,
x509.ExtKeyUsageClientAuth,
},
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement,
SerialNumber: big.NewInt(mathrand.Int63()),
NotBefore: time.Now().Add(-30 * time.Second),
NotAfter: time.Now().Add(262980 * time.Hour),
}
s2CertBytes, err := x509.CreateCertificate(rand.Reader, s2CertTemplate, caCert, s2Key.Public(), caKey)
if err != nil {
t.Fatal(err)
}
if chains == nil || len(chains) == 0 {
t.Fatal("no verified chains for server auth")
}
chains, err = parsedServerCert.Verify(x509.VerifyOptions{
DNSName: "127.0.0.1",
Roots: rootCAs,
KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
})
s2Cert, err := x509.ParseCertificate(s2CertBytes)
if err != nil {
t.Fatal(err)
}
if chains == nil || len(chains) == 0 {
t.Fatal("no verified chains for chains auth")
s2CertPEMBlock := &pem.Block{
Type: "CERTIFICATE",
Bytes: s2CertBytes,
}
s2CertPEM := pem.EncodeToMemory(s2CertPEMBlock)
s2MarshaledKey, err := x509.MarshalECPrivateKey(s2Key)
if err != nil {
t.Fatal(err)
}
s2KeyPEMBlock := &pem.Block{
Type: "EC PRIVATE KEY",
Bytes: s2MarshaledKey,
}
s2KeyPEM := pem.EncodeToMemory(s2KeyPEMBlock)
s3Key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
t.Fatal(err)
}
s3CertTemplate := &x509.Certificate{
Subject: pkix.Name{
CommonName: "localhost",
},
DNSNames: []string{"localhost"},
IPAddresses: []net.IP{
net.IPv6loopback,
net.ParseIP("127.0.0.1"),
},
ExtKeyUsage: []x509.ExtKeyUsage{
x509.ExtKeyUsageServerAuth,
x509.ExtKeyUsageClientAuth,
},
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement,
SerialNumber: big.NewInt(mathrand.Int63()),
NotBefore: time.Now().Add(-30 * time.Second),
NotAfter: time.Now().Add(262980 * time.Hour),
}
s3CertBytes, err := x509.CreateCertificate(rand.Reader, s3CertTemplate, caCert, s3Key.Public(), caKey)
if err != nil {
t.Fatal(err)
}
s3Cert, err := x509.ParseCertificate(s3CertBytes)
if err != nil {
t.Fatal(err)
}
s3CertPEMBlock := &pem.Block{
Type: "CERTIFICATE",
Bytes: s3CertBytes,
}
s3CertPEM := pem.EncodeToMemory(s3CertPEMBlock)
s3MarshaledKey, err := x509.MarshalECPrivateKey(s3Key)
if err != nil {
t.Fatal(err)
}
s3KeyPEMBlock := &pem.Block{
Type: "EC PRIVATE KEY",
Bytes: s3MarshaledKey,
}
s3KeyPEM := pem.EncodeToMemory(s3KeyPEMBlock)
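// The three per-node blocks above differ only in their names; a hypothetical
// helper (not in this diff) capturing the shared shape:
func makeNodeCert(caCert *x509.Certificate, caKey *ecdsa.PrivateKey) ([]byte, []byte, error) {
    key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
    if err != nil {
        return nil, nil, err
    }
    tmpl := &x509.Certificate{
        Subject:     pkix.Name{CommonName: "localhost"},
        DNSNames:    []string{"localhost"},
        IPAddresses: []net.IP{net.IPv6loopback, net.ParseIP("127.0.0.1")},
        ExtKeyUsage: []x509.ExtKeyUsage{
            x509.ExtKeyUsageServerAuth,
            x509.ExtKeyUsageClientAuth,
        },
        KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement,
        SerialNumber: big.NewInt(mathrand.Int63()),
        NotBefore:    time.Now().Add(-30 * time.Second),
        NotAfter:     time.Now().Add(262980 * time.Hour),
    }
    certDER, err := x509.CreateCertificate(rand.Reader, tmpl, caCert, key.Public(), caKey)
    if err != nil {
        return nil, nil, err
    }
    keyDER, err := x509.MarshalECPrivateKey(key)
    if err != nil {
        return nil, nil, err
    }
    certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDER})
    keyPEM := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: keyDER})
    return certPEM, keyPEM, nil
}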
logger := logformat.NewVaultLogger(log.LevelTrace)
//
// Listener setup
//
ln, err := net.ListenTCP("tcp", &net.TCPAddr{
ports := []int{0, 0, 0}
if baseAddr != nil {
ports = []int{baseAddr.Port, baseAddr.Port + 1, baseAddr.Port + 2}
} else {
baseAddr = &net.TCPAddr{
IP: net.ParseIP("127.0.0.1"),
Port: 0,
})
}
}
baseAddr.Port = ports[0]
ln, err := net.ListenTCP("tcp", baseAddr)
if err != nil {
t.Fatal(err)
}
s1CertFile := filepath.Join(testCluster.TempDir, fmt.Sprintf("node1_port_%d_cert.pem", ln.Addr().(*net.TCPAddr).Port))
s1KeyFile := filepath.Join(testCluster.TempDir, fmt.Sprintf("node1_port_%d_key.pem", ln.Addr().(*net.TCPAddr).Port))
err = ioutil.WriteFile(s1CertFile, s1CertPEM, 0755)
if err != nil {
t.Fatal(err)
}
err = ioutil.WriteFile(s1KeyFile, s1KeyPEM, 0755)
if err != nil {
t.Fatal(err)
}
s1TLSCert, err := tls.X509KeyPair(s1CertPEM, s1KeyPEM)
if err != nil {
t.Fatal(err)
}
s1CertGetter := reload.NewCertificateGetter(s1CertFile, s1KeyFile)
s1TLSConfig := &tls.Config{
Certificates: []tls.Certificate{s1TLSCert},
RootCAs: testCluster.RootCAs,
ClientCAs: testCluster.RootCAs,
ClientAuth: tls.VerifyClientCertIfGiven,
NextProtos: []string{"h2", "http/1.1"},
GetCertificate: s1CertGetter.GetCertificate,
}
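// Aside, not part of this diff: ClientAuth is VerifyClientCertIfGiven rather
// than RequireAndVerifyClientCert, presumably so a single listener can serve
// both plain API clients and mutually-authenticated intra-cluster traffic,
// while still validating any client cert that is presented against ClientCAs.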
s1TLSConfig.BuildNameToCertificate()
c1lns := []*TestListener{&TestListener{
Listener: tls.NewListener(ln, tlsConfig),
Listener: tls.NewListener(ln, s1TLSConfig),
Address: ln.Addr().(*net.TCPAddr),
},
}
ln, err = net.ListenTCP("tcp", &net.TCPAddr{
IP: net.ParseIP("127.0.0.1"),
Port: 0,
})
if err != nil {
t.Fatal(err)
}
c1lns = append(c1lns, &TestListener{
Listener: tls.NewListener(ln, tlsConfig),
Listener: tls.NewListener(ln, s1TLSConfig),
Address: ln.Addr().(*net.TCPAddr),
})
handler1 := http.NewServeMux()
var handler1 http.Handler = http.NewServeMux()
server1 := &http.Server{
Handler: handler1,
}
@@ -731,19 +931,41 @@ func NewTestCluster(t testing.TB, base *CoreConfig, unsealStandbys bool) *TestCl
t.Fatal(err)
}
ln, err = net.ListenTCP("tcp", &net.TCPAddr{
IP: net.ParseIP("127.0.0.1"),
Port: 0,
})
baseAddr.Port = ports[1]
ln, err = net.ListenTCP("tcp", baseAddr)
if err != nil {
t.Fatal(err)
}
s2CertFile := filepath.Join(testCluster.TempDir, fmt.Sprintf("node2_port_%d_cert.pem", ln.Addr().(*net.TCPAddr).Port))
s2KeyFile := filepath.Join(testCluster.TempDir, fmt.Sprintf("node2_port_%d_key.pem", ln.Addr().(*net.TCPAddr).Port))
err = ioutil.WriteFile(s2CertFile, s2CertPEM, 0755)
if err != nil {
t.Fatal(err)
}
err = ioutil.WriteFile(s2KeyFile, s2KeyPEM, 0755)
if err != nil {
t.Fatal(err)
}
s2TLSCert, err := tls.X509KeyPair(s2CertPEM, s2KeyPEM)
if err != nil {
t.Fatal(err)
}
s2CertGetter := reload.NewCertificateGetter(s2CertFile, s2KeyFile)
s2TLSConfig := &tls.Config{
Certificates: []tls.Certificate{s2TLSCert},
RootCAs: testCluster.RootCAs,
ClientCAs: testCluster.RootCAs,
ClientAuth: tls.VerifyClientCertIfGiven,
NextProtos: []string{"h2", "http/1.1"},
GetCertificate: s2CertGetter.GetCertificate,
}
s2TLSConfig.BuildNameToCertificate()
c2lns := []*TestListener{&TestListener{
Listener: tls.NewListener(ln, tlsConfig),
Listener: tls.NewListener(ln, s2TLSConfig),
Address: ln.Addr().(*net.TCPAddr),
},
}
handler2 := http.NewServeMux()
var handler2 http.Handler = http.NewServeMux()
server2 := &http.Server{
Handler: handler2,
}
@@ -751,19 +973,41 @@ func NewTestCluster(t testing.TB, base *CoreConfig, unsealStandbys bool) *TestCl
t.Fatal(err)
}
ln, err = net.ListenTCP("tcp", &net.TCPAddr{
IP: net.ParseIP("127.0.0.1"),
Port: 0,
})
baseAddr.Port = ports[2]
ln, err = net.ListenTCP("tcp", baseAddr)
if err != nil {
t.Fatal(err)
}
s3CertFile := filepath.Join(testCluster.TempDir, fmt.Sprintf("node3_port_%d_cert.pem", ln.Addr().(*net.TCPAddr).Port))
s3KeyFile := filepath.Join(testCluster.TempDir, fmt.Sprintf("node3_port_%d_key.pem", ln.Addr().(*net.TCPAddr).Port))
err = ioutil.WriteFile(s3CertFile, s3CertPEM, 0755)
if err != nil {
t.Fatal(err)
}
err = ioutil.WriteFile(s3KeyFile, s3KeyPEM, 0755)
if err != nil {
t.Fatal(err)
}
s3TLSCert, err := tls.X509KeyPair(s3CertPEM, s3KeyPEM)
if err != nil {
t.Fatal(err)
}
s3CertGetter := reload.NewCertificateGetter(s3CertFile, s3KeyFile)
s3TLSConfig := &tls.Config{
Certificates: []tls.Certificate{s3TLSCert},
RootCAs: testCluster.RootCAs,
ClientCAs: testCluster.RootCAs,
ClientAuth: tls.VerifyClientCertIfGiven,
NextProtos: []string{"h2", "http/1.1"},
GetCertificate: s3CertGetter.GetCertificate,
}
s3TLSConfig.BuildNameToCertificate()
c3lns := []*TestListener{&TestListener{
Listener: tls.NewListener(ln, tlsConfig),
Listener: tls.NewListener(ln, s3TLSConfig),
Address: ln.Addr().(*net.TCPAddr),
},
}
handler3 := http.NewServeMux()
var handler3 http.Handler = http.NewServeMux()
server3 := &http.Server{
Handler: handler3,
}
@@ -771,22 +1015,39 @@ func NewTestCluster(t testing.TB, base *CoreConfig, unsealStandbys bool) *TestCl
t.Fatal(err)
}
// Create three cores with the same physical and different redirect/cluster addrs
// Create three cores with the same physical and different redirect/cluster
// addrs.
// N.B.: On OSX, instead of random ports, it assigns new ports to new
// listeners sequentially. Aside from being a bad idea in a security sense,
// it also broke tests that assumed it was OK to just use the port above
// the redirect addr. This has now been changed to 10 ports above, but if
// the redirect addr. This has now been changed to 105 ports above, but if
// we ever do more than three nodes in a cluster it may need to be bumped.
// Note: it's 105 so that we don't conflict with a running Consul by
// default.
coreConfig := &CoreConfig{
LogicalBackends: make(map[string]logical.Factory),
CredentialBackends: make(map[string]logical.Factory),
AuditBackends: make(map[string]audit.Factory),
RedirectAddr: fmt.Sprintf("https://127.0.0.1:%d", c1lns[0].Address.Port),
ClusterAddr: fmt.Sprintf("https://127.0.0.1:%d", c1lns[0].Address.Port+100),
ClusterAddr: fmt.Sprintf("https://127.0.0.1:%d", c1lns[0].Address.Port+105),
DisableMlock: true,
EnableUI: true,
}
if base != nil {
coreConfig.DisableCache = base.DisableCache
coreConfig.EnableUI = base.EnableUI
coreConfig.DefaultLeaseTTL = base.DefaultLeaseTTL
coreConfig.MaxLeaseTTL = base.MaxLeaseTTL
coreConfig.CacheSize = base.CacheSize
coreConfig.PluginDirectory = base.PluginDirectory
coreConfig.Seal = base.Seal
coreConfig.DevToken = base.DevToken
if !coreConfig.DisableMlock {
base.DisableMlock = false
}
if base.Physical != nil {
coreConfig.Physical = base.Physical
}
@@ -835,24 +1096,36 @@ func NewTestCluster(t testing.TB, base *CoreConfig, unsealStandbys bool) *TestCl
if err != nil {
t.Fatalf("err: %v", err)
}
if opts != nil && opts.HandlerFunc != nil {
handler1 = opts.HandlerFunc(c1)
server1.Handler = handler1
}
coreConfig.RedirectAddr = fmt.Sprintf("https://127.0.0.1:%d", c2lns[0].Address.Port)
if coreConfig.ClusterAddr != "" {
coreConfig.ClusterAddr = fmt.Sprintf("https://127.0.0.1:%d", c2lns[0].Address.Port+100)
coreConfig.ClusterAddr = fmt.Sprintf("https://127.0.0.1:%d", c2lns[0].Address.Port+105)
}
c2, err := NewCore(coreConfig)
if err != nil {
t.Fatalf("err: %v", err)
}
if opts != nil && opts.HandlerFunc != nil {
handler2 = opts.HandlerFunc(c2)
server2.Handler = handler2
}
coreConfig.RedirectAddr = fmt.Sprintf("https://127.0.0.1:%d", c3lns[0].Address.Port)
if coreConfig.ClusterAddr != "" {
coreConfig.ClusterAddr = fmt.Sprintf("https://127.0.0.1:%d", c3lns[0].Address.Port+100)
coreConfig.ClusterAddr = fmt.Sprintf("https://127.0.0.1:%d", c3lns[0].Address.Port+105)
}
c3, err := NewCore(coreConfig)
if err != nil {
t.Fatalf("err: %v", err)
}
if opts != nil && opts.HandlerFunc != nil {
handler3 = opts.HandlerFunc(c3)
server3.Handler = handler3
}
//
// Clustering setup
@@ -862,7 +1135,7 @@ func NewTestCluster(t testing.TB, base *CoreConfig, unsealStandbys bool) *TestCl
for i, ln := range lns {
ret[i] = &net.TCPAddr{
IP: ln.Address.IP,
Port: ln.Address.Port + 100,
Port: ln.Address.Port + 105,
}
}
return ret
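// A hedged worked example of the offset above (assumed base port 8200,
// Vault's usual dev port; values are illustrative only):
//
//	c1 API listener 127.0.0.1:8200 -> cluster listener 127.0.0.1:8305
//	c2 API listener 127.0.0.1:8201 -> cluster listener 127.0.0.1:8306
//	c3 API listener 127.0.0.1:8202 -> cluster listener 127.0.0.1:8307
//
// The old +100 offset would have put c1's cluster listener on 8300, the
// default Consul server RPC port.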
@@ -872,7 +1145,28 @@ func NewTestCluster(t testing.TB, base *CoreConfig, unsealStandbys bool) *TestCl
c2.SetClusterHandler(handler2)
c3.SetClusterListenerAddrs(clusterAddrGen(c3lns))
c3.SetClusterHandler(handler3)
keys, root := TestCoreInitClusterWrapperSetup(t, c1, clusterAddrGen(c1lns), handler1)
barrierKeys, _ := copystructure.Copy(keys)
testCluster.BarrierKeys = barrierKeys.([][]byte)
testCluster.RootToken = root
err = ioutil.WriteFile(filepath.Join(testCluster.TempDir, "root_token"), []byte(root), 0755)
if err != nil {
t.Fatal(err)
}
var buf bytes.Buffer
for i, key := range testCluster.BarrierKeys {
buf.Write([]byte(base64.StdEncoding.EncodeToString(key)))
if i < len(testCluster.BarrierKeys)-1 {
buf.WriteRune('\n')
}
}
err = ioutil.WriteFile(filepath.Join(testCluster.TempDir, "barrier_keys"), buf.Bytes(), 0755)
if err != nil {
t.Fatal(err)
}
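// Not part of this change: a sketch of how a consumer (say, a dev-mode
// three-node runner) might read these files back. readTestClusterFiles is a
// hypothetical helper name; it assumes the exact TempDir layout written
// above (ioutil, filepath, strings, and encoding/base64 imports implied).
//
//	func readTestClusterFiles(tempDir string) (keys [][]byte, root string, err error) {
//		rootBytes, err := ioutil.ReadFile(filepath.Join(tempDir, "root_token"))
//		if err != nil {
//			return nil, "", err
//		}
//		keyBytes, err := ioutil.ReadFile(filepath.Join(tempDir, "barrier_keys"))
//		if err != nil {
//			return nil, "", err
//		}
//		// One base64-encoded barrier key per line, no trailing newline.
//		for _, line := range strings.Split(string(keyBytes), "\n") {
//			key, err := base64.StdEncoding.DecodeString(line)
//			if err != nil {
//				return nil, "", err
//			}
//			keys = append(keys, key)
//		}
//		return keys, string(rootBytes), nil
//	}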
for _, key := range keys {
if _, err := c1.Unseal(TestKeyCopy(key)); err != nil {
t.Fatalf("unseal err: %s", err)
@@ -890,7 +1184,7 @@ func NewTestCluster(t testing.TB, base *CoreConfig, unsealStandbys bool) *TestCl
TestWaitActive(t, c1)
if unsealStandbys {
if opts == nil || !opts.KeepStandbysSealed {
for _, key := range keys {
if _, err := c2.Unseal(TestKeyCopy(key)); err != nil {
t.Fatalf("unseal err: %s", err)
@@ -926,8 +1220,9 @@ func NewTestCluster(t testing.TB, base *CoreConfig, unsealStandbys bool) *TestCl
if err != nil {
t.Fatal(err)
}
testCluster.ID = cluster.ID
getAPIClient := func(port int) *api.Client {
getAPIClient := func(port int, tlsConfig *tls.Config) *api.Client {
transport := cleanhttp.DefaultPooledTransport()
transport.TLSClientConfig = tlsConfig
client := &http.Client{
@@ -949,152 +1244,66 @@ func NewTestCluster(t testing.TB, base *CoreConfig, unsealStandbys bool) *TestCl
}
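// Not part of this diff: a sketch of what the per-node clients built by
// getAPIClient enable once the cluster is started and unsealed. Each client
// speaks TLS with its own node's config, so a test can probe one node
// directly; Sys().Health() and HealthResponse.Standby are existing
// vault/api surface.
//
//	resp, err := testCluster.Cores[1].Client.Sys().Health()
//	if err != nil {
//		t.Fatal(err)
//	}
//	if !resp.Standby {
//		t.Fatal("expected core 2 to be a standby")
//	}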
var ret []*TestClusterCore
keyCopies, _ := copystructure.Copy(keys)
ret = append(ret, &TestClusterCore{
t1 := &TestClusterCore{
Core: c1,
ServerKey: s1Key,
ServerKeyPEM: s1KeyPEM,
ServerCert: s1Cert,
ServerCertBytes: s1CertBytes,
ServerCertPEM: s1CertPEM,
Listeners: c1lns,
Handler: handler1,
Server: server1,
Root: root,
BarrierKeys: keyCopies.([][]byte),
CACertBytes: caBytes,
CACert: caCert,
TLSConfig: tlsConfig,
ClusterID: cluster.ID,
Client: getAPIClient(c1lns[0].Address.Port),
})
TLSConfig: s1TLSConfig,
Client: getAPIClient(c1lns[0].Address.Port, s1TLSConfig),
}
t1.ReloadFuncs = &c1.reloadFuncs
t1.ReloadFuncsLock = &c1.reloadFuncsLock
t1.ReloadFuncsLock.Lock()
(*t1.ReloadFuncs)["listener|tcp"] = []reload.ReloadFunc{s1CertGetter.Reload}
t1.ReloadFuncsLock.Unlock()
ret = append(ret, t1)
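// Not part of this diff: a sketch of exercising the reload hook registered
// above. After overwriting s1CertFile/s1KeyFile with a reissued pair, a test
// could trigger the same path a SIGHUP would. The nil argument assumes the
// helper/reload ReloadFunc signature takes an unused map.
//
//	t1.ReloadFuncsLock.RLock()
//	for _, rf := range (*t1.ReloadFuncs)["listener|tcp"] {
//		if err := rf(nil); err != nil {
//			t.Fatal(err)
//		}
//	}
//	t1.ReloadFuncsLock.RUnlock()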
keyCopies, _ = copystructure.Copy(keys)
ret = append(ret, &TestClusterCore{
t2 := &TestClusterCore{
Core: c2,
ServerKey: s2Key,
ServerKeyPEM: s2KeyPEM,
ServerCert: s2Cert,
ServerCertBytes: s2CertBytes,
ServerCertPEM: s2CertPEM,
Listeners: c2lns,
Handler: handler2,
Server: server2,
Root: root,
BarrierKeys: keyCopies.([][]byte),
CACertBytes: caBytes,
CACert: caCert,
TLSConfig: tlsConfig,
ClusterID: cluster.ID,
Client: getAPIClient(c2lns[0].Address.Port),
})
TLSConfig: s2TLSConfig,
Client: getAPIClient(c2lns[0].Address.Port, s2TLSConfig),
}
t2.ReloadFuncs = &c2.reloadFuncs
t2.ReloadFuncsLock = &c2.reloadFuncsLock
t2.ReloadFuncsLock.Lock()
(*t2.ReloadFuncs)["listener|tcp"] = []reload.ReloadFunc{s2CertGetter.Reload}
t2.ReloadFuncsLock.Unlock()
ret = append(ret, t2)
keyCopies, _ = copystructure.Copy(keys)
ret = append(ret, &TestClusterCore{
t3 := &TestClusterCore{
Core: c3,
ServerKey: s3Key,
ServerKeyPEM: s3KeyPEM,
ServerCert: s3Cert,
ServerCertBytes: s3CertBytes,
ServerCertPEM: s3CertPEM,
Listeners: c3lns,
Handler: handler3,
Server: server3,
Root: root,
BarrierKeys: keyCopies.([][]byte),
CACertBytes: caBytes,
CACert: caCert,
TLSConfig: tlsConfig,
ClusterID: cluster.ID,
Client: getAPIClient(c3lns[0].Address.Port),
})
TLSConfig: s3TLSConfig,
Client: getAPIClient(c3lns[0].Address.Port, s3TLSConfig),
}
t3.ReloadFuncs = &c3.reloadFuncs
t3.ReloadFuncsLock = &c3.reloadFuncsLock
t3.ReloadFuncsLock.Lock()
(*t3.ReloadFuncs)["listener|tcp"] = []reload.ReloadFunc{s3CertGetter.Reload}
t3.ReloadFuncsLock.Unlock()
ret = append(ret, t3)
return &TestCluster{Cores: ret}
testCluster.Cores = ret
return &testCluster
}
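// A hedged usage sketch of the reworked constructor (TestClusterOptions
// fields as they appear in this diff; nil base config and error handling
// elided for brevity): a three-node cluster whose standbys stay sealed.
//
//	cluster := NewTestCluster(t, nil, &TestClusterOptions{
//		KeepStandbysSealed: true,
//	})
//	cluster.Start()
//	defer cluster.Cleanup()
//	TestWaitActive(t, cluster.Cores[0].Core)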
const (
TestClusterCACert = `-----BEGIN CERTIFICATE-----
MIIDPjCCAiagAwIBAgIUfIKsF2VPT7sdFcKOHJH2Ii6K4MwwDQYJKoZIhvcNAQEL
BQAwFjEUMBIGA1UEAxMLbXl2YXVsdC5jb20wIBcNMTYwNTAyMTYwNTQyWhgPMjA2
NjA0MjAxNjA2MTJaMBYxFDASBgNVBAMTC215dmF1bHQuY29tMIIBIjANBgkqhkiG
9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuOimEXawD2qBoLCFP3Skq5zi1XzzcMAJlfdS
xz9hfymuJb+cN8rB91HOdU9wQCwVKnkUtGWxUnMp0tT0uAZj5NzhNfyinf0JGAbP
67HDzVZhGBHlHTjPX0638yaiUx90cTnucX0N20SgCYct29dMSgcPl+W78D3Jw3xE
JsHQPYS9ASe2eONxG09F/qNw7w/RO5/6WYoV2EmdarMMxq52pPe2chtNMQdSyOUb
cCcIZyk4QVFZ1ZLl6jTnUPb+JoCx1uMxXvMek4NF/5IL0Wr9dw2gKXKVKoHDr6SY
WrCONRw61A5Zwx1V+kn73YX3USRlkufQv/ih6/xThYDAXDC9cwIDAQABo4GBMH8w
DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFOuKvPiU
G06iHkRXAOeMiUdBfHFyMB8GA1UdIwQYMBaAFOuKvPiUG06iHkRXAOeMiUdBfHFy
MBwGA1UdEQQVMBOCC215dmF1bHQuY29thwR/AAABMA0GCSqGSIb3DQEBCwUAA4IB
AQBcN/UdAMzc7UjRdnIpZvO+5keBGhL/vjltnGM1dMWYHa60Y5oh7UIXF+P1RdNW
n7g80lOyvkSR15/r1rDkqOK8/4oruXU31EcwGhDOC4hU6yMUy4ltV/nBoodHBXNh
MfKiXeOstH1vdI6G0P6W93Bcww6RyV1KH6sT2dbETCw+iq2VN9CrruGIWzd67UT/
spe/kYttr3UYVV3O9kqgffVVgVXg/JoRZ3J7Hy2UEXfh9UtWNanDlRuXaZgE9s/d
CpA30CHpNXvKeyNeW2ktv+2nAbSpvNW+e6MecBCTBIoDSkgU8ShbrzmDKVwNN66Q
5gn6KxUPBKHEtNzs5DgGM7nq
-----END CERTIFICATE-----`
TestClusterCAKey = `-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAuOimEXawD2qBoLCFP3Skq5zi1XzzcMAJlfdSxz9hfymuJb+c
N8rB91HOdU9wQCwVKnkUtGWxUnMp0tT0uAZj5NzhNfyinf0JGAbP67HDzVZhGBHl
HTjPX0638yaiUx90cTnucX0N20SgCYct29dMSgcPl+W78D3Jw3xEJsHQPYS9ASe2
eONxG09F/qNw7w/RO5/6WYoV2EmdarMMxq52pPe2chtNMQdSyOUbcCcIZyk4QVFZ
1ZLl6jTnUPb+JoCx1uMxXvMek4NF/5IL0Wr9dw2gKXKVKoHDr6SYWrCONRw61A5Z
wx1V+kn73YX3USRlkufQv/ih6/xThYDAXDC9cwIDAQABAoIBAG3bCo7ljMQb6tel
CAUjL5Ilqz5a9ebOsONABRYLOclq4ePbatxawdJF7/sSLwZxKkIJnZtvr2Hkubxg
eOO8KC0YbVS9u39Rjc2QfobxHfsojpbWSuCJl+pvwinbkiUAUxXR7S/PtCPJKat/
fGdYCiMQ/tqnynh4vR4+/d5o12c0KuuQ22/MdEf3GOadUamRXS1ET9iJWqla1pJW
TmzrlkGAEnR5PPO2RMxbnZCYmj3dArxWAnB57W+bWYla0DstkDKtwg2j2ikNZpXB
nkZJJpxR76IYD1GxfwftqAKxujKcyfqB0dIKCJ0UmfOkauNWjexroNLwaAOC3Nud
XIxppAECgYEA1wJ9EH6A6CrSjdzUocF9LtQy1LCDHbdiQFHxM5/zZqIxraJZ8Gzh
Q0d8JeOjwPdG4zL9pHcWS7+x64Wmfn0+Qfh6/47Vy3v90PIL0AeZYshrVZyJ/s6X
YkgFK80KEuWtacqIZ1K2UJyCw81u/ynIl2doRsIbgkbNeN0opjmqVTMCgYEA3CkW
2fETWK1LvmgKFjG1TjOotVRIOUfy4iN0kznPm6DK2PgTF5DX5RfktlmA8i8WPmB7
YFOEdAWHf+RtoM/URa7EAGZncCWe6uggAcWqznTS619BJ63OmncpSWov5Byg90gJ
48qIMY4wDjE85ypz1bmBc2Iph974dtWeDtB7dsECgYAyKZh4EquMfwEkq9LH8lZ8
aHF7gbr1YeWAUB3QB49H8KtacTg+iYh8o97pEBUSXh6hvzHB/y6qeYzPAB16AUpX
Jdu8Z9ylXsY2y2HKJRu6GjxAewcO9bAH8/mQ4INrKT6uIdx1Dq0OXZV8jR9KVLtB
55RCfeLhIBesDR0Auw9sVQKBgB0xTZhkgP43LF35Ca1btgDClNJGdLUztx8JOIH1
HnQyY/NVIaL0T8xO2MLdJ131pGts+68QI/YGbaslrOuv4yPCQrcS3RBfzKy1Ttkt
TrLFhtoy7T7HqyeMOWtEq0kCCs3/PWB5EIoRoomfOcYlOOrUCDg2ge9EP4nyVVz9
hAGBAoGBAJXw/ufevxpBJJMSyULmVWYr34GwLC1OhSE6AVVt9JkIYnc5L4xBKTHP
QNKKJLmFmMsEqfxHUNWmpiHkm2E0p37Zehui3kywo+A4ybHPTua70ZWQfZhKxLUr
PvJa8JmwiCM7kO8zjOv+edY1mMWrbjAZH1YUbfcTHmST7S8vp0F3
-----END RSA PRIVATE KEY-----`
TestClusterServerCert = `-----BEGIN CERTIFICATE-----
MIIDtzCCAp+gAwIBAgIUBLqh6ctGWVDUxFhxJX7m6S/bnrcwDQYJKoZIhvcNAQEL
BQAwFjEUMBIGA1UEAxMLbXl2YXVsdC5jb20wIBcNMTYwNTAyMTYwOTI2WhgPMjA2
NjA0MjAxNTA5NTZaMBsxGTAXBgNVBAMTEGNlcnQubXl2YXVsdC5jb20wggEiMA0G
CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDY3gPB29kkdbu0mPO6J0efagQhSiXB
9OyDuLf5sMk6CVDWVWal5hISkyBmw/lXgF7qC2XFKivpJOrcGQd5Ep9otBqyJLzI
b0IWdXuPIrVnXDwcdWr86ybX2iC42zKWfbXgjzGijeAVpl0UJLKBj+fk5q6NvkRL
5FUL6TRV7Krn9mrmnrV9J5IqV15pTd9W2aVJ6IqWvIPCACtZKulqWn4707uy2X2W
1Stq/5qnp1pDshiGk1VPyxCwQ6yw3iEcgecbYo3vQfhWcv7Q8LpSIM9ZYpXu6OmF
+czqRZS9gERl+wipmmrN1MdYVrTuQem21C/PNZ4jo4XUk1SFx6JrcA+lAgMBAAGj
gfUwgfIwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBSe
Cl9WV3BjGCwmS/KrDSLRjfwyqjAfBgNVHSMEGDAWgBTrirz4lBtOoh5EVwDnjIlH
QXxxcjA7BggrBgEFBQcBAQQvMC0wKwYIKwYBBQUHMAKGH2h0dHA6Ly8xMjcuMC4w
LjE6ODIwMC92MS9wa2kvY2EwIQYDVR0RBBowGIIQY2VydC5teXZhdWx0LmNvbYcE
fwAAATAxBgNVHR8EKjAoMCagJKAihiBodHRwOi8vMTI3LjAuMC4xOjgyMDAvdjEv
cGtpL2NybDANBgkqhkiG9w0BAQsFAAOCAQEAWGholPN8buDYwKbUiDavbzjsxUIX
lU4MxEqOHw7CD3qIYIauPboLvB9EldBQwhgOOy607Yvdg3rtyYwyBFwPhHo/hK3Z
6mn4hc6TF2V+AUdHBvGzp2dbYLeo8noVoWbQ/lBulggwlIHNNF6+a3kALqsqk1Ch
f/hzsjFnDhAlNcYFgG8TgfE2lE/FckvejPqBffo7Q3I+wVAw0buqiz5QL81NOT+D
Y2S9LLKLRaCsWo9wRU1Az4Rhd7vK5SEMh16jJ82GyEODWPvuxOTI1MnzfnbWyLYe
TTp6YBjGMVf1I6NEcWNur7U17uIOiQjMZ9krNvoMJ1A/cxCoZ98QHgcIPg==
-----END CERTIFICATE-----`
TestClusterServerKey = `-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEA2N4DwdvZJHW7tJjzuidHn2oEIUolwfTsg7i3+bDJOglQ1lVm
peYSEpMgZsP5V4Be6gtlxSor6STq3BkHeRKfaLQasiS8yG9CFnV7jyK1Z1w8HHVq
/Osm19oguNsyln214I8xoo3gFaZdFCSygY/n5Oaujb5ES+RVC+k0Veyq5/Zq5p61
fSeSKldeaU3fVtmlSeiKlryDwgArWSrpalp+O9O7stl9ltUrav+ap6daQ7IYhpNV
T8sQsEOssN4hHIHnG2KN70H4VnL+0PC6UiDPWWKV7ujphfnM6kWUvYBEZfsIqZpq
zdTHWFa07kHpttQvzzWeI6OF1JNUhceia3APpQIDAQABAoIBAQCH3vEzr+3nreug
RoPNCXcSJXXY9X+aeT0FeeGqClzIg7Wl03OwVOjVwl/2gqnhbIgK0oE8eiNwurR6
mSPZcxV0oAJpwiKU4T/imlCDaReGXn86xUX2l82KRxthNdQH/VLKEmzij0jpx4Vh
bWx5SBPdkbmjDKX1dmTiRYWIn/KjyNPvNvmtwdi8Qluhf4eJcNEUr2BtblnGOmfL
FdSu+brPJozpoQ1QdDnbAQRgqnh7Shl0tT85whQi0uquqIj1gEOGVjmBvDDnL3GV
WOENTKqsmIIoEzdZrql1pfmYTk7WNaD92bfpN128j8BF7RmAV4/DphH0pvK05y9m
tmRhyHGxAoGBAOV2BBocsm6xup575VqmFN+EnIOiTn+haOvfdnVsyQHnth63fOQx
PNtMpTPR1OMKGpJ13e2bV0IgcYRsRkScVkUtoa/17VIgqZXffnJJ0A/HT67uKBq3
8o7RrtyK5N20otw0lZHyqOPhyCdpSsurDhNON1kPVJVYY4N1RiIxfut/AoGBAPHz
HfsJ5ZkyELE9N/r4fce04lprxWH+mQGK0/PfjS9caXPhj/r5ZkVMvzWesF3mmnY8
goE5S35TuTvV1+6rKGizwlCFAQlyXJiFpOryNWpLwCmDDSzLcm+sToAlML3tMgWU
jM3dWHx3C93c3ft4rSWJaUYI9JbHsMzDW6Yh+GbbAoGBANIbKwxh5Hx5XwEJP2yu
kIROYCYkMy6otHLujgBdmPyWl+suZjxoXWoMl2SIqR8vPD+Jj6mmyNJy9J6lqf3f
DRuQ+fEuBZ1i7QWfvJ+XuN0JyovJ5Iz6jC58D1pAD+p2IX3y5FXcVQs8zVJRFjzB
p0TEJOf2oqORaKWRd6ONoMKvAoGALKu6aVMWdQZtVov6/fdLIcgf0pn7Q3CCR2qe
X3Ry2L+zKJYIw0mwvDLDSt8VqQCenB3n6nvtmFFU7ds5lvM67rnhsoQcAOaAehiS
rl4xxoJd5Ewx7odRhZTGmZpEOYzFo4odxRSM9c30/u18fqV1Mm0AZtHYds4/sk6P
aUj0V+kCgYBMpGrJk8RSez5g0XZ35HfpI4ENoWbiwB59FIpWsLl2LADEh29eC455
t9Muq7MprBVBHQo11TMLLFxDIjkuMho/gcKgpYXCt0LfiNm8EZehvLJUXH+3WqUx
we6ywrbFCs6LaxaOCtTiLsN+GbZCatITL0UJaeBmTAbiw0KQjUuZPQ==
-----END RSA PRIVATE KEY-----`
)