Mirror of https://github.com/optim-enterprises-bv/vault.git, synced 2025-10-30 02:02:43 +00:00
	Backport of VAULT-20476: vault.NewCore refactor. into release/1.13.x (#23658)
* VAULT-20476: vault.NewCore refactor. (#23644)
* NewCore tech debt refactoring
* addExtraCredentialBackends
* singletonMounts => mountTypeToken instead of 'token'
* NewCore tests support ent backend addition
* PR feedback
* reorder method calls
* mountPath___ standardization
* Try to be more explicit about the min number of backends
* Include cluster listener
* explicit declaration of events before assignment
* Removed nil checking
* resolve conflicts
* resolve conflicts
* resolve conflicts

Co-authored-by: Peter Wilson <peter.wilson@hashicorp.com>
This commit is contained in:
hc-github-team-secure-vault-core, committed by GitHub

parent 7e451a9853
commit 624ed9196a
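The refactor makes two recurring moves: string literals such as "token" become named constants (mountTypeToken, mountTypeKV, mountPathCubbyhole), and the inline backend-map setup in NewCore is pulled out into focused configure* methods on Core. The standalone Go sketch below illustrates that shape only; Core, Factory and Backend here are simplified stand-ins for the example, not the real Vault types.

package main

import "fmt"

// Named constants replace scattered string literals, mirroring the
// mountTypeToken / mountTypeKV renames in this commit.
const (
	mountTypeToken = "token"
	mountTypeKV    = "kv"
)

// Backend, Factory and Core are simplified stand-ins for this sketch.
type Backend struct{ name string }

type Factory func() (*Backend, error)

type Core struct {
	credentialBackends map[string]Factory
}

// configureCredentialsBackends copies the caller-supplied factories and
// always registers the token backend, echoing the structure of the new
// Core.configureCredentialsBackends helper shown in the diff below.
func (c *Core) configureCredentialsBackends(backends map[string]Factory) {
	credentialBackends := make(map[string]Factory, len(backends))
	for k, f := range backends {
		credentialBackends[k] = f
	}
	credentialBackends[mountTypeToken] = func() (*Backend, error) {
		return &Backend{name: "token store"}, nil
	}
	c.credentialBackends = credentialBackends
}

func main() {
	c := &Core{}
	c.configureCredentialsBackends(map[string]Factory{
		"userpass": func() (*Backend, error) { return &Backend{name: "userpass"}, nil },
	})

	// The singleton check compares against the constant rather than "token".
	if _, ok := c.credentialBackends[mountTypeToken]; ok {
		fmt.Println("token backend registered")
	}
}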
				
			| @@ -113,7 +113,7 @@ func (c *Core) enableCredentialInternal(ctx context.Context, entry *MountEntry, | ||||
| 	} | ||||
|  | ||||
| 	// Ensure the token backend is a singleton | ||||
| 	if entry.Type == "token" { | ||||
| 	if entry.Type == mountTypeToken { | ||||
| 		return fmt.Errorf("token credential backend cannot be instantiated") | ||||
| 	} | ||||
|  | ||||
| @@ -880,7 +880,7 @@ func (c *Core) setupCredentials(ctx context.Context) error { | ||||
| 		} | ||||
|  | ||||
| 		// Check if this is the token store | ||||
| 		if entry.Type == "token" { | ||||
| 		if entry.Type == mountTypeToken { | ||||
| 			c.tokenStore = backend.(*TokenStore) | ||||
|  | ||||
| 			// At some point when this isn't beta we may persist this but for | ||||
| @@ -890,7 +890,7 @@ func (c *Core) setupCredentials(ctx context.Context) error { | ||||
| 			// this is loaded *after* the normal mounts, including cubbyhole | ||||
| 			c.router.tokenStoreSaltFunc = c.tokenStore.Salt | ||||
| 			if !c.IsDRSecondary() { | ||||
| 				c.tokenStore.cubbyholeBackend = c.router.MatchingBackend(ctx, cubbyholeMountPath).(*CubbyholeBackend) | ||||
| 				c.tokenStore.cubbyholeBackend = c.router.MatchingBackend(ctx, mountPathCubbyhole).(*CubbyholeBackend) | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| @@ -1045,7 +1045,7 @@ func (c *Core) defaultAuthTable() *MountTable { | ||||
| 	tokenAuth := &MountEntry{ | ||||
| 		Table:            credentialTableType, | ||||
| 		Path:             "token/", | ||||
| 		Type:             "token", | ||||
| 		Type:             mountTypeToken, | ||||
| 		Description:      "token based credentials", | ||||
| 		UUID:             tokenUUID, | ||||
| 		Accessor:         tokenAccessor, | ||||
|   | ||||
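The wiring in the hunk above uses bare type assertions (backend.(*TokenStore), .(*CubbyholeBackend)), which panic if the router returns a different concrete type; the comma-ok form would surface the mismatch instead. A standalone illustration of the difference, with made-up types for the example:

package main

import "fmt"

// Backend, CubbyholeBackend and KVBackend are made-up types for this
// illustration; they are not the Vault implementations.
type Backend interface{ Name() string }

type CubbyholeBackend struct{}

func (CubbyholeBackend) Name() string { return "cubbyhole" }

type KVBackend struct{}

func (KVBackend) Name() string { return "kv" }

func main() {
	var b Backend = KVBackend{}

	// Comma-ok form: reports the mismatch instead of panicking.
	if ch, ok := b.(CubbyholeBackend); ok {
		fmt.Println("got cubbyhole:", ch.Name())
	} else {
		fmt.Println("not a cubbyhole backend:", b.Name())
	}

	// A bare assertion, b.(CubbyholeBackend), would panic at this point;
	// the Vault code uses the bare form because the cubbyhole mount is
	// expected to always be present and of the expected type.
}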
							
								
								
									
vault/core.go: 233 changes
							| @@ -122,6 +122,15 @@ const ( | ||||
| 	// undoLogsAreSafeStoragePath is a storage path that we write once we know undo logs are | ||||
| 	// safe, so we don't have to keep checking all the time. | ||||
| 	undoLogsAreSafeStoragePath = "core/raft/undo_logs_are_safe" | ||||
|  | ||||
| 	ErrMlockFailedTemplate = "Failed to lock memory: %v\n\n" + | ||||
| 		"This usually means that the mlock syscall is not available.\n" + | ||||
| 		"Vault uses mlock to prevent memory from being swapped to\n" + | ||||
| 		"disk. This requires root privileges as well as a machine\n" + | ||||
| 		"that supports mlock. Please enable mlock on your system or\n" + | ||||
| 		"disable Vault from using it. To disable Vault from using it,\n" + | ||||
| 		"set the `disable_mlock` configuration option in your configuration\n" + | ||||
| 		"file." | ||||
| ) | ||||
|  | ||||
| var ( | ||||
| @@ -1121,30 +1130,27 @@ func CreateCore(conf *CoreConfig) (*Core, error) { | ||||
| 	return c, nil | ||||
| } | ||||
|  | ||||
| // NewCore is used to construct a new core | ||||
| // NewCore creates, initializes and configures a Vault node (core). | ||||
| func NewCore(conf *CoreConfig) (*Core, error) { | ||||
| 	var err error | ||||
| 	// NOTE: The order of configuration of the core has some importance, as we can | ||||
| 	// make use of an early return if we are running this new core in recovery mode. | ||||
| 	c, err := CreateCore(conf) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	if err = coreInit(c, conf); err != nil { | ||||
|  | ||||
| 	err = coreInit(c, conf) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	if !conf.DisableMlock { | ||||
| 		// Ensure our memory usage is locked into physical RAM | ||||
| 		if err := mlock.LockMemory(); err != nil { | ||||
| 			return nil, fmt.Errorf( | ||||
| 				"Failed to lock memory: %v\n\n"+ | ||||
| 					"This usually means that the mlock syscall is not available.\n"+ | ||||
| 					"Vault uses mlock to prevent memory from being swapped to\n"+ | ||||
| 					"disk. This requires root privileges as well as a machine\n"+ | ||||
| 					"that supports mlock. Please enable mlock on your system or\n"+ | ||||
| 					"disable Vault from using it. To disable Vault from using it,\n"+ | ||||
| 					"set the `disable_mlock` configuration option in your configuration\n"+ | ||||
| 					"file.", | ||||
| 				err) | ||||
| 	switch { | ||||
| 	case conf.DisableMlock: | ||||
| 		// User configured that memory lock should be disabled on unix systems. | ||||
| 	default: | ||||
| 		err = mlock.LockMemory() | ||||
| 		if err != nil { | ||||
| 			return nil, fmt.Errorf(ErrMlockFailedTemplate, err) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| @@ -1154,9 +1160,11 @@ func NewCore(conf *CoreConfig) (*Core, error) { | ||||
| 		return nil, fmt.Errorf("barrier setup failed: %w", err) | ||||
| 	} | ||||
|  | ||||
| 	if err := storedLicenseCheck(c, conf); err != nil { | ||||
| 	err = storedLicenseCheck(c, conf) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	// We create the funcs here, then populate the given config with it so that | ||||
| 	// the caller can share state | ||||
| 	conf.ReloadFuncsLock = &c.reloadFuncsLock | ||||
| @@ -1166,12 +1174,12 @@ func NewCore(conf *CoreConfig) (*Core, error) { | ||||
| 	conf.ReloadFuncs = &c.reloadFuncs | ||||
|  | ||||
| 	c.rollbackPeriod = conf.RollbackPeriod | ||||
| 	if conf.RollbackPeriod == 0 { | ||||
| 		c.rollbackPeriod = time.Minute | ||||
| 	if c.rollbackPeriod == 0 { | ||||
| 		// Default to 1 minute | ||||
| 		c.rollbackPeriod = 1 * time.Minute | ||||
| 	} | ||||
|  | ||||
| 	// All the things happening below this are not required in | ||||
| 	// recovery mode | ||||
| 	// For recovery mode we've now configured enough to return early. | ||||
| 	if c.recoveryMode { | ||||
| 		return c, nil | ||||
| 	} | ||||
| @@ -1190,81 +1198,39 @@ func NewCore(conf *CoreConfig) (*Core, error) { | ||||
| 		c.pluginFilePermissions = conf.PluginFilePermissions | ||||
| 	} | ||||
|  | ||||
| 	createSecondaries(c, conf) | ||||
| 	// Create secondaries (this will only impact Enterprise versions of Vault) | ||||
| 	c.createSecondaries(conf.Logger) | ||||
|  | ||||
| 	if conf.HAPhysical != nil && conf.HAPhysical.HAEnabled() { | ||||
| 		c.ha = conf.HAPhysical | ||||
| 	} | ||||
|  | ||||
| 	// MFA method | ||||
| 	c.loginMFABackend = NewLoginMFABackend(c, conf.Logger) | ||||
|  | ||||
| 	logicalBackends := make(map[string]logical.Factory) | ||||
| 	for k, f := range conf.LogicalBackends { | ||||
| 		logicalBackends[k] = f | ||||
| 	} | ||||
| 	_, ok := logicalBackends["kv"] | ||||
| 	if !ok { | ||||
| 		logicalBackends["kv"] = PassthroughBackendFactory | ||||
| 	} | ||||
| 	// Logical backends | ||||
| 	c.configureLogicalBackends(conf.LogicalBackends, conf.Logger, conf.AdministrativeNamespacePath) | ||||
|  | ||||
| 	logicalBackends["cubbyhole"] = CubbyholeBackendFactory | ||||
| 	logicalBackends[systemMountType] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) { | ||||
| 		sysBackendLogger := conf.Logger.Named("system") | ||||
| 		b := NewSystemBackend(c, sysBackendLogger) | ||||
| 		if err := b.Setup(ctx, config); err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		return b, nil | ||||
| 	} | ||||
| 	logicalBackends["identity"] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) { | ||||
| 		identityLogger := conf.Logger.Named("identity") | ||||
| 		return NewIdentityStore(ctx, c, config, identityLogger) | ||||
| 	} | ||||
| 	addExtraLogicalBackends(c, logicalBackends, conf.AdministrativeNamespacePath) | ||||
| 	c.logicalBackends = logicalBackends | ||||
| 	// Credentials backends | ||||
| 	c.configureCredentialsBackends(conf.CredentialBackends, conf.Logger) | ||||
|  | ||||
| 	credentialBackends := make(map[string]logical.Factory) | ||||
| 	for k, f := range conf.CredentialBackends { | ||||
| 		credentialBackends[k] = f | ||||
| 	} | ||||
| 	credentialBackends["token"] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) { | ||||
| 		tsLogger := conf.Logger.Named("token") | ||||
| 		return NewTokenStore(ctx, tsLogger, c, config) | ||||
| 	} | ||||
| 	addExtraCredentialBackends(c, credentialBackends) | ||||
| 	c.credentialBackends = credentialBackends | ||||
|  | ||||
| 	auditBackends := make(map[string]audit.Factory) | ||||
| 	for k, f := range conf.AuditBackends { | ||||
| 		auditBackends[k] = f | ||||
| 	} | ||||
| 	c.auditBackends = auditBackends | ||||
| 	// Audit backends | ||||
| 	c.configureAuditBackends(conf.AuditBackends) | ||||
|  | ||||
| 	// UI | ||||
| 	uiStoragePrefix := systemBarrierPrefix + "ui" | ||||
| 	c.uiConfig = NewUIConfig(conf.EnableUI, physical.NewView(c.physical, uiStoragePrefix), NewBarrierView(c.barrier, uiStoragePrefix)) | ||||
|  | ||||
| 	c.clusterListener.Store((*cluster.Listener)(nil)) | ||||
|  | ||||
| 	// for listeners with custom response headers, configuring customListenerHeader | ||||
| 	if conf.RawConfig.Listeners != nil { | ||||
| 		uiHeaders, err := c.UIHeaders() | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		c.customListenerHeader.Store(NewListenerCustomHeader(conf.RawConfig.Listeners, c.logger, uiHeaders)) | ||||
| 	} else { | ||||
| 		c.customListenerHeader.Store(([]*ListenerCustomHeaders)(nil)) | ||||
| 	// Listeners | ||||
| 	err = c.configureListeners(conf) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	logRequestsLevel := conf.RawConfig.LogRequestsLevel | ||||
| 	c.logRequestsLevel = uberAtomic.NewInt32(0) | ||||
| 	switch { | ||||
| 	case log.LevelFromString(logRequestsLevel) > log.NoLevel && log.LevelFromString(logRequestsLevel) < log.Off: | ||||
| 		c.logRequestsLevel.Store(int32(log.LevelFromString(logRequestsLevel))) | ||||
| 	case logRequestsLevel != "": | ||||
| 		c.logger.Warn("invalid log_requests_level", "level", conf.RawConfig.LogRequestsLevel) | ||||
| 	} | ||||
| 	// Log level | ||||
| 	c.configureLogRequestLevel(conf.RawConfig.LogLevel) | ||||
|  | ||||
| 	// Quotas | ||||
| 	quotasLogger := conf.Logger.Named("quotas") | ||||
| 	c.quotaManager, err = quotas.NewManager(quotasLogger, c.quotaLeaseWalker, c.metricSink) | ||||
| 	if err != nil { | ||||
| @@ -1276,14 +1242,14 @@ func NewCore(conf *CoreConfig) (*Core, error) { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	// Version history | ||||
| 	if c.versionHistory == nil { | ||||
| 		c.logger.Info("Initializing version history cache for core") | ||||
| 		c.versionHistory = make(map[string]VaultVersion) | ||||
| 	} | ||||
|  | ||||
| 	// start the event system | ||||
| 	eventsLogger := conf.Logger.Named("events") | ||||
| 	events, err := eventbus.NewEventBus(eventsLogger) | ||||
| 	// Events | ||||
| 	events, err := eventbus.NewEventBus(conf.Logger.Named("events")) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| @@ -1296,9 +1262,110 @@ func NewCore(conf *CoreConfig) (*Core, error) { | ||||
| 	// yet registered core to the server command's SubloggerAdder, so any new | ||||
| 	// subloggers will be in conf.AllLoggers. | ||||
| 	c.allLoggers = conf.AllLoggers | ||||
|  | ||||
| 	return c, nil | ||||
| } | ||||
|  | ||||
| // configureListeners configures the Core with the listeners from the CoreConfig. | ||||
| func (c *Core) configureListeners(conf *CoreConfig) error { | ||||
| 	c.clusterListener.Store((*cluster.Listener)(nil)) | ||||
|  | ||||
| 	if conf.RawConfig.Listeners == nil { | ||||
| 		c.customListenerHeader.Store(([]*ListenerCustomHeaders)(nil)) | ||||
| 		return nil | ||||
| 	} | ||||
|  | ||||
| 	uiHeaders, err := c.UIHeaders() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	c.customListenerHeader.Store(NewListenerCustomHeader(conf.RawConfig.Listeners, c.logger, uiHeaders)) | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // configureLogRequestLevel configures the Core with the supplied log level. | ||||
| func (c *Core) configureLogRequestLevel(level string) { | ||||
| 	c.logRequestsLevel = uberAtomic.NewInt32(0) | ||||
|  | ||||
| 	lvl := log.LevelFromString(level) | ||||
|  | ||||
| 	switch { | ||||
| 	case lvl > log.NoLevel && lvl < log.Off: | ||||
| 		c.logRequestsLevel.Store(int32(lvl)) | ||||
| 	case level != "": | ||||
| 		c.logger.Warn("invalid log_requests_level", "level", level) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // configureAuditBackends configures the Core with the ability to create audit | ||||
| // backends for various types. | ||||
| func (c *Core) configureAuditBackends(backends map[string]audit.Factory) { | ||||
| 	auditBackends := make(map[string]audit.Factory, len(backends)) | ||||
|  | ||||
| 	for k, f := range backends { | ||||
| 		auditBackends[k] = f | ||||
| 	} | ||||
|  | ||||
| 	c.auditBackends = auditBackends | ||||
| } | ||||
|  | ||||
| // configureCredentialsBackends configures the Core with the ability to create | ||||
| // credential backends for various types. | ||||
| func (c *Core) configureCredentialsBackends(backends map[string]logical.Factory, logger log.Logger) { | ||||
| 	credentialBackends := make(map[string]logical.Factory, len(backends)) | ||||
|  | ||||
| 	for k, f := range backends { | ||||
| 		credentialBackends[k] = f | ||||
| 	} | ||||
|  | ||||
| 	credentialBackends[mountTypeToken] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) { | ||||
| 		return NewTokenStore(ctx, logger.Named("token"), c, config) | ||||
| 	} | ||||
|  | ||||
| 	c.credentialBackends = credentialBackends | ||||
|  | ||||
| 	c.addExtraCredentialBackends() | ||||
| } | ||||
|  | ||||
| // configureLogicalBackends configures the Core with the ability to create | ||||
| // logical backends for various types. | ||||
| func (c *Core) configureLogicalBackends(backends map[string]logical.Factory, logger log.Logger, adminNamespacePath string) { | ||||
| 	logicalBackends := make(map[string]logical.Factory, len(backends)) | ||||
|  | ||||
| 	for k, f := range backends { | ||||
| 		logicalBackends[k] = f | ||||
| 	} | ||||
|  | ||||
| 	// KV | ||||
| 	_, ok := logicalBackends[mountTypeKV] | ||||
| 	if !ok { | ||||
| 		logicalBackends[mountTypeKV] = PassthroughBackendFactory | ||||
| 	} | ||||
|  | ||||
| 	// Cubbyhole | ||||
| 	logicalBackends[mountTypeCubbyhole] = CubbyholeBackendFactory | ||||
|  | ||||
| 	// System | ||||
| 	logicalBackends[mountTypeSystem] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) { | ||||
| 		b := NewSystemBackend(c, logger.Named("system")) | ||||
| 		if err := b.Setup(ctx, config); err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		return b, nil | ||||
| 	} | ||||
|  | ||||
| 	// Identity | ||||
| 	logicalBackends[mountTypeIdentity] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) { | ||||
| 		return NewIdentityStore(ctx, c, config, logger.Named("identity")) | ||||
| 	} | ||||
|  | ||||
| 	c.logicalBackends = logicalBackends | ||||
|  | ||||
| 	c.addExtraLogicalBackends(adminNamespacePath) | ||||
| } | ||||
|  | ||||
| // handleVersionTimeStamps stores the current version at the current time to | ||||
| // storage, and then loads all versions and upgrade timestamps out from storage. | ||||
| func (c *Core) handleVersionTimeStamps(ctx context.Context) error { | ||||
|   | ||||
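The configureLogRequestLevel helper added above relies on go-hclog's LevelFromString, which returns NoLevel for any string it does not recognise, so the guard lvl > log.NoLevel && lvl < log.Off accepts only trace through error and logs a warning for other non-empty values. A minimal standalone check of that behaviour, using only the public go-hclog API:

package main

import (
	"fmt"

	log "github.com/hashicorp/go-hclog"
)

func main() {
	// Mirrors the acceptance test in configureLogRequestLevel: only levels
	// strictly between NoLevel and Off are stored.
	for _, s := range []string{"trace", "debug", "info", "warn", "error", "", "foo"} {
		lvl := log.LevelFromString(s)
		accepted := lvl > log.NoLevel && lvl < log.Off
		fmt.Printf("%q -> %v (accepted: %v)\n", s, lvl, accepted)
	}
}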
| @@ -10,6 +10,19 @@ import ( | ||||
| 	"testing" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/hashicorp/vault/command/server" | ||||
|  | ||||
| 	logicalKv "github.com/hashicorp/vault-plugin-secrets-kv" | ||||
| 	logicalDb "github.com/hashicorp/vault/builtin/logical/database" | ||||
|  | ||||
| 	"github.com/hashicorp/vault/builtin/plugin" | ||||
|  | ||||
| 	"github.com/hashicorp/vault/builtin/audit/syslog" | ||||
|  | ||||
| 	"github.com/hashicorp/vault/builtin/audit/file" | ||||
| 	"github.com/hashicorp/vault/builtin/audit/socket" | ||||
| 	"github.com/stretchr/testify/require" | ||||
|  | ||||
| 	"github.com/go-test/deep" | ||||
| 	"github.com/hashicorp/errwrap" | ||||
| 	log "github.com/hashicorp/go-hclog" | ||||
| @@ -32,6 +45,297 @@ import ( | ||||
| // invalidKey is used to test Unseal | ||||
| var invalidKey = []byte("abcdefghijklmnopqrstuvwxyz")[:17] | ||||
|  | ||||
| // TestNewCore_configureAuditBackends ensures that we are able to configure the | ||||
| // supplied audit backends when getting a NewCore. | ||||
| func TestNewCore_configureAuditBackends(t *testing.T) { | ||||
| 	t.Parallel() | ||||
|  | ||||
| 	tests := map[string]struct { | ||||
| 		backends map[string]audit.Factory | ||||
| 	}{ | ||||
| 		"none": { | ||||
| 			backends: nil, | ||||
| 		}, | ||||
| 		"file": { | ||||
| 			backends: map[string]audit.Factory{ | ||||
| 				"file": file.Factory, | ||||
| 			}, | ||||
| 		}, | ||||
| 		"socket": { | ||||
| 			backends: map[string]audit.Factory{ | ||||
| 				"socket": socket.Factory, | ||||
| 			}, | ||||
| 		}, | ||||
| 		"syslog": { | ||||
| 			backends: map[string]audit.Factory{ | ||||
| 				"syslog": syslog.Factory, | ||||
| 			}, | ||||
| 		}, | ||||
| 		"all": { | ||||
| 			backends: map[string]audit.Factory{ | ||||
| 				"file":   file.Factory, | ||||
| 				"socket": socket.Factory, | ||||
| 				"syslog": syslog.Factory, | ||||
| 			}, | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	for name, tc := range tests { | ||||
| 		name := name | ||||
| 		tc := tc | ||||
| 		t.Run(name, func(t *testing.T) { | ||||
| 			t.Parallel() | ||||
|  | ||||
| 			core := &Core{} | ||||
| 			require.Len(t, core.auditBackends, 0) | ||||
| 			core.configureAuditBackends(tc.backends) | ||||
| 			require.Len(t, core.auditBackends, len(tc.backends)) | ||||
| 			for k := range tc.backends { | ||||
| 				require.Contains(t, core.auditBackends, k) | ||||
| 			} | ||||
| 		}) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // TestNewCore_configureCredentialsBackends ensures that we are able to configure the | ||||
| // supplied credential backends, in addition to defaults, when getting a NewCore. | ||||
| func TestNewCore_configureCredentialsBackends(t *testing.T) { | ||||
| 	t.Parallel() | ||||
|  | ||||
| 	tests := map[string]struct { | ||||
| 		backends map[string]logical.Factory | ||||
| 	}{ | ||||
| 		"none": { | ||||
| 			backends: nil, | ||||
| 		}, | ||||
| 		"plugin": { | ||||
| 			backends: map[string]logical.Factory{ | ||||
| 				"plugin": plugin.Factory, | ||||
| 			}, | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	for name, tc := range tests { | ||||
| 		name := name | ||||
| 		tc := tc | ||||
| 		t.Run(name, func(t *testing.T) { | ||||
| 			t.Parallel() | ||||
|  | ||||
| 			core := &Core{} | ||||
| 			require.Len(t, core.credentialBackends, 0) | ||||
| 			core.configureCredentialsBackends(tc.backends, corehelpers.NewTestLogger(t)) | ||||
| 			require.GreaterOrEqual(t, len(core.credentialBackends), len(tc.backends)+1) // token + ent | ||||
| 			for k := range tc.backends { | ||||
| 				require.Contains(t, core.credentialBackends, k) | ||||
| 			} | ||||
| 		}) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // TestNewCore_configureLogicalBackends ensures that we are able to configure the | ||||
| // supplied logical backends, in addition to defaults, when getting a NewCore. | ||||
| func TestNewCore_configureLogicalBackends(t *testing.T) { | ||||
| 	t.Parallel() | ||||
|  | ||||
| 	// configureLogicalBackends will add some default backends for us: | ||||
| 	// cubbyhole | ||||
| 	// identity | ||||
| 	// kv | ||||
| 	// system | ||||
| 	// In addition Enterprise versions of Vault may add additional engines. | ||||
|  | ||||
| 	tests := map[string]struct { | ||||
| 		backends               map[string]logical.Factory | ||||
| 		adminNamespacePath     string | ||||
| 		expectedNonEntBackends int | ||||
| 	}{ | ||||
| 		"none": { | ||||
| 			backends:               nil, | ||||
| 			expectedNonEntBackends: 0, | ||||
| 		}, | ||||
| 		"database": { | ||||
| 			backends: map[string]logical.Factory{ | ||||
| 				"database": logicalDb.Factory, | ||||
| 			}, | ||||
| 			adminNamespacePath:     "foo", | ||||
| 			expectedNonEntBackends: 5, // database + defaults | ||||
| 		}, | ||||
| 		"kv": { | ||||
| 			backends: map[string]logical.Factory{ | ||||
| 				"kv": logicalKv.Factory, | ||||
| 			}, | ||||
| 			adminNamespacePath:     "foo", | ||||
| 			expectedNonEntBackends: 4, // kv + defaults (kv is a default) | ||||
| 		}, | ||||
| 		"plugin": { | ||||
| 			backends: map[string]logical.Factory{ | ||||
| 				"plugin": plugin.Factory, | ||||
| 			}, | ||||
| 			adminNamespacePath:     "foo", | ||||
| 			expectedNonEntBackends: 5, // plugin + defaults | ||||
| 		}, | ||||
| 		"all": { | ||||
| 			backends: map[string]logical.Factory{ | ||||
| 				"database": logicalDb.Factory, | ||||
| 				"kv":       logicalKv.Factory, | ||||
| 				"plugin":   plugin.Factory, | ||||
| 			}, | ||||
| 			adminNamespacePath:     "foo", | ||||
| 			expectedNonEntBackends: 6, // database, plugin + defaults | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	for name, tc := range tests { | ||||
| 		name := name | ||||
| 		tc := tc | ||||
| 		t.Run(name, func(t *testing.T) { | ||||
| 			t.Parallel() | ||||
|  | ||||
| 			core := &Core{} | ||||
| 			require.Len(t, core.logicalBackends, 0) | ||||
| 			core.configureLogicalBackends(tc.backends, corehelpers.NewTestLogger(t), tc.adminNamespacePath) | ||||
| 			require.GreaterOrEqual(t, len(core.logicalBackends), tc.expectedNonEntBackends) | ||||
| 			require.Contains(t, core.logicalBackends, mountTypeKV) | ||||
| 			require.Contains(t, core.logicalBackends, mountTypeCubbyhole) | ||||
| 			require.Contains(t, core.logicalBackends, mountTypeSystem) | ||||
| 			require.Contains(t, core.logicalBackends, mountTypeIdentity) | ||||
| 			for k := range tc.backends { | ||||
| 				require.Contains(t, core.logicalBackends, k) | ||||
| 			} | ||||
| 		}) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // TestNewCore_configureLogRequestLevel ensures that we are able to configure the | ||||
| // supplied logging level when getting a NewCore. | ||||
| func TestNewCore_configureLogRequestLevel(t *testing.T) { | ||||
| 	t.Parallel() | ||||
|  | ||||
| 	tests := map[string]struct { | ||||
| 		level         string | ||||
| 		expectedLevel log.Level | ||||
| 	}{ | ||||
| 		"none": { | ||||
| 			level:         "", | ||||
| 			expectedLevel: log.NoLevel, | ||||
| 		}, | ||||
| 		"trace": { | ||||
| 			level:         "trace", | ||||
| 			expectedLevel: log.Trace, | ||||
| 		}, | ||||
| 		"debug": { | ||||
| 			level:         "debug", | ||||
| 			expectedLevel: log.Debug, | ||||
| 		}, | ||||
| 		"info": { | ||||
| 			level:         "info", | ||||
| 			expectedLevel: log.Info, | ||||
| 		}, | ||||
| 		"warn": { | ||||
| 			level:         "warn", | ||||
| 			expectedLevel: log.Warn, | ||||
| 		}, | ||||
| 		"error": { | ||||
| 			level:         "error", | ||||
| 			expectedLevel: log.Error, | ||||
| 		}, | ||||
| 		"bad": { | ||||
| 			level:         "foo", | ||||
| 			expectedLevel: log.NoLevel, | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	for name, tc := range tests { | ||||
| 		name := name | ||||
| 		tc := tc | ||||
| 		t.Run(name, func(t *testing.T) { | ||||
| 			t.Parallel() | ||||
|  | ||||
| 			// We need to supply a logger, as configureLogRequestLevel emits | ||||
| 			// warnings to the logs in certain circumstances. | ||||
| 			core := &Core{ | ||||
| 				logger: corehelpers.NewTestLogger(t), | ||||
| 			} | ||||
| 			core.configureLogRequestLevel(tc.level) | ||||
| 			require.Equal(t, tc.expectedLevel, log.Level(core.logRequestsLevel.Load())) | ||||
| 		}) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // TestNewCore_configureListeners tests that we are able to configure listeners | ||||
| // on a NewCore via config. | ||||
| func TestNewCore_configureListeners(t *testing.T) { | ||||
| 	// We would usually expect CoreConfig to come from server.NewConfig(). | ||||
| 	// However, we want to fiddle to give us some granular control over the config. | ||||
| 	tests := map[string]struct { | ||||
| 		config            *CoreConfig | ||||
| 		expectedListeners []*ListenerCustomHeaders | ||||
| 	}{ | ||||
| 		"nil-listeners": { | ||||
| 			config: &CoreConfig{ | ||||
| 				RawConfig: &server.Config{ | ||||
| 					SharedConfig: &configutil.SharedConfig{}, | ||||
| 				}, | ||||
| 			}, | ||||
| 			expectedListeners: nil, | ||||
| 		}, | ||||
| 		"listeners-empty": { | ||||
| 			config: &CoreConfig{ | ||||
| 				RawConfig: &server.Config{ | ||||
| 					SharedConfig: &configutil.SharedConfig{ | ||||
| 						Listeners: []*configutil.Listener{}, | ||||
| 					}, | ||||
| 				}, | ||||
| 			}, | ||||
| 			expectedListeners: nil, | ||||
| 		}, | ||||
| 		"listeners-some": { | ||||
| 			config: &CoreConfig{ | ||||
| 				RawConfig: &server.Config{ | ||||
| 					SharedConfig: &configutil.SharedConfig{ | ||||
| 						Listeners: []*configutil.Listener{ | ||||
| 							{Address: "foo"}, | ||||
| 						}, | ||||
| 					}, | ||||
| 				}, | ||||
| 			}, | ||||
| 			expectedListeners: []*ListenerCustomHeaders{ | ||||
| 				{Address: "foo"}, | ||||
| 			}, | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	for name, tc := range tests { | ||||
| 		name := name | ||||
| 		tc := tc | ||||
| 		t.Run(name, func(t *testing.T) { | ||||
| 			t.Parallel() | ||||
|  | ||||
| 			// We need to init some values ourselves, usually CreateCore does this for us. | ||||
| 			logger := corehelpers.NewTestLogger(t) | ||||
| 			backend, err := inmem.NewInmem(nil, logger) | ||||
| 			require.NoError(t, err) | ||||
| 			storage := &logical.InmemStorage{} | ||||
| 			core := &Core{ | ||||
| 				clusterListener:      new(atomic.Value), | ||||
| 				customListenerHeader: new(atomic.Value), | ||||
| 				uiConfig:             NewUIConfig(false, backend, storage), | ||||
| 			} | ||||
|  | ||||
| 			err = core.configureListeners(tc.config) | ||||
| 			require.NoError(t, err) | ||||
| 			switch tc.expectedListeners { | ||||
| 			case nil: | ||||
| 				require.Nil(t, core.customListenerHeader.Load()) | ||||
| 			default: | ||||
| 				for i, v := range core.customListenerHeader.Load().([]*ListenerCustomHeaders) { | ||||
| 					require.Equal(t, v.Address, tc.config.RawConfig.Listeners[i].Address) | ||||
| 				} | ||||
| 			} | ||||
| 		}) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func TestNewCore_badRedirectAddr(t *testing.T) { | ||||
| 	logger = logging.NewVaultLogger(log.Trace) | ||||
|  | ||||
|   | ||||
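The name := name and tc := tc re-declarations in the new table tests are the standard pre-Go 1.22 idiom for parallel subtests: without them, every subtest goroutine would capture the same loop variables and could observe the values from the final iteration. A minimal reproduction of the idiom outside the testing package:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for _, name := range []string{"file", "socket", "syslog"} {
		name := name // shadow the loop variable so each goroutine keeps its own copy (required before Go 1.22)
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println("running case:", name)
		}()
	}
	wg.Wait()
}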
| @@ -75,11 +75,11 @@ func (c *Core) EnableUndoLogs()                  {} | ||||
| func (c *Core) PersistUndoLogs() error           { return nil } | ||||
|  | ||||
| func (c *Core) teardownReplicationResolverHandler() {} | ||||
| func createSecondaries(*Core, *CoreConfig)          {} | ||||
| func (c *Core) createSecondaries(_ hclog.Logger)    {} | ||||
|  | ||||
| func addExtraLogicalBackends(*Core, map[string]logical.Factory, string) {} | ||||
| func (c *Core) addExtraLogicalBackends(_ string) {} | ||||
|  | ||||
| func addExtraCredentialBackends(*Core, map[string]logical.Factory) {} | ||||
| func (c *Core) addExtraCredentialBackends() {} | ||||
|  | ||||
| func preUnsealInternal(context.Context, *Core) error { return nil } | ||||
|  | ||||
|   | ||||
| @@ -2029,7 +2029,7 @@ func (b *SystemBackend) handleTuneWriteCommon(ctx context.Context, path string, | ||||
| 		if !strings.HasPrefix(path, "auth/") { | ||||
| 			return logical.ErrorResponse(fmt.Sprintf("'token_type' can only be modified on auth mounts")), logical.ErrInvalidRequest | ||||
| 		} | ||||
| 		if mountEntry.Type == "token" || mountEntry.Type == "ns_token" { | ||||
| 		if mountEntry.Type == mountTypeToken || mountEntry.Type == mountTypeNSToken { | ||||
| 			return logical.ErrorResponse(fmt.Sprintf("'token_type' cannot be set for 'token' or 'ns_token' auth mounts")), logical.ErrInvalidRequest | ||||
| 		} | ||||
|  | ||||
|   | ||||
| @@ -59,15 +59,19 @@ const ( | ||||
| 	// ListingVisibilityUnauth is the unauth type for listing visibility | ||||
| 	ListingVisibilityUnauth ListingVisibilityType = "unauth" | ||||
|  | ||||
| 	systemMountPath    = "sys/" | ||||
| 	identityMountPath  = "identity/" | ||||
| 	cubbyholeMountPath = "cubbyhole/" | ||||
| 	mountPathSystem    = "sys/" | ||||
| 	mountPathIdentity  = "identity/" | ||||
| 	mountPathCubbyhole = "cubbyhole/" | ||||
|  | ||||
| 	systemMountType      = "system" | ||||
| 	identityMountType    = "identity" | ||||
| 	cubbyholeMountType   = "cubbyhole" | ||||
| 	pluginMountType      = "plugin" | ||||
| 	mountTypeSystem      = "system" | ||||
| 	mountTypeNSSystem    = "ns_system" | ||||
| 	mountTypeIdentity    = "identity" | ||||
| 	mountTypeCubbyhole   = "cubbyhole" | ||||
| 	mountTypePlugin      = "plugin" | ||||
| 	mountTypeKV          = "kv" | ||||
| 	mountTypeNSCubbyhole = "ns_cubbyhole" | ||||
| 	mountTypeToken       = "token" | ||||
| 	mountTypeNSToken     = "ns_token" | ||||
|  | ||||
| 	MountTableUpdateStorage   = true | ||||
| 	MountTableNoUpdateStorage = false | ||||
| @@ -88,25 +92,25 @@ var ( | ||||
| 	protectedMounts = []string{ | ||||
| 		"audit/", | ||||
| 		"auth/", | ||||
| 		systemMountPath, | ||||
| 		cubbyholeMountPath, | ||||
| 		identityMountPath, | ||||
| 		mountPathSystem, | ||||
| 		mountPathCubbyhole, | ||||
| 		mountPathIdentity, | ||||
| 	} | ||||
|  | ||||
| 	untunableMounts = []string{ | ||||
| 		cubbyholeMountPath, | ||||
| 		systemMountPath, | ||||
| 		mountPathCubbyhole, | ||||
| 		mountPathSystem, | ||||
| 		"audit/", | ||||
| 		identityMountPath, | ||||
| 		mountPathIdentity, | ||||
| 	} | ||||
|  | ||||
| 	// singletonMounts can only exist in one location and are | ||||
| 	// loaded by default. These are types, not paths. | ||||
| 	singletonMounts = []string{ | ||||
| 		cubbyholeMountType, | ||||
| 		systemMountType, | ||||
| 		"token", | ||||
| 		identityMountType, | ||||
| 		mountTypeCubbyhole, | ||||
| 		mountTypeSystem, | ||||
| 		mountTypeToken, | ||||
| 		mountTypeIdentity, | ||||
| 	} | ||||
|  | ||||
| 	// mountAliases maps old backend names to new backend names, allowing us | ||||
| @@ -672,7 +676,7 @@ func (c *Core) mountInternal(ctx context.Context, entry *MountEntry, updateStora | ||||
| 	// Check for the correct backend type | ||||
| 	backendType := backend.Type() | ||||
| 	if backendType != logical.TypeLogical { | ||||
| 		if entry.Type != "kv" && entry.Type != "system" && entry.Type != "cubbyhole" { | ||||
| 		if entry.Type != mountTypeKV && entry.Type != mountTypeSystem && entry.Type != mountTypeCubbyhole { | ||||
| 			return fmt.Errorf(`unknown backend type: "%s"`, entry.Type) | ||||
| 		} | ||||
| 	} | ||||
| @@ -1314,7 +1318,7 @@ func (c *Core) runMountUpdates(ctx context.Context, needPersist bool) error { | ||||
| 			entry.Local = true | ||||
| 			needPersist = true | ||||
| 		} | ||||
| 		if entry.Type == cubbyholeMountType && !entry.Local { | ||||
| 		if entry.Type == mountTypeCubbyhole && !entry.Local { | ||||
| 			entry.Local = true | ||||
| 			needPersist = true | ||||
| 		} | ||||
| @@ -1541,7 +1545,7 @@ func (c *Core) setupMounts(ctx context.Context) error { | ||||
| 			backendType := backend.Type() | ||||
|  | ||||
| 			if backendType != logical.TypeLogical { | ||||
| 				if entry.Type != "kv" && entry.Type != "system" && entry.Type != "cubbyhole" { | ||||
| 				if entry.Type != mountTypeKV && entry.Type != mountTypeSystem && entry.Type != mountTypeCubbyhole { | ||||
| 					return fmt.Errorf(`unknown backend type: "%s"`, entry.Type) | ||||
| 				} | ||||
| 			} | ||||
| @@ -1670,7 +1674,7 @@ func (c *Core) newLogicalBackend(ctx context.Context, entry *MountEntry, sysView | ||||
| 	} | ||||
|  | ||||
| 	switch { | ||||
| 	case entry.Type == "plugin": | ||||
| 	case entry.Type == mountTypePlugin: | ||||
| 		conf["plugin_name"] = entry.Config.PluginName | ||||
| 	default: | ||||
| 		conf["plugin_name"] = t | ||||
| @@ -1728,7 +1732,7 @@ func (c *Core) defaultMountTable() *MountTable { | ||||
| 		if err != nil { | ||||
| 			panic(fmt.Sprintf("could not create default secret mount UUID: %v", err)) | ||||
| 		} | ||||
| 		mountAccessor, err := c.generateMountAccessor("kv") | ||||
| 		mountAccessor, err := c.generateMountAccessor(mountTypeKV) | ||||
| 		if err != nil { | ||||
| 			panic(fmt.Sprintf("could not generate default secret mount accessor: %v", err)) | ||||
| 		} | ||||
| @@ -1740,7 +1744,7 @@ func (c *Core) defaultMountTable() *MountTable { | ||||
| 		kvMount := &MountEntry{ | ||||
| 			Table:            mountTableType, | ||||
| 			Path:             "secret/", | ||||
| 			Type:             "kv", | ||||
| 			Type:             mountTypeKV, | ||||
| 			Description:      "key/value secret storage", | ||||
| 			UUID:             mountUUID, | ||||
| 			Accessor:         mountAccessor, | ||||
| @@ -1776,8 +1780,8 @@ func (c *Core) requiredMountTable() *MountTable { | ||||
| 	} | ||||
| 	cubbyholeMount := &MountEntry{ | ||||
| 		Table:            mountTableType, | ||||
| 		Path:             cubbyholeMountPath, | ||||
| 		Type:             cubbyholeMountType, | ||||
| 		Path:             mountPathCubbyhole, | ||||
| 		Type:             mountTypeCubbyhole, | ||||
| 		Description:      "per-token private secret storage", | ||||
| 		UUID:             cubbyholeUUID, | ||||
| 		Accessor:         cubbyholeAccessor, | ||||
| @@ -1801,7 +1805,7 @@ func (c *Core) requiredMountTable() *MountTable { | ||||
| 	sysMount := &MountEntry{ | ||||
| 		Table:            mountTableType, | ||||
| 		Path:             "sys/", | ||||
| 		Type:             systemMountType, | ||||
| 		Type:             mountTypeSystem, | ||||
| 		Description:      "system endpoints used for control, policy and debugging", | ||||
| 		UUID:             sysUUID, | ||||
| 		Accessor:         sysAccessor, | ||||
| @@ -1877,15 +1881,15 @@ func (c *Core) singletonMountTables() (mounts, auth *MountTable) { | ||||
|  | ||||
| func (c *Core) setCoreBackend(entry *MountEntry, backend logical.Backend, view *BarrierView) { | ||||
| 	switch entry.Type { | ||||
| 	case systemMountType: | ||||
| 	case mountTypeSystem: | ||||
| 		c.systemBackend = backend.(*SystemBackend) | ||||
| 		c.systemBarrierView = view | ||||
| 	case cubbyholeMountType: | ||||
| 	case mountTypeCubbyhole: | ||||
| 		ch := backend.(*CubbyholeBackend) | ||||
| 		ch.saltUUID = entry.UUID | ||||
| 		ch.storageView = view | ||||
| 		c.cubbyholeBackend = ch | ||||
| 	case identityMountType: | ||||
| 	case mountTypeIdentity: | ||||
| 		c.identityStore = backend.(*IdentityStore) | ||||
| 	} | ||||
| } | ||||
|   | ||||
| @@ -30,7 +30,7 @@ func runFilteredPathsEvaluation(context.Context, *Core, bool) error           { | ||||
| // ViewPath returns storage prefix for the view | ||||
| func (e *MountEntry) ViewPath() string { | ||||
| 	switch e.Type { | ||||
| 	case systemMountType: | ||||
| 	case mountTypeSystem: | ||||
| 		return systemBarrierPrefix | ||||
| 	case "token": | ||||
| 		return path.Join(systemBarrierPrefix, tokenSubPath) + "/" | ||||
|   | ||||
| @@ -38,9 +38,9 @@ func (c *Core) reloadMatchingPluginMounts(ctx context.Context, mounts []string) | ||||
| 		//   - auth/foo | ||||
| 		if strings.HasPrefix(mount, credentialRoutePrefix) { | ||||
| 			isAuth = true | ||||
| 		} else if strings.HasPrefix(mount, systemMountPath+credentialRoutePrefix) { | ||||
| 		} else if strings.HasPrefix(mount, mountPathSystem+credentialRoutePrefix) { | ||||
| 			isAuth = true | ||||
| 			mount = strings.TrimPrefix(mount, systemMountPath) | ||||
| 			mount = strings.TrimPrefix(mount, mountPathSystem) | ||||
| 		} | ||||
| 		if !strings.HasSuffix(mount, "/") { | ||||
| 			mount += "/" | ||||
|   | ||||
| @@ -624,9 +624,9 @@ func (r *Router) routeCommon(ctx context.Context, req *logical.Request, existenc | ||||
| 	clientToken := req.ClientToken | ||||
| 	switch { | ||||
| 	case strings.HasPrefix(originalPath, "auth/token/"): | ||||
| 	case strings.HasPrefix(originalPath, "sys/"): | ||||
| 	case strings.HasPrefix(originalPath, "identity/"): | ||||
| 	case strings.HasPrefix(originalPath, cubbyholeMountPath): | ||||
| 	case strings.HasPrefix(originalPath, mountPathSystem): | ||||
| 	case strings.HasPrefix(originalPath, mountPathIdentity): | ||||
| 	case strings.HasPrefix(originalPath, mountPathCubbyhole): | ||||
| 		if req.Operation == logical.RollbackOperation { | ||||
| 			// Backend doesn't support this and it can't properly look up a | ||||
| 			// cubbyhole ID so just return here | ||||
| @@ -791,7 +791,7 @@ func (r *Router) routeCommon(ctx context.Context, req *logical.Request, existenc | ||||
| 				} | ||||
|  | ||||
| 				switch re.mountEntry.Type { | ||||
| 				case "token", "ns_token": | ||||
| 				case mountTypeToken, mountTypeNSToken: | ||||
| 					// Nothing; we respect what the token store is telling us and | ||||
| 					// we don't allow tuning | ||||
| 				default: | ||||
|   | ||||
| @@ -111,7 +111,7 @@ var ( | ||||
| 			return errors.New("nil token entry") | ||||
| 		} | ||||
|  | ||||
| 		storage := ts.core.router.MatchingStorageByAPIPath(ctx, cubbyholeMountPath) | ||||
| 		storage := ts.core.router.MatchingStorageByAPIPath(ctx, mountPathCubbyhole) | ||||
| 		if storage == nil { | ||||
| 			return fmt.Errorf("no cubby mount entry") | ||||
| 		} | ||||
| @@ -2197,7 +2197,7 @@ func (ts *TokenStore) handleTidy(ctx context.Context, req *logical.Request, data | ||||
| 			} | ||||
|  | ||||
| 			// List all the cubbyhole storage keys | ||||
| 			view := ts.core.router.MatchingStorageByAPIPath(ctx, cubbyholeMountPath) | ||||
| 			view := ts.core.router.MatchingStorageByAPIPath(ctx, mountPathCubbyhole) | ||||
| 			if view == nil { | ||||
| 				return fmt.Errorf("no cubby mount entry") | ||||
| 			} | ||||
|   | ||||
| @@ -118,8 +118,8 @@ func TestTokenStore_CubbyholeTidy(t *testing.T) { | ||||
| func testTokenStore_CubbyholeTidy(t *testing.T, c *Core, root string, nsCtx context.Context) { | ||||
| 	ts := c.tokenStore | ||||
|  | ||||
| 	backend := c.router.MatchingBackend(nsCtx, cubbyholeMountPath) | ||||
| 	view := c.router.MatchingStorageByAPIPath(nsCtx, cubbyholeMountPath) | ||||
| 	backend := c.router.MatchingBackend(nsCtx, mountPathCubbyhole) | ||||
| 	view := c.router.MatchingStorageByAPIPath(nsCtx, mountPathCubbyhole) | ||||
|  | ||||
| 	for i := 1; i <= 20; i++ { | ||||
| 		// Create 20 tokens | ||||
|   | ||||