Mirror of https://github.com/optim-enterprises-bv/vault.git, synced 2025-11-02 03:27:54 +00:00
Automatically track subloggers in allLoggers (#22038)
* Automatically track subloggers in allLoggers

  This PR introduces a new interface called `SubloggerAdder` for tracking allLoggers across startup phases. The interface enables us to register a single `SubloggerHook` during initial logger creation and hand off management of `allLoggers` during the three phases we need to handle:

  - Before `CoreConfig` is created, the `SubloggerHook` (`AppendToAllLoggers`) appends new subloggers to `ServerCommand.allLoggers`.
  - After `CoreConfig` is created and before `NewCore` returns, new subloggers are added to `CoreConfig.AllLoggers`. Intermediate state must also be kept in sync within `NewCore` to track new subloggers before we return to the server command and register the `Core.SubloggerAdder` implementation.
  - After `NewCore` returns to the server command, we register `Core` as the implementer of `ServerCommand.SubloggerAdder`, ensuring that all new subloggers are appended to `Core.allLoggers`.

* Wire up the sublogger hook in NewTestLogger
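To make the handoff easier to follow, here is a minimal, self-contained sketch of the pattern the commit message describes. The types below (`Logger`, `serverCommand`, `core`) are simplified stand-ins invented for illustration, not the real Vault types, and the intermediate `CoreConfig` phase is elided; the actual wiring through hclog, `ServerCommand`, `CoreConfig`, and `Core` is in the diff that follows.

package main

import "fmt"

// Logger stands in for hclog.Logger in this simplified sketch.
type Logger struct{ name string }

// SubloggerAdder mirrors the interface added by this PR: anything that can
// take ownership of newly created subloggers implements SubloggerHook.
type SubloggerAdder interface {
	SubloggerHook(sub *Logger) *Logger
}

// serverCommand plays the role of ServerCommand: it collects subloggers
// itself until a later startup phase takes over tracking.
type serverCommand struct {
	allLoggers []*Logger
	adder      SubloggerAdder // nil until a later phase registers itself
}

// AppendToAllLoggers is the single hook registered at logger-creation time.
// Before any adder is registered it appends locally; afterwards it delegates.
func (s *serverCommand) AppendToAllLoggers(sub *Logger) *Logger {
	if s.adder == nil {
		s.allLoggers = append(s.allLoggers, sub)
		return sub
	}
	return s.adder.SubloggerHook(sub)
}

// core plays the role of Core: once constructed, it becomes the adder.
type core struct{ allLoggers []*Logger }

func (c *core) SubloggerHook(sub *Logger) *Logger {
	c.allLoggers = append(c.allLoggers, sub)
	return sub
}

func main() {
	cmd := &serverCommand{}
	cmd.AppendToAllLoggers(&Logger{name: "storage"}) // early phase: tracked by the command

	c := &core{allLoggers: cmd.allLoggers} // hand the existing loggers to core
	cmd.adder = c                          // final phase: core now owns tracking
	cmd.AppendToAllLoggers(&Logger{name: "expiration"})

	fmt.Println(len(c.allLoggers)) // 2
}

The key point is that callers always go through the same hook; only the owner of the sublogger list changes as startup progresses, which is what lets every sublogger end up in `Core.allLoggers` without manual bookkeeping.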
Changed files: changelog/22038.txt (new file, 3 additions)
@@ -0,0 +1,3 @@
+```release-note:bug
+core: All subloggers now reflect configured log level on reload.
+```
@@ -40,6 +40,7 @@ import (
     "github.com/hashicorp/vault/helper/builtinplugins"
     "github.com/hashicorp/vault/helper/constants"
     "github.com/hashicorp/vault/helper/experiments"
+    "github.com/hashicorp/vault/helper/logging"
     loghelper "github.com/hashicorp/vault/helper/logging"
     "github.com/hashicorp/vault/helper/metricsutil"
     "github.com/hashicorp/vault/helper/namespace"
@@ -120,6 +121,7 @@ type ServerCommand struct {
     licenseReloadedCh chan (error) // for tests

     allLoggers []hclog.Logger
+    logging.SubloggerAdder

     flagConfigs  []string
     flagRecovery bool
@@ -441,6 +443,26 @@ func (c *ServerCommand) parseConfig() (*server.Config, []configutil.ConfigError,
     return config, configErrors, nil
 }

+// AppendToAllLoggers is registered with the base logger to handle creation of
+// new subloggers through the phases of server startup. There are three phases
+// we need to handle: (1) Before CoreConfig is created, new subloggers are added
+// to c.allLoggers; (2) After CoreConfig is created, new subloggers are added to
+// CoreConfig.AllLoggers; (3) After Core instantiation, new subloggers are
+// appended to Core.allLoggers. This logic is managed by the SubloggerAdder
+// interface.
+//
+// NOTE: Core.allLoggers must be set to CoreConfig.allLoggers after NewCore to
+// keep track of new subloggers added before c.SubloggerAdder gets reassigned to
+// the Core implementation.
+func (c *ServerCommand) AppendToAllLoggers(sub hclog.Logger) hclog.Logger {
+    if c.SubloggerAdder == nil {
+        c.allLoggers = append(c.allLoggers, sub)
+        return sub
+    }
+
+    return c.SubloggerHook(sub)
+}
+
 func (c *ServerCommand) runRecoveryMode() int {
     config, configErrors, err := c.parseConfig()
     if err != nil {
@@ -586,6 +608,7 @@ func (c *ServerCommand) runRecoveryMode() int {
         DisableMlock: config.DisableMlock,
         RecoveryMode: c.flagRecovery,
         ClusterAddr:  config.ClusterAddr,
+        AllLoggers:   c.allLoggers,
     }

     core, newCoreError := vault.NewCore(coreConfig)
@@ -809,7 +832,6 @@ func (c *ServerCommand) setupStorage(config *server.Config) (physical.Backend, e
     }

     namedStorageLogger := c.logger.Named("storage." + config.Storage.Type)
-    c.allLoggers = append(c.allLoggers, namedStorageLogger)
     backend, err := factory(config.Storage.Config, namedStorageLogger)
     if err != nil {
         return nil, fmt.Errorf("Error initializing storage of type %s: %w", config.Storage.Type, err)
@@ -825,7 +847,6 @@ func beginServiceRegistration(c *ServerCommand, config *server.Config) (sr.Servi
     }

     namedSDLogger := c.logger.Named("service_registration." + config.ServiceRegistration.Type)
-    c.allLoggers = append(c.allLoggers, namedSDLogger)

     // Since we haven't even begun starting Vault's core yet,
     // we know that Vault is in its pre-running state.
@@ -1104,7 +1125,6 @@ func (c *ServerCommand) Run(args []string) int {

     // create GRPC logger
     namedGRPCLogFaker := c.logger.Named("grpclogfaker")
-    c.allLoggers = append(c.allLoggers, namedGRPCLogFaker)
     grpclog.SetLogger(&grpclogFaker{
         logger: namedGRPCLogFaker,
         log:    os.Getenv("VAULT_GRPC_LOGGING") != "",
@@ -1257,6 +1277,10 @@ func (c *ServerCommand) Run(args []string) int {
         return c.enableThreeNodeDevCluster(&coreConfig, info, infoKeys, c.flagDevListenAddr, os.Getenv("VAULT_DEV_TEMP_DIR"))
     }

+    // Keep track of new subloggers in coreConfig.AllLoggers until we hand it
+    // off to core
+    c.SubloggerAdder = &coreConfig
+
     if c.flagDevFourCluster {
         return enableFourClusterDev(c, &coreConfig, info, infoKeys, c.flagDevListenAddr, os.Getenv("VAULT_DEV_TEMP_DIR"))
     }
@@ -1344,6 +1368,10 @@ func (c *ServerCommand) Run(args []string) int {

     }

+    // Now we can use the core SubloggerHook to add any new subloggers to
+    // core.allLoggers
+    c.SubloggerAdder = core
+
     // Copy the reload funcs pointers back
     c.reloadFuncs = coreConfig.ReloadFuncs
     c.reloadFuncsLock = coreConfig.ReloadFuncsLock
@@ -1820,6 +1848,7 @@ func (c *ServerCommand) configureLogging(config *server.Config) (hclog.Intercept
         LogRotateDuration: logRotateDuration,
         LogRotateBytes:    config.LogRotateBytes,
         LogRotateMaxFiles: config.LogRotateMaxFiles,
+        SubloggerHook:     c.AppendToAllLoggers,
     }

     return loghelper.Setup(logCfg, c.logWriter)
@@ -2527,7 +2556,6 @@ func setSeal(c *ServerCommand, config *server.Config, infoKeys []string, info ma

     var seal vault.Seal
     sealLogger := c.logger.ResetNamed(fmt.Sprintf("seal.%s", sealType))
-    c.allLoggers = append(c.allLoggers, sealLogger)
     defaultSeal := vault.NewDefaultSeal(vaultseal.NewAccess(aeadwrapper.NewShamirWrapper()))
     var sealInfoKeys []string
     sealInfoMap := map[string]string{}
@@ -2582,7 +2610,6 @@ func initHaBackend(c *ServerCommand, config *server.Config, coreConfig *vault.Co
     }

     namedHALogger := c.logger.Named("ha." + config.HAStorage.Type)
-    c.allLoggers = append(c.allLoggers, namedHALogger)
     habackend, err := factory(config.HAStorage.Config, namedHALogger)
     if err != nil {
         return false, fmt.Errorf("Error initializing HA storage of type %s: %s", config.HAStorage.Type, err)
@@ -48,6 +48,17 @@ type LogConfig struct {

     // LogRotateMaxFiles is the maximum number of past archived log files to keep
     LogRotateMaxFiles int
+
+    // SubloggerHook handles creation of new subloggers, automatically appending
+    // them to core's running list of allLoggers.
+    // see: server.AppendToAllLoggers for more details.
+    SubloggerHook func(log.Logger) log.Logger
+}
+
+// SubloggerAdder is an interface which facilitates tracking of new subloggers
+// added between phases of server startup.
+type SubloggerAdder interface {
+    SubloggerHook(logger log.Logger) log.Logger
 }

 func (c *LogConfig) isLevelInvalid() bool {
@@ -148,6 +159,7 @@ func Setup(config *LogConfig, w io.Writer) (log.InterceptLogger, error) {
         IndependentLevels: true,
         Output:            io.MultiWriter(writers...),
         JSONFormat:        config.isFormatJson(),
+        SubloggerHook:     config.SubloggerHook,
     })

     return logger, nil
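The `SubloggerHook` field threaded through `LogConfig` above is ultimately handed to hclog's `LoggerOptions`, which, in go-hclog versions that expose the field, invokes the hook when a sublogger is derived (for example via `Named`). A rough standalone sketch of that behavior follows; it assumes a sufficiently recent go-hclog, and the hook body is illustrative rather than Vault's own.

package main

import (
	"os"
	"sync"

	"github.com/hashicorp/go-hclog"
)

func main() {
	var (
		mu         sync.Mutex
		allLoggers []hclog.Logger
	)

	// Register a single hook at creation time; hclog calls it for each
	// derived sublogger, so every logger created from root gets tracked.
	root := hclog.NewInterceptLogger(&hclog.LoggerOptions{
		Name:              "vault",
		Output:            os.Stderr,
		Level:             hclog.Info,
		IndependentLevels: true,
		SubloggerHook: func(sub hclog.Logger) hclog.Logger {
			mu.Lock()
			defer mu.Unlock()
			allLoggers = append(allLoggers, sub)
			return sub
		},
	})

	storage := root.Named("storage")
	expiration := root.Named("expiration")

	// Later, a reload can adjust every tracked logger in one pass.
	mu.Lock()
	for _, l := range allLoggers {
		l.SetLevel(hclog.Debug)
	}
	mu.Unlock()

	storage.Debug("now visible")
	expiration.Debug("now visible")
}

Because every derived logger passes through the hook, a later level change (such as on configuration reload) can be applied to the full set, which is the bug this PR addresses for subloggers that previously never made it into Core's tracking list.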
@@ -20,6 +20,7 @@ import (
     "github.com/hashicorp/go-hclog"
     "github.com/hashicorp/vault/audit"
     "github.com/hashicorp/vault/builtin/credential/approle"
+    "github.com/hashicorp/vault/helper/logging"
     "github.com/hashicorp/vault/plugins/database/mysql"
     "github.com/hashicorp/vault/sdk/framework"
     "github.com/hashicorp/vault/sdk/helper/consts"
@@ -418,6 +419,32 @@ type TestLogger struct {
     Path string
     File *os.File
     sink hclog.SinkAdapter
+    // For managing temporary start-up state
+    sync.RWMutex
+    AllLoggers []hclog.Logger
+    logging.SubloggerAdder
+}
+
+// RegisterSubloggerAdder checks to see if the provided logger interface is a
+// TestLogger and re-assigns the SubloggerHook implementation if so.
+func RegisterSubloggerAdder(logger hclog.Logger, adder logging.SubloggerAdder) {
+    if l, ok := logger.(*TestLogger); ok {
+        l.Lock()
+        l.SubloggerAdder = adder
+        l.Unlock()
+    }
+}
+
+// AppendToAllLoggers appends the sub logger to allLoggers, or if the TestLogger
+// is assigned to a SubloggerAdder implementation, it calls the underlying hook.
+func (l *TestLogger) AppendToAllLoggers(sub hclog.Logger) hclog.Logger {
+    l.Lock()
+    defer l.Unlock()
+    if l.SubloggerAdder == nil {
+        l.AllLoggers = append(l.AllLoggers, sub)
+        return sub
+    }
+    return l.SubloggerHook(sub)
 }

 func NewTestLogger(t testing.T) *TestLogger {
@@ -441,25 +468,31 @@ func NewTestLogger(t testing.T) *TestLogger {
         output = logFile
     }

+    sink := hclog.NewSinkAdapter(&hclog.LoggerOptions{
+        Output:            output,
+        Level:             hclog.Trace,
+        IndependentLevels: true,
+    })
+
+    testLogger := &TestLogger{
+        Path: logPath,
+        File: logFile,
+        sink: sink,
+    }
+
     // We send nothing on the regular logger, that way we can later deregister
     // the sink to stop logging during cluster cleanup.
     logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{
         Output:            io.Discard,
         IndependentLevels: true,
         Name:              t.Name(),
+        SubloggerHook:     testLogger.AppendToAllLoggers,
     })
-    sink := hclog.NewSinkAdapter(&hclog.LoggerOptions{
-        Output:            output,
-        Level:             hclog.Trace,
-        IndependentLevels: true,
-    })
+
     logger.RegisterSink(sink)
-    return &TestLogger{
-        Path:            logPath,
-        File:            logFile,
-        InterceptLogger: logger,
-        sink:            sink,
-    }
+    testLogger.InterceptLogger = logger
+
+    return testLogger
 }

 func (tl *TestLogger) StopLogging() {
@@ -1104,7 +1104,6 @@ func (c *Core) setupActivityLog(ctx context.Context, wg *sync.WaitGroup) error {
 // this function should be called with activityLogLock.
 func (c *Core) setupActivityLogLocked(ctx context.Context, wg *sync.WaitGroup) error {
     logger := c.baseLogger.Named("activity")
-    c.AddLogger(logger)

     if os.Getenv("VAULT_DISABLE_ACTIVITY_LOG") != "" {
         if c.CensusLicensingEnabled() {
@@ -383,7 +383,6 @@ func (c *Core) persistAudit(ctx context.Context, table *MountTable, localOnly bo
 // initialize the audit backends
 func (c *Core) setupAudits(ctx context.Context) error {
     brokerLogger := c.baseLogger.Named("audit")
-    c.AddLogger(brokerLogger)
     broker, err := NewAuditBroker(brokerLogger, c.IsExperimentEnabled(experiments.VaultExperimentCoreAuditEventsAlpha1))
     if err != nil {
         return err
@@ -495,7 +494,6 @@ func (c *Core) newAuditBackend(ctx context.Context, entry *MountEntry, view logi
     }

     auditLogger := c.baseLogger.Named("audit")
-    c.AddLogger(auditLogger)

     switch entry.Type {
     case "file":
@@ -998,7 +998,6 @@ func (c *Core) newCredentialBackend(ctx context.Context, entry *MountEntry, sysV
     conf["plugin_version"] = entry.Version

     authLogger := c.baseLogger.Named(fmt.Sprintf("auth.%s.%s", t, entry.Accessor))
-    c.AddLogger(authLogger)
     pluginEventSender, err := c.events.WithPlugin(entry.namespace, &logical.EventPluginInfo{
         MountClass:    consts.PluginTypeCredential.String(),
         MountAccessor: entry.Accessor,
@@ -323,7 +323,6 @@ func (c *Core) startClusterListener(ctx context.Context) error {
     if networkLayer == nil {
         tcpLogger := c.logger.Named("cluster-listener.tcp")
         networkLayer = cluster.NewTCPLayer(c.clusterListenerAddrs, tcpLogger)
-        c.AddLogger(tcpLogger)
     }

     listenerLogger := c.logger.Named("cluster-listener")
@@ -332,8 +331,6 @@ func (c *Core) startClusterListener(ctx context.Context) error {
         listenerLogger,
         5*c.clusterHeartbeatInterval))

-    c.AddLogger(listenerLogger)
-
     err := c.getClusterListener().Run(ctx)
     if err != nil {
         return err
@@ -854,6 +854,13 @@ type CoreConfig struct {
     AdministrativeNamespacePath string
 }

+// SubloggerHook implements the SubloggerAdder interface. This implementation
+// manages CoreConfig.AllLoggers state prior to (and during) NewCore.
+func (c *CoreConfig) SubloggerHook(logger log.Logger) log.Logger {
+    c.AllLoggers = append(c.AllLoggers, logger)
+    return logger
+}
+
 // GetServiceRegistration returns the config's ServiceRegistration, or nil if it does
 // not exist.
 func (c *CoreConfig) GetServiceRegistration() sr.ServiceRegistration {
@@ -1024,10 +1031,7 @@ func CreateCore(conf *CoreConfig) (*Core, error) {

     c.shutdownDoneCh.Store(make(chan struct{}))

-    c.allLoggers = append(c.allLoggers, c.logger)
-
     c.router.logger = c.logger.Named("router")
-    c.allLoggers = append(c.allLoggers, c.router.logger)

     c.inFlightReqData = &InFlightRequests{
         InFlightReqMap: &sync.Map{},
@@ -1105,7 +1109,6 @@ func NewCore(conf *CoreConfig) (*Core, error) {
     if err != nil {
         return nil, err
     }

     if err = coreInit(c, conf); err != nil {
         return nil, err
     }
@@ -1176,10 +1179,6 @@ func NewCore(conf *CoreConfig) (*Core, error) {

     c.loginMFABackend = NewLoginMFABackend(c, conf.Logger)

-    if c.loginMFABackend.mfaLogger != nil {
-        c.AddLogger(c.loginMFABackend.mfaLogger)
-    }
-
     logicalBackends := make(map[string]logical.Factory)
     for k, f := range conf.LogicalBackends {
         logicalBackends[k] = f
@@ -1192,7 +1191,6 @@ func NewCore(conf *CoreConfig) (*Core, error) {
     logicalBackends["cubbyhole"] = CubbyholeBackendFactory
     logicalBackends[systemMountType] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
         sysBackendLogger := conf.Logger.Named("system")
-        c.AddLogger(sysBackendLogger)
         b := NewSystemBackend(c, sysBackendLogger)
         if err := b.Setup(ctx, config); err != nil {
             return nil, err
@@ -1201,7 +1199,6 @@ func NewCore(conf *CoreConfig) (*Core, error) {
     }
     logicalBackends["identity"] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
         identityLogger := conf.Logger.Named("identity")
-        c.AddLogger(identityLogger)
         return NewIdentityStore(ctx, c, config, identityLogger)
     }
     addExtraLogicalBackends(c, logicalBackends, conf.AdministrativeNamespacePath)
@@ -1213,7 +1210,6 @@ func NewCore(conf *CoreConfig) (*Core, error) {
     }
     credentialBackends["token"] = func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
         tsLogger := conf.Logger.Named("token")
-        c.AddLogger(tsLogger)
         return NewTokenStore(ctx, tsLogger, c, config)
     }
     addExtraCredentialBackends(c, credentialBackends)
@@ -1251,7 +1247,6 @@ func NewCore(conf *CoreConfig) (*Core, error) {
     }

     quotasLogger := conf.Logger.Named("quotas")
-    c.allLoggers = append(c.allLoggers, quotasLogger)
     c.quotaManager, err = quotas.NewManager(quotasLogger, c.quotaLeaseWalker, c.metricSink)
     if err != nil {
         return nil, err
@@ -1269,7 +1264,6 @@ func NewCore(conf *CoreConfig) (*Core, error) {

     // start the event system
     eventsLogger := conf.Logger.Named("events")
-    c.allLoggers = append(c.allLoggers, eventsLogger)
     events, err := eventbus.NewEventBus(eventsLogger)
     if err != nil {
         return nil, err
@@ -1279,6 +1273,10 @@ func NewCore(conf *CoreConfig) (*Core, error) {
         c.events.Start()
     }

+    // Make sure we're keeping track of the subloggers added above. We haven't
+    // yet registered core to the server command's SubloggerAdder, so any new
+    // subloggers will be in conf.AllLoggers.
+    c.allLoggers = conf.AllLoggers
     return c, nil
 }

@@ -3041,6 +3039,14 @@ func (c *Core) AddLogger(logger log.Logger) {
     c.allLoggers = append(c.allLoggers, logger)
 }

+// SubloggerHook implements the SubloggerAdder interface. We add this method to
+// the server command after NewCore returns with a Core object. The hook keeps
+// track of newly added subloggers without manual calls to c.AddLogger.
+func (c *Core) SubloggerHook(logger log.Logger) log.Logger {
+    c.AddLogger(logger)
+    return logger
+}
+
 // SetLogLevel sets logging level for all tracked loggers to the level provided
 func (c *Core) SetLogLevel(level log.Level) {
     c.allLoggersLock.RLock()
@@ -40,11 +40,9 @@ func coreInit(c *Core, conf *CoreConfig) error {
     phys := conf.Physical
     _, txnOK := phys.(physical.Transactional)
     sealUnwrapperLogger := conf.Logger.Named("storage.sealunwrapper")
-    c.allLoggers = append(c.allLoggers, sealUnwrapperLogger)
     c.sealUnwrapper = NewSealUnwrapper(phys, sealUnwrapperLogger)
     // Wrap the physical backend in a cache layer if enabled
     cacheLogger := c.baseLogger.Named("storage.cache")
-    c.allLoggers = append(c.allLoggers, cacheLogger)
     if txnOK {
         c.physical = physical.NewTransactionalCache(c.sealUnwrapper, conf.CacheSize, cacheLogger, c.MetricSink().Sink)
     } else {
|
|||||||
jobManager := fairshare.NewJobManager("expire", getNumExpirationWorkers(c, logger), managerLogger, c.metricSink)
|
jobManager := fairshare.NewJobManager("expire", getNumExpirationWorkers(c, logger), managerLogger, c.metricSink)
|
||||||
jobManager.Start()
|
jobManager.Start()
|
||||||
|
|
||||||
c.AddLogger(managerLogger)
|
|
||||||
|
|
||||||
exp := &ExpirationManager{
|
exp := &ExpirationManager{
|
||||||
core: c,
|
core: c,
|
||||||
router: c.router,
|
router: c.router,
|
||||||
@@ -388,7 +386,6 @@ func (c *Core) setupExpiration(e ExpireLeaseStrategy) error {
|
|||||||
|
|
||||||
// Create the manager
|
// Create the manager
|
||||||
expLogger := c.baseLogger.Named("expiration")
|
expLogger := c.baseLogger.Named("expiration")
|
||||||
c.AddLogger(expLogger)
|
|
||||||
mgr := NewExpirationManager(c, view, e, expLogger)
|
mgr := NewExpirationManager(c, view, e, expLogger)
|
||||||
c.expiration = mgr
|
c.expiration = mgr
|
||||||
|
|
||||||
@@ -544,7 +541,6 @@ func (m *ExpirationManager) Tidy(ctx context.Context) error {
|
|||||||
var tidyErrors *multierror.Error
|
var tidyErrors *multierror.Error
|
||||||
|
|
||||||
logger := m.logger.Named("tidy")
|
logger := m.logger.Named("tidy")
|
||||||
m.core.AddLogger(logger)
|
|
||||||
|
|
||||||
if !atomic.CompareAndSwapInt32(m.tidyLock, 0, 1) {
|
if !atomic.CompareAndSwapInt32(m.tidyLock, 0, 1) {
|
||||||
logger.Warn("tidy operation on leases is already in progress")
|
logger.Warn("tidy operation on leases is already in progress")
|
||||||
|
|||||||
@@ -75,11 +75,8 @@ func NewIdentityStore(ctx context.Context, core *Core, config *logical.BackendCo
|
|||||||
}
|
}
|
||||||
|
|
||||||
entitiesPackerLogger := iStore.logger.Named("storagepacker").Named("entities")
|
entitiesPackerLogger := iStore.logger.Named("storagepacker").Named("entities")
|
||||||
core.AddLogger(entitiesPackerLogger)
|
|
||||||
localAliasesPackerLogger := iStore.logger.Named("storagepacker").Named("local-aliases")
|
localAliasesPackerLogger := iStore.logger.Named("storagepacker").Named("local-aliases")
|
||||||
core.AddLogger(localAliasesPackerLogger)
|
|
||||||
groupsPackerLogger := iStore.logger.Named("storagepacker").Named("groups")
|
groupsPackerLogger := iStore.logger.Named("storagepacker").Named("groups")
|
||||||
core.AddLogger(groupsPackerLogger)
|
|
||||||
|
|
||||||
iStore.entityPacker, err = storagepacker.NewStoragePacker(iStore.view, entitiesPackerLogger, "")
|
iStore.entityPacker, err = storagepacker.NewStoragePacker(iStore.view, entitiesPackerLogger, "")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|||||||
@@ -33,7 +33,6 @@ import (
     "github.com/hashicorp/vault/sdk/helper/compressutil"
     "github.com/hashicorp/vault/sdk/helper/consts"
     "github.com/hashicorp/vault/sdk/helper/jsonutil"
-    "github.com/hashicorp/vault/sdk/helper/logging"
     "github.com/hashicorp/vault/sdk/helper/pluginutil"
     "github.com/hashicorp/vault/sdk/helper/testhelpers/schema"
     "github.com/hashicorp/vault/sdk/logical"
@@ -5460,6 +5459,13 @@ func TestSystemBackend_LoggersByName(t *testing.T) {
            true,
            false,
        },
+       {
+           "events",
+           "invalid",
+           "does-not-matter",
+           true,
+           false,
+       },
        {
            "",
            "info",
@@ -5482,10 +5488,9 @@ func TestSystemBackend_LoggersByName(t *testing.T) {
        t.Run(fmt.Sprintf("loggers-by-name-%s", tc.logger), func(t *testing.T) {
            t.Parallel()

-           core, _, _ := TestCoreUnsealedWithConfig(t, &CoreConfig{
-               Logger: logging.NewVaultLogger(hclog.Trace),
-           })
+           core, _, _ := TestCoreUnsealed(t)
            b := core.systemBackend
+           testLoggerName := t.Name() + "." + tc.logger

            // Test core overrides logging level outside of config,
            // an initial delete will ensure that we an initial read
@@ -5514,7 +5519,7 @@ func TestSystemBackend_LoggersByName(t *testing.T) {
            initialLoggers := resp.Data

            req = &logical.Request{
-               Path:      fmt.Sprintf("loggers/%s", tc.logger),
+               Path:      fmt.Sprintf("loggers/%s", testLoggerName),
                Operation: logical.UpdateOperation,
                Data: map[string]interface{}{
                    "level": tc.level,
@@ -5559,14 +5564,14 @@ func TestSystemBackend_LoggersByName(t *testing.T) {
                    t.Fatalf("expected logger %q to be %q, actual: %s", loggerName, tc.expectedLevel, levelStr)
                }

-               if loggerName != tc.logger && levelStr != initialLevelStr {
-                   t.Errorf("expected level of logger %q to be unchanged, exepcted: %s, actual: %s", loggerName, initialLevelStr, levelStr)
+               if loggerName != testLoggerName && levelStr != initialLevelStr {
+                   t.Errorf("expected level of logger %q to be unchanged, expected: %s, actual: %s", loggerName, initialLevelStr, levelStr)
                }
            }
            }

            req = &logical.Request{
-               Path:      fmt.Sprintf("loggers/%s", tc.logger),
+               Path:      fmt.Sprintf("loggers/%s", testLoggerName),
                Operation: logical.DeleteOperation,
            }

@@ -5583,7 +5588,7 @@ func TestSystemBackend_LoggersByName(t *testing.T) {

            if !tc.expectDeleteError {
                req = &logical.Request{
-                   Path:      fmt.Sprintf("loggers/%s", tc.logger),
+                   Path:      fmt.Sprintf("loggers/%s", testLoggerName),
                    Operation: logical.ReadOperation,
                }

@@ -5592,18 +5597,18 @@ func TestSystemBackend_LoggersByName(t *testing.T) {
                    t.Fatalf("unexpected error, err: %v, resp: %#v", err, resp)
                }

-               currentLevel, ok := resp.Data[tc.logger].(string)
+               currentLevel, ok := resp.Data[testLoggerName].(string)
                if !ok {
-                   t.Fatalf("expected resp to include %q, resp: %#v", tc.logger, resp)
+                   t.Fatalf("expected resp to include %q, resp: %#v", testLoggerName, resp)
                }

-               initialLevel, ok := initialLoggers[tc.logger].(string)
+               initialLevel, ok := initialLoggers[testLoggerName].(string)
                if !ok {
-                   t.Fatalf("expected initial loggers to include %q, resp: %#v", tc.logger, initialLoggers)
+                   t.Fatalf("expected initial loggers to include %q, resp: %#v", testLoggerName, initialLoggers)
                }

                if currentLevel != initialLevel {
-                   t.Errorf("expected level of logger %q to match original config, expected: %s, actual: %s", tc.logger, initialLevel, currentLevel)
+                   t.Errorf("expected level of logger %q to match original config, expected: %s, actual: %s", testLoggerName, initialLevel, currentLevel)
                }
            }
        })
@@ -1703,7 +1703,6 @@ func (c *Core) newLogicalBackend(ctx context.Context, entry *MountEntry, sysView
     conf["plugin_version"] = entry.Version

     backendLogger := c.baseLogger.Named(fmt.Sprintf("secrets.%s.%s", t, entry.Accessor))
-    c.AddLogger(backendLogger)
     pluginEventSender, err := c.events.WithPlugin(entry.namespace, &logical.EventPluginInfo{
         MountClass:    consts.PluginTypeSecrets.String(),
         MountAccessor: entry.Accessor,
|
|||||||
var err error
|
var err error
|
||||||
sysView := &dynamicSystemView{core: c, perfStandby: c.perfStandby}
|
sysView := &dynamicSystemView{core: c, perfStandby: c.perfStandby}
|
||||||
psLogger := c.baseLogger.Named("policy")
|
psLogger := c.baseLogger.Named("policy")
|
||||||
c.AddLogger(psLogger)
|
|
||||||
c.policyStore, err = NewPolicyStore(ctx, c, c.systemBarrierView, sysView, psLogger)
|
c.policyStore, err = NewPolicyStore(ctx, c, c.systemBarrierView, sysView, psLogger)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
|||||||
@@ -359,7 +359,6 @@ func (c *Core) startPeriodicRaftTLSRotate(ctx context.Context) error {

     c.raftTLSRotationStopCh = make(chan struct{})
     logger := c.logger.Named("raft")
-    c.AddLogger(logger)

     if c.isRaftHAOnly() {
         return c.raftTLSRotateDirect(ctx, logger, c.raftTLSRotationStopCh)
|
|||||||
return ret
|
return ret
|
||||||
}
|
}
|
||||||
rollbackLogger := c.baseLogger.Named("rollback")
|
rollbackLogger := c.baseLogger.Named("rollback")
|
||||||
c.AddLogger(rollbackLogger)
|
|
||||||
c.rollback = NewRollbackManager(c.activeContext, rollbackLogger, backendsFunc, c.router, c)
|
c.rollback = NewRollbackManager(c.activeContext, rollbackLogger, backendsFunc, c.router, c)
|
||||||
c.rollback.Start()
|
c.rollback.Start()
|
||||||
return nil
|
return nil
|
||||||
|
|||||||
@@ -89,7 +89,6 @@ func (d *autoSeal) SetCore(core *Core) {
     d.core = core
     if d.logger == nil {
         d.logger = d.core.Logger().Named("autoseal")
-        d.core.AddLogger(d.logger)
     }
 }

@@ -204,6 +204,8 @@ func TestCoreWithSealAndUINoCleanup(t testing.T, opts *CoreConfig) *Core {
     // Start off with base test core config
     conf := testCoreConfig(t, errInjector, logger)

+    corehelpers.RegisterSubloggerAdder(logger, conf)
+
     // Override config values with ones that gets passed in
     conf.EnableUI = opts.EnableUI
     conf.EnableRaw = opts.EnableRaw
@@ -222,6 +224,7 @@ func TestCoreWithSealAndUINoCleanup(t testing.T, opts *CoreConfig) *Core {
     conf.Experiments = opts.Experiments
     conf.CensusAgent = opts.CensusAgent
     conf.AdministrativeNamespacePath = opts.AdministrativeNamespacePath
+    conf.AllLoggers = logger.AllLoggers

     if opts.Logger != nil {
         conf.Logger = opts.Logger
@@ -250,6 +253,8 @@ func TestCoreWithSealAndUINoCleanup(t testing.T, opts *CoreConfig) *Core {
         t.Fatalf("err: %s", err)
     }

+    // Switch the SubloggerHook over to core
+    corehelpers.RegisterSubloggerAdder(logger, c)
     return c
 }

@@ -1521,6 +1526,8 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te
         BuiltinRegistry: corehelpers.NewMockBuiltinRegistry(),
     }

+    corehelpers.RegisterSubloggerAdder(testCluster.Logger, coreConfig)
+
     if base != nil {
         coreConfig.DetectDeadlocks = TestDeadlockDetection
         coreConfig.RawConfig = base.RawConfig
@@ -1688,6 +1695,8 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te
     for i := 0; i < numCores; i++ {
         cleanup, c, localConfig, handler := testCluster.newCore(t, i, coreConfig, opts, listeners[i], testCluster.LicensePublicKey)

+        corehelpers.RegisterSubloggerAdder(testCluster.Logger, c)
+
         testCluster.cleanupFuncs = append(testCluster.cleanupFuncs, cleanup)
         cores = append(cores, c)
         coreConfigs = append(coreConfigs, &localConfig)