Logging tweaks, mostly for tests (#10028)

Fix some places where raft wasn't hooking into the core logger as it should.
Revisited the code that was setting the log level to Error during cleanup: a burst of errors is normal at that stage, and it drowns out whatever went wrong before the test was deemed to have failed. So now, instead of setting the log level to Error, we stop logging altogether. This only applies if the test didn't pass in its own logger during cluster creation, but we should be moving away from that anyway.
Author: ncabatoff
Date:   2020-09-23 13:40:00 -04:00 (committed by GitHub)
Parent: d5c20be2dc
Commit: 066678f5f0
3 changed files with 40 additions and 35 deletions
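
For orientation before the diffs: the cleanup change hinges on go-hclog's intercept logger, which writes both to its own output and to any registered sinks, and allows sinks to be deregistered later. A minimal, self-contained sketch of that pattern (this program is illustrative, not code from the PR):

package main

import (
    "io/ioutil"
    "os"

    hclog "github.com/hashicorp/go-hclog"
)

func main() {
    // The logger's own output is discarded; everything visible flows
    // through the registered sink.
    logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{
        Output: ioutil.Discard,
    })
    sink := hclog.NewSinkAdapter(&hclog.LoggerOptions{
        Output: os.Stderr,
        Level:  hclog.Trace,
    })
    logger.RegisterSink(sink)

    logger.Info("visible: the sink writes to stderr")

    // Dropping the sink silences everything -- unlike raising the level
    // to Error, which still lets cleanup errors through.
    logger.DeregisterSink(sink)
    logger.Error("not visible: no sinks remain")
}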


@@ -98,7 +98,7 @@ func MakeRaftBackend(t testing.T, coreIdx int, logger hclog.Logger) *vault.Physi
         "performance_multiplier": "8",
     }
-    backend, err := raft.NewRaftBackend(conf, logger)
+    backend, err := raft.NewRaftBackend(conf, logger.Named("raft"))
     if err != nil {
         cleanupFunc()
         t.Fatal(err)
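
For readers outside the Vault codebase: hclog's Named creates a child logger that inherits the parent's output and settings and dot-appends its own name, which is what hooks raft output into the core logger here. A tiny illustrative fragment (the logger names are invented):

import hclog "github.com/hashicorp/go-hclog"

func nameDemo() {
    core := hclog.New(&hclog.LoggerOptions{Name: "core-0"})
    raftLog := core.Named("raft")
    // Emits a line like: [INFO]  core-0.raft: backend started
    raftLog.Info("backend started")
}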


@@ -607,6 +607,7 @@ func (b *RaftBackend) SetupCluster(ctx context.Context, opts SetupOpts) error {
         MaxPool:               3,
         Timeout:               10 * time.Second,
         ServerAddressProvider: b.serverAddressProvider,
+        Logger:                b.logger.Named("raft-net"),
     }

     transport := raft.NewNetworkTransportWithConfig(transConfig)
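
If the Logger field is left nil, hashicorp/raft (as of the versions in use around this commit) builds its own default "raft-net" logger writing to stderr, so transport chatter bypasses the test's logger entirely; passing b.logger.Named("raft-net") keeps the conventional name while routing output through the core logger. A hedged sketch of the call shape (newTransport is a made-up helper; the stream layer is assumed to come from the caller):

import (
    "time"

    hclog "github.com/hashicorp/go-hclog"
    "github.com/hashicorp/raft"
)

// newTransport shows how a transport picks up an existing logger.
func newTransport(stream raft.StreamLayer, logger hclog.Logger) *raft.NetworkTransport {
    return raft.NewNetworkTransportWithConfig(&raft.NetworkTransportConfig{
        Stream:  stream,
        MaxPool: 3,
        Timeout: 10 * time.Second,
        // Without this field, raft falls back to its own stderr logger.
        Logger: logger.Named("raft-net"),
    })
}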


@@ -896,26 +896,10 @@ func (c *TestClusterCore) stop() error {
     return nil
 }

-func CleanupClusters(clusters []*TestCluster) {
-    wg := &sync.WaitGroup{}
-    for _, cluster := range clusters {
-        wg.Add(1)
-        lc := cluster
-        go func() {
-            defer wg.Done()
-            lc.Cleanup()
-        }()
-    }
-    wg.Wait()
-}
-
 func (c *TestCluster) Cleanup() {
     c.Logger.Info("cleaning up vault cluster")
-    for _, core := range c.Cores {
-        // Upgrade logger to emit errors if not doing so already
-        if !core.CoreConfig.Logger.IsError() {
-            core.CoreConfig.Logger.SetLevel(log.Error)
-        }
+    if tl, ok := c.Logger.(*TestLogger); ok {
+        tl.StopLogging()
     }

     wg := &sync.WaitGroup{}
@@ -926,6 +910,8 @@ func (c *TestCluster) Cleanup() {
     go func() {
         defer wg.Done()
         if err := lc.stop(); err != nil {
+            // Note that this log won't be seen if using TestLogger, due to
+            // the above call to StopLogging.
             lc.Logger().Error("error during cleanup", "error", err)
         }
     }()
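
Aside: both the deleted CleanupClusters helper and the surviving per-core loop use the same concurrent fan-out shape, including the `lc := cluster` rebinding that pre-Go-1.22 code needs so each goroutine captures its own value rather than the shared loop variable. A self-contained rendering of the pattern (stopAll and its argument are invented for illustration):

import "sync"

// stopAll invokes every stop function concurrently and waits for all.
func stopAll(stops []func() error) {
    wg := &sync.WaitGroup{}
    for _, stop := range stops {
        wg.Add(1)
        s := stop // rebind: before Go 1.22 the loop variable is shared
        go func() {
            defer wg.Done()
            _ = s() // the real code logs the error instead of dropping it
        }()
    }
    wg.Wait()
}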
@@ -1078,34 +1064,52 @@ type TestLogger struct {
     log.Logger
     Path string
     File *os.File
+    sink log.SinkAdapter
 }

 func NewTestLogger(t testing.T) *TestLogger {
+    var logFile *os.File
+    var logPath string
+    output := os.Stderr
+
     var logDir = os.Getenv("VAULT_TEST_LOG_DIR")
-    if logDir == "" {
-        return &TestLogger{
-            Logger: logging.NewVaultLogger(log.Trace).Named(t.Name()),
+    if logDir != "" {
+        logPath = filepath.Join(logDir, t.Name()+".log")
+        // t.Name may include slashes.
+        dir, _ := filepath.Split(logPath)
+        err := os.MkdirAll(dir, 0755)
+        if err != nil {
+            t.Fatal(err)
         }
+        logFile, err = os.Create(logPath)
+        if err != nil {
+            t.Fatal(err)
+        }
+        output = logFile
     }

-    logFileName := filepath.Join(logDir, t.Name()+".log")
-    // t.Name may include slashes.
-    dir, _ := filepath.Split(logFileName)
-    if err := os.MkdirAll(dir, 0755); err != nil {
-        t.Fatal(err)
-    }
-    logFile, err := os.Create(logFileName)
-    if err != nil {
-        t.Fatal(err)
-    }
+    // We send nothing on the regular logger, that way we can later deregister
+    // the sink to stop logging during cluster cleanup.
+    logger := log.NewInterceptLogger(&log.LoggerOptions{
+        Output: ioutil.Discard,
+    })
+    sink := log.NewSinkAdapter(&log.LoggerOptions{
+        Output: output,
+        Level:  log.Trace,
+    })
+    logger.RegisterSink(sink)
+
     return &TestLogger{
-        Path:   logFileName,
+        Path:   logPath,
         File:   logFile,
-        Logger: logging.NewVaultLoggerWithWriter(logFile, log.Trace),
+        Logger: logger,
+        sink:   sink,
     }
 }

+func (tl *TestLogger) StopLogging() {
+    tl.Logger.(log.InterceptLogger).DeregisterSink(tl.sink)
+}
+
 // NewTestCluster creates a new test cluster based on the provided core config
 // and test cluster options.
 //
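
Finally, a hedged sketch of how a test might drive the new TestLogger end to end (the test body is invented; at the time of this commit NewTestLogger lives in the vault package, whose testing.T parameter is the go-testing-interface type that the standard *testing.T satisfies):

import (
    "testing"

    "github.com/hashicorp/vault/vault"
)

func TestLoggerLifecycle(t *testing.T) {
    tl := vault.NewTestLogger(t)
    // With VAULT_TEST_LOG_DIR set this lands in <dir>/<TestName>.log;
    // otherwise the sink writes to stderr.
    tl.Info("cluster starting")

    // ... exercise the cluster ...

    // After StopLogging the intercept logger has no sinks, so even
    // Error-level cleanup noise is dropped.
    tl.StopLogging()
    tl.Error("never shown")
}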