Merge branch 'main' into peteski22/handleAuditNonLogical

Peter Wilson committed via GitHub on 2023-12-19 10:31:54 +00:00
359 changed files with 5751 additions and 1875 deletions

@@ -10,8 +10,8 @@
- Announcement list: [Google Groups](https://groups.google.com/group/hashicorp-announce)
- Discussion forum: [Discuss](https://discuss.hashicorp.com/c/vault)
- Documentation: [https://developer.hashicorp.com/vault/docs](https://developer.hashicorp.com/vault/docs)
- Tutorials: [HashiCorp's Learn Platform](https://learn.hashicorp.com/vault)
- Certification Exam: [Vault Associate](https://www.hashicorp.com/certification/#hashicorp-certified-vault-associate)
- Tutorials: [https://developer.hashicorp.com/vault/tutorials](https://developer.hashicorp.com/vault/tutorials)
- Certification Exam: [https://developer.hashicorp.com/certifications/security-automation](https://developer.hashicorp.com/certifications/security-automation)
<img width="300" alt="Vault Logo" src="https://github.com/hashicorp/vault/blob/f22d202cde2018f9455dec755118a9b84586e082/Vault_PrimaryLogo_Black.png">

audit/entry_filter.go (new file, 91 lines)
@@ -0,0 +1,91 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package audit
import (
"context"
"fmt"
"strings"
"github.com/hashicorp/eventlogger"
"github.com/hashicorp/go-bexpr"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/internal/observability/event"
)
var _ eventlogger.Node = (*EntryFilter)(nil)
// NewEntryFilter should be used to create an EntryFilter node.
// The filter supplied should be in bexpr format and reference fields from logical.LogInputBexpr.
func NewEntryFilter(filter string) (*EntryFilter, error) {
const op = "audit.NewEntryFilter"
filter = strings.TrimSpace(filter)
if filter == "" {
return nil, fmt.Errorf("%s: cannot create new audit filter with empty filter expression: %w", op, event.ErrInvalidParameter)
}
eval, err := bexpr.CreateEvaluator(filter)
if err != nil {
return nil, fmt.Errorf("%s: cannot create new audit filter: %w", op, err)
}
return &EntryFilter{evaluator: eval}, nil
}
// Reopen is a no-op for the filter node.
func (*EntryFilter) Reopen() error {
return nil
}
// Type describes the type of this node (filter).
func (*EntryFilter) Type() eventlogger.NodeType {
return eventlogger.NodeTypeFilter
}
// Process will attempt to parse the incoming event data and decide whether it
// should be filtered or remain in the pipeline and passed to the next node.
func (f *EntryFilter) Process(ctx context.Context, e *eventlogger.Event) (*eventlogger.Event, error) {
const op = "audit.(EntryFilter).Process"
select {
case <-ctx.Done():
return nil, ctx.Err()
default:
}
if e == nil {
return nil, fmt.Errorf("%s: event is nil: %w", op, event.ErrInvalidParameter)
}
a, ok := e.Payload.(*AuditEvent)
if !ok {
return nil, fmt.Errorf("%s: cannot parse event payload: %w", op, event.ErrInvalidParameter)
}
// If we don't have data to process, then we're done.
if a.Data == nil {
return nil, nil
}
ns, err := namespace.FromContext(ctx)
if err != nil {
return nil, fmt.Errorf("%s: cannot obtain namespace: %w", op, err)
}
datum := a.Data.BexprDatum(ns.Path)
result, err := f.evaluator.Evaluate(datum)
if err != nil {
return nil, fmt.Errorf("%s: unable to evaluate filter: %w", op, err)
}
if result {
// Allow this event to carry on through the pipeline.
return e, nil
}
// End process of this pipeline.
return nil, nil
}
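
For reference, a minimal usage sketch for the new filter node, written as if it sat in an example test file inside the audit package (alongside the tests in the next file). The mount_type == kv expression and the request fields are illustrative; an expression can reference any field exposed by logical.LogInputBexpr, such as mount_type, operation and namespace, as the tests below do.

package audit

import (
	"context"
	"fmt"
	"time"

	"github.com/hashicorp/eventlogger"
	"github.com/hashicorp/vault/helper/namespace"
	"github.com/hashicorp/vault/internal/observability/event"
	"github.com/hashicorp/vault/sdk/logical"
)

// ExampleEntryFilter shows the filter node deciding whether an audit event
// stays in the pipeline.
func ExampleEntryFilter() {
	// Build the filter node from a bexpr expression over logical.LogInputBexpr fields.
	f, err := NewEntryFilter("mount_type == kv")
	if err != nil {
		panic(err)
	}

	// Wrap an audit event whose request data matches the filter.
	a, err := NewEvent(RequestType)
	if err != nil {
		panic(err)
	}
	a.Data = &logical.LogInput{Request: &logical.Request{MountType: "kv"}}

	e := &eventlogger.Event{
		Type:      eventlogger.EventType(event.AuditType.String()),
		CreatedAt: time.Now(),
		Formatted: make(map[string][]byte),
		Payload:   a,
	}

	// Process needs a namespace on the context; a matching event is returned
	// unchanged, a non-matching one comes back as nil and ends the pipeline.
	ctx := namespace.ContextWithNamespace(context.Background(), namespace.RootNamespace)
	out, err := f.Process(ctx, e)
	fmt.Println(out != nil, err)

	// Output: true <nil>
}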

audit/entry_filter_test.go (new file, 249 lines)
@@ -0,0 +1,249 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package audit
import (
"context"
"testing"
"time"
"github.com/hashicorp/eventlogger"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/internal/observability/event"
"github.com/hashicorp/vault/sdk/logical"
"github.com/stretchr/testify/require"
)
// TestEntryFilter_NewEntryFilter tests that we can create EntryFilter types correctly.
func TestEntryFilter_NewEntryFilter(t *testing.T) {
t.Parallel()
tests := map[string]struct {
Filter string
IsErrorExpected bool
ExpectedErrorMessage string
}{
"empty-filter": {
Filter: "",
IsErrorExpected: true,
ExpectedErrorMessage: "audit.NewEntryFilter: cannot create new audit filter with empty filter expression: invalid parameter",
},
"spacey-filter": {
Filter: " ",
IsErrorExpected: true,
ExpectedErrorMessage: "audit.NewEntryFilter: cannot create new audit filter with empty filter expression: invalid parameter",
},
"bad-filter": {
Filter: "____",
IsErrorExpected: true,
ExpectedErrorMessage: "audit.NewEntryFilter: cannot create new audit filter",
},
"good-filter": {
Filter: "foo == bar",
IsErrorExpected: false,
},
}
for name, tc := range tests {
name := name
tc := tc
t.Run(name, func(t *testing.T) {
t.Parallel()
f, err := NewEntryFilter(tc.Filter)
switch {
case tc.IsErrorExpected:
require.ErrorContains(t, err, tc.ExpectedErrorMessage)
require.Nil(t, f)
default:
require.NoError(t, err)
require.NotNil(t, f)
}
})
}
}
// TestEntryFilter_Reopen ensures we can reopen the filter node.
func TestEntryFilter_Reopen(t *testing.T) {
t.Parallel()
f := &EntryFilter{}
res := f.Reopen()
require.Nil(t, res)
}
// TestEntryFilter_Type ensures we always return the right type for this node.
func TestEntryFilter_Type(t *testing.T) {
t.Parallel()
f := &EntryFilter{}
require.Equal(t, eventlogger.NodeTypeFilter, f.Type())
}
// TestEntryFilter_Process_ContextDone ensures that we stop processing the event
// if the context was cancelled.
func TestEntryFilter_Process_ContextDone(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithCancel(context.Background())
// Explicitly cancel the context
cancel()
l, err := NewEntryFilter("foo == bar")
require.NoError(t, err)
// Fake audit event
a, err := NewEvent(RequestType)
require.NoError(t, err)
// Fake event logger event
e := &eventlogger.Event{
Type: eventlogger.EventType(event.AuditType.String()),
CreatedAt: time.Now(),
Formatted: make(map[string][]byte),
Payload: a,
}
e2, err := l.Process(ctx, e)
require.Error(t, err)
require.ErrorContains(t, err, "context canceled")
// Ensure that the pipeline won't continue.
require.Nil(t, e2)
}
// TestEntryFilter_Process_NilEvent ensures we receive the right error when the
// event we are trying to process is nil.
func TestEntryFilter_Process_NilEvent(t *testing.T) {
t.Parallel()
l, err := NewEntryFilter("foo == bar")
require.NoError(t, err)
e, err := l.Process(context.Background(), nil)
require.Error(t, err)
require.EqualError(t, err, "audit.(EntryFilter).Process: event is nil: invalid parameter")
// Ensure that the pipeline won't continue.
require.Nil(t, e)
}
// TestEntryFilter_Process_BadPayload ensures we receive the correct error when
// attempting to process an event with a payload that cannot be parsed back to
// an audit event.
func TestEntryFilter_Process_BadPayload(t *testing.T) {
t.Parallel()
l, err := NewEntryFilter("foo == bar")
require.NoError(t, err)
e := &eventlogger.Event{
Type: eventlogger.EventType(event.AuditType.String()),
CreatedAt: time.Now(),
Formatted: make(map[string][]byte),
Payload: nil,
}
e2, err := l.Process(context.Background(), e)
require.Error(t, err)
require.EqualError(t, err, "audit.(EntryFilter).Process: cannot parse event payload: invalid parameter")
// Ensure that the pipeline won't continue.
require.Nil(t, e2)
}
// TestEntryFilter_Process_NoAuditDataInPayload ensures we stop processing a pipeline
// when the data in the audit event is nil.
func TestEntryFilter_Process_NoAuditDataInPayload(t *testing.T) {
t.Parallel()
l, err := NewEntryFilter("foo == bar")
require.NoError(t, err)
a, err := NewEvent(RequestType)
require.NoError(t, err)
// Ensure audit data is nil
a.Data = nil
e := &eventlogger.Event{
Type: eventlogger.EventType(event.AuditType.String()),
CreatedAt: time.Now(),
Formatted: make(map[string][]byte),
Payload: a,
}
e2, err := l.Process(context.Background(), e)
// Make sure we get the 'nil, nil' response to stop processing this pipeline.
require.NoError(t, err)
require.Nil(t, e2)
}
// TestEntryFilter_Process_FilterSuccess tests that when a filter matches we
// receive no error and the event is not nil so it continues in the pipeline.
func TestEntryFilter_Process_FilterSuccess(t *testing.T) {
t.Parallel()
l, err := NewEntryFilter("mount_type == juan")
require.NoError(t, err)
a, err := NewEvent(RequestType)
require.NoError(t, err)
a.Data = &logical.LogInput{
Request: &logical.Request{
Operation: logical.CreateOperation,
MountType: "juan",
},
}
e := &eventlogger.Event{
Type: eventlogger.EventType(event.AuditType.String()),
CreatedAt: time.Now(),
Formatted: make(map[string][]byte),
Payload: a,
}
ctx := namespace.ContextWithNamespace(context.Background(), namespace.RootNamespace)
e2, err := l.Process(ctx, e)
require.NoError(t, err)
require.NotNil(t, e2)
}
// TestEntryFilter_Process_FilterFail tests that when a filter fails to match we
// receive no error, but also the event is nil so that the pipeline completes.
func TestEntryFilter_Process_FilterFail(t *testing.T) {
t.Parallel()
l, err := NewEntryFilter("mount_type == john and operation == create and namespace == root")
require.NoError(t, err)
a, err := NewEvent(RequestType)
require.NoError(t, err)
a.Data = &logical.LogInput{
Request: &logical.Request{
Operation: logical.CreateOperation,
MountType: "juan",
},
}
e := &eventlogger.Event{
Type: eventlogger.EventType(event.AuditType.String()),
CreatedAt: time.Now(),
Formatted: make(map[string][]byte),
Payload: a,
}
ctx := namespace.ContextWithNamespace(context.Background(), namespace.RootNamespace)
e2, err := l.Process(ctx, e)
require.NoError(t, err)
require.Nil(t, e2)
}

@@ -11,16 +11,13 @@ import (
"strings"
"time"
"github.com/jefferai/jsonx"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/sdk/logical"
"github.com/go-jose/go-jose/v3/jwt"
"github.com/hashicorp/eventlogger"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/internal/observability/event"
"github.com/hashicorp/vault/sdk/helper/jsonutil"
"github.com/hashicorp/eventlogger"
"github.com/hashicorp/vault/sdk/logical"
"github.com/jefferai/jsonx"
)
var (
@@ -29,7 +26,7 @@ var (
)
// NewEntryFormatter should be used to create an EntryFormatter.
// Accepted options: WithPrefix.
// Accepted options: WithHeaderFormatter, WithPrefix.
func NewEntryFormatter(config FormatterConfig, salter Salter, opt ...Option) (*EntryFormatter, error) {
const op = "audit.NewEntryFormatter"
@@ -80,7 +77,7 @@ func (f *EntryFormatter) Process(ctx context.Context, e *eventlogger.Event) (*ev
return nil, fmt.Errorf("%s: event is nil: %w", op, event.ErrInvalidParameter)
}
a, ok := e.Payload.(*auditEvent)
a, ok := e.Payload.(*AuditEvent)
if !ok {
return nil, fmt.Errorf("%s: cannot parse event payload: %w", op, event.ErrInvalidParameter)
}

@@ -12,7 +12,7 @@ import (
// NewEvent should be used to create an audit event. The subtype field is needed
// for audit events. It will generate an ID if no ID is supplied. Supported
// options: WithID, WithNow.
func NewEvent(s subtype, opt ...Option) (*auditEvent, error) {
func NewEvent(s subtype, opt ...Option) (*AuditEvent, error) {
const op = "audit.newEvent"
// Get the default options
@@ -30,7 +30,7 @@ func NewEvent(s subtype, opt ...Option) (*auditEvent, error) {
}
}
audit := &auditEvent{
audit := &AuditEvent{
ID: opts.withID,
Timestamp: opts.withNow,
Version: version,
@@ -44,8 +44,8 @@ func NewEvent(s subtype, opt ...Option) (*auditEvent, error) {
}
// validate attempts to ensure the audit event in its present state is valid.
func (a *auditEvent) validate() error {
const op = "audit.(auditEvent).validate"
func (a *AuditEvent) validate() error {
const op = "audit.(AuditEvent).validate"
if a == nil {
return fmt.Errorf("%s: event is nil: %w", op, event.ErrInvalidParameter)

@@ -29,14 +29,14 @@ func TestAuditEvent_new(t *testing.T) {
Subtype: subtype(""),
Format: format(""),
IsErrorExpected: true,
ExpectedErrorMessage: "audit.newEvent: audit.(auditEvent).validate: audit.(subtype).validate: '' is not a valid event subtype: invalid parameter",
ExpectedErrorMessage: "audit.newEvent: audit.(AuditEvent).validate: audit.(subtype).validate: '' is not a valid event subtype: invalid parameter",
},
"empty-Option": {
Options: []Option{},
Subtype: subtype(""),
Format: format(""),
IsErrorExpected: true,
ExpectedErrorMessage: "audit.newEvent: audit.(auditEvent).validate: audit.(subtype).validate: '' is not a valid event subtype: invalid parameter",
ExpectedErrorMessage: "audit.newEvent: audit.(AuditEvent).validate: audit.(subtype).validate: '' is not a valid event subtype: invalid parameter",
},
"bad-id": {
Options: []Option{WithID("")},
@@ -108,22 +108,22 @@ func TestAuditEvent_new(t *testing.T) {
// TestAuditEvent_Validate exercises the validation for an audit event.
func TestAuditEvent_Validate(t *testing.T) {
tests := map[string]struct {
Value *auditEvent
Value *AuditEvent
IsErrorExpected bool
ExpectedErrorMessage string
}{
"nil": {
Value: nil,
IsErrorExpected: true,
ExpectedErrorMessage: "audit.(auditEvent).validate: event is nil: invalid parameter",
ExpectedErrorMessage: "audit.(AuditEvent).validate: event is nil: invalid parameter",
},
"default": {
Value: &auditEvent{},
Value: &AuditEvent{},
IsErrorExpected: true,
ExpectedErrorMessage: "audit.(auditEvent).validate: missing ID: invalid parameter",
ExpectedErrorMessage: "audit.(AuditEvent).validate: missing ID: invalid parameter",
},
"id-empty": {
Value: &auditEvent{
Value: &AuditEvent{
ID: "",
Version: version,
Subtype: RequestType,
@@ -131,10 +131,10 @@ func TestAuditEvent_Validate(t *testing.T) {
Data: nil,
},
IsErrorExpected: true,
ExpectedErrorMessage: "audit.(auditEvent).validate: missing ID: invalid parameter",
ExpectedErrorMessage: "audit.(AuditEvent).validate: missing ID: invalid parameter",
},
"version-fiddled": {
Value: &auditEvent{
Value: &AuditEvent{
ID: "audit_123",
Version: "magic-v2",
Subtype: RequestType,
@@ -142,10 +142,10 @@ func TestAuditEvent_Validate(t *testing.T) {
Data: nil,
},
IsErrorExpected: true,
ExpectedErrorMessage: "audit.(auditEvent).validate: event version unsupported: invalid parameter",
ExpectedErrorMessage: "audit.(AuditEvent).validate: event version unsupported: invalid parameter",
},
"subtype-fiddled": {
Value: &auditEvent{
Value: &AuditEvent{
ID: "audit_123",
Version: version,
Subtype: subtype("moon"),
@@ -153,10 +153,10 @@ func TestAuditEvent_Validate(t *testing.T) {
Data: nil,
},
IsErrorExpected: true,
ExpectedErrorMessage: "audit.(auditEvent).validate: audit.(subtype).validate: 'moon' is not a valid event subtype: invalid parameter",
ExpectedErrorMessage: "audit.(AuditEvent).validate: audit.(subtype).validate: 'moon' is not a valid event subtype: invalid parameter",
},
"default-time": {
Value: &auditEvent{
Value: &AuditEvent{
ID: "audit_123",
Version: version,
Subtype: ResponseType,
@@ -164,10 +164,10 @@ func TestAuditEvent_Validate(t *testing.T) {
Data: nil,
},
IsErrorExpected: true,
ExpectedErrorMessage: "audit.(auditEvent).validate: event timestamp cannot be the zero time instant: invalid parameter",
ExpectedErrorMessage: "audit.(AuditEvent).validate: event timestamp cannot be the zero time instant: invalid parameter",
},
"valid": {
Value: &auditEvent{
Value: &AuditEvent{
ID: "audit_123",
Version: version,
Subtype: ResponseType,

@@ -15,10 +15,12 @@ import (
)
// ProcessManual will attempt to create an (audit) event with the specified data
// and manually iterate over the supplied nodes calling Process on each.
// and manually iterate over the supplied nodes calling Process on each until the
// event is nil (which indicates the pipeline has completed).
// Order of IDs in the NodeID slice determines the order they are processed.
// (Audit) Event will be of RequestType (as opposed to ResponseType).
// The last node must be a sink node (eventlogger.NodeTypeSink).
// The last node must be a filter node (eventlogger.NodeTypeFilter) or
// sink node (eventlogger.NodeTypeSink).
func ProcessManual(ctx context.Context, data *logical.LogInput, ids []eventlogger.NodeID, nodes map[eventlogger.NodeID]eventlogger.Node) error {
switch {
case data == nil:
@@ -52,9 +54,15 @@ func ProcessManual(ctx context.Context, data *logical.LogInput, ids []eventlogge
// Process nodes in order, updating the event with the result.
// This means we *should* do:
// 1. formatter (temporary)
// 2. sink
// 1. filter (optional if configured)
// 2. formatter (temporary)
// 3. sink
for _, id := range ids {
// If the event is nil, we've completed processing the pipeline (hopefully
// by either a filter node or a sink node).
if e == nil {
break
}
node, ok := nodes[id]
if !ok {
return fmt.Errorf("node not found: %v", id)
@@ -74,12 +82,14 @@ func ProcessManual(ctx context.Context, data *logical.LogInput, ids []eventlogge
return err
}
// Track the last node we have processed, as we should end with a sink.
// Track the last node we have processed, as we should end with a filter or sink.
lastSeen = node.Type()
}
if lastSeen != eventlogger.NodeTypeSink {
return errors.New("last node must be a sink")
switch lastSeen {
case eventlogger.NodeTypeSink, eventlogger.NodeTypeFilter:
default:
return errors.New("last node must be a filter or sink")
}
return nil

@@ -185,12 +185,13 @@ func TestProcessManual_LastNodeNotSink(t *testing.T) {
err = ProcessManual(namespace.RootContext(context.Background()), data, ids, nodes)
require.Error(t, err)
require.EqualError(t, err, "last node must be a sink")
require.EqualError(t, err, "last node must be a filter or sink")
}
// TestProcessManual ensures that the manual processing of a test message works
// as expected with proper inputs.
func TestProcessManual(t *testing.T) {
// TestProcessManualEndWithSink ensures that the manual processing of a test
// message works as expected with proper inputs, which means processing ends with
// the sink node.
func TestProcessManualEndWithSink(t *testing.T) {
t.Parallel()
var ids []eventlogger.NodeID
@@ -215,6 +216,39 @@ func TestProcessManual(t *testing.T) {
require.NoError(t, err)
}
// TestProcessManual_EndWithFilter ensures that the manual processing of a test
// message works as expected with proper inputs, which means processing ends with
// the filter node (the event is filtered out before it reaches the sink).
func TestProcessManual_EndWithFilter(t *testing.T) {
t.Parallel()
var ids []eventlogger.NodeID
nodes := make(map[eventlogger.NodeID]eventlogger.Node)
// Filter node
filterId, filterNode := newFilterNode(t)
ids = append(ids, filterId)
nodes[filterId] = filterNode
// Formatter node
formatterId, formatterNode := newFormatterNode(t)
ids = append(ids, formatterId)
nodes[formatterId] = formatterNode
// Sink node
sinkId, sinkNode := newSinkNode(t)
ids = append(ids, sinkId)
nodes[sinkId] = sinkNode
// Data
requestId, err := uuid.GenerateUUID()
require.NoError(t, err)
data := newData(requestId)
err = ProcessManual(namespace.RootContext(context.Background()), data, ids, nodes)
require.NoError(t, err)
}
// newSinkNode creates a new UUID and NoopSink (sink node).
func newSinkNode(t *testing.T) (eventlogger.NodeID, *event.NoopSink) {
t.Helper()
@@ -226,6 +260,25 @@ func newSinkNode(t *testing.T) (eventlogger.NodeID, *event.NoopSink) {
return sinkId, sinkNode
}
// TestFilter is a trivial implementation of eventlogger.Node used as a placeholder
// for Filter nodes in tests.
type TestFilter struct{}
// Process trivially filters the event preventing it from being processed by subsequent nodes.
func (f *TestFilter) Process(_ context.Context, e *eventlogger.Event) (*eventlogger.Event, error) {
return nil, nil
}
// Reopen does nothing.
func (f *TestFilter) Reopen() error {
return nil
}
// Type returns the eventlogger.NodeTypeFilter type.
func (f *TestFilter) Type() eventlogger.NodeType {
return eventlogger.NodeTypeFilter
}
// TestFormatter is a trivial implementation of the eventlogger.Node interface
// used as a place-holder for Formatter nodes in tests.
type TestFormatter struct{}
@@ -248,6 +301,15 @@ func (f *TestFormatter) Type() eventlogger.NodeType {
return eventlogger.NodeTypeFormatter
}
// newFilterNode creates a new TestFilter (filter node).
func newFilterNode(t *testing.T) (eventlogger.NodeID, *TestFilter) {
nodeId, err := event.GenerateNodeID()
require.NoError(t, err)
node := &TestFilter{}
return nodeId, node
}
// newFormatterNode creates a new TestFormatter (formatter node).
func newFormatterNode(t *testing.T) (eventlogger.NodeID, *TestFormatter) {
nodeId, err := event.GenerateNodeID()

@@ -11,8 +11,10 @@ import (
"github.com/hashicorp/eventlogger"
)
var _ eventlogger.Node = (*SinkWrapper)(nil)
// SinkWrapper is a wrapper for any kind of Sink Node that processes events
// containing an auditEvent payload.
// containing an AuditEvent payload.
type SinkWrapper struct {
Name string
Sink eventlogger.Node
@@ -23,7 +25,7 @@ type SinkWrapper struct {
// once this method returns.
func (s *SinkWrapper) Process(ctx context.Context, e *eventlogger.Event) (*eventlogger.Event, error) {
defer func() {
auditEvent, ok := e.Payload.(*auditEvent)
auditEvent, ok := e.Payload.(*AuditEvent)
if ok {
metrics.MeasureSince([]string{"audit", s.Name, auditEvent.Subtype.MetricTag()}, e.CreatedAt)
}

@@ -8,9 +8,9 @@ import (
"io"
"time"
"github.com/hashicorp/eventlogger"
"github.com/hashicorp/go-bexpr"
"github.com/hashicorp/vault/internal/observability/event"
"github.com/hashicorp/vault/sdk/helper/salt"
"github.com/hashicorp/vault/sdk/logical"
)
@@ -35,8 +35,8 @@ type subtype string
// format defines types of format audit events support.
type format string
// auditEvent is the audit event.
type auditEvent struct {
// AuditEvent is the audit event.
type AuditEvent struct {
ID string `json:"id"`
Version string `json:"version"`
Subtype subtype `json:"subtype"` // the subtype of the audit event.
@@ -144,6 +144,13 @@ type FormatterConfig struct {
RequiredFormat format
}
// EntryFilter should be used to filter audit requests and responses which should
// make it to a sink.
type EntryFilter struct {
// the evaluator for the bexpr expression that should be applied by the node.
evaluator *bexpr.Evaluator
}
// RequestEntry is the structure of a request audit log entry.
type RequestEntry struct {
Time string `json:"time,omitempty"`
@@ -268,6 +275,10 @@ type Backend interface {
// Salter interface must be implemented by anything implementing Backend.
Salter
// The PipelineReader interface allows backends to surface information about their
// nodes for node and pipeline registration.
event.PipelineReader
// LogRequest is used to synchronously log a request. This is done after the
// request is authorized but before the request is executed. The arguments
// MUST not be modified in any way. They should be deep copied if this is
@@ -291,12 +302,6 @@ type Backend interface {
// Invalidate is called for path invalidation
Invalidate(context.Context)
// RegisterNodesAndPipeline provides an eventlogger.Broker pointer so that
// the Backend can call its RegisterNode and RegisterPipeline methods with
// the nodes and the pipeline that were created in the corresponding
// Factory function.
RegisterNodesAndPipeline(*eventlogger.Broker, string) error
}
// BackendConfig contains configuration parameters used in the factory func to
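
With RegisterNodesAndPipeline removed from the Backend interface, node and pipeline registration moves to whichever component owns the eventlogger.Broker, driven by the accessors PipelineReader exposes. The following sketch shows what that consumption could look like; it assumes only the accessors the file backend implements later in this diff (Name, Nodes, NodeIDs, EventType) and reuses the broker calls from the RegisterNodesAndPipeline implementation this change deletes. The registerAuditPipeline helper, the local pipelineReader interface and the package name are illustrative, not Vault's actual wiring.

package corewiring // hypothetical package for the broker-side wiring

import "github.com/hashicorp/eventlogger"

// pipelineReader lists the accessors this sketch relies on; they match the
// methods the file audit backend implements further down in this diff.
type pipelineReader interface {
	Name() string
	Nodes() map[eventlogger.NodeID]eventlogger.Node
	NodeIDs() []eventlogger.NodeID
	EventType() eventlogger.EventType
}

// registerAuditPipeline registers a backend's nodes and pipeline with the broker,
// mirroring the behaviour of the removed RegisterNodesAndPipeline method.
func registerAuditPipeline(broker *eventlogger.Broker, backend pipelineReader) error {
	// Register every node the backend surfaced, refusing to overwrite existing ones.
	for id, node := range backend.Nodes() {
		err := broker.RegisterNode(id, node, eventlogger.WithNodeRegistrationPolicy(eventlogger.DenyOverwrite))
		if err != nil {
			return err
		}
	}

	// Register the pipeline itself, keeping the node ordering the backend requires.
	pipeline := eventlogger.Pipeline{
		PipelineID: eventlogger.PipelineID(backend.Name()),
		EventType:  backend.EventType(),
		NodeIDs:    backend.NodeIDs(),
	}
	return broker.RegisterPipeline(pipeline, eventlogger.WithPipelineRegistrationPolicy(eventlogger.DenyOverwrite))
}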

@@ -27,75 +27,71 @@ const (
discard = "discard"
)
func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool, headersConfig audit.HeaderFormatter) (audit.Backend, error) {
var _ audit.Backend = (*Backend)(nil)
// Backend is the audit backend for the file-based audit store.
//
// NOTE: This audit backend is currently very simple: it appends to a file.
// It doesn't do anything more at the moment to assist with rotation
// or reset the write cursor; this should be done in the future.
type Backend struct {
f *os.File
fileLock sync.RWMutex
formatter *audit.EntryFormatterWriter
formatConfig audit.FormatterConfig
mode os.FileMode
name string
nodeIDList []eventlogger.NodeID
nodeMap map[eventlogger.NodeID]eventlogger.Node
filePath string
salt *atomic.Value
saltConfig *salt.Config
saltMutex sync.RWMutex
saltView logical.Storage
}
func Factory(_ context.Context, conf *audit.BackendConfig, useEventLogger bool, headersConfig audit.HeaderFormatter) (audit.Backend, error) {
const op = "file.Factory"
if conf.SaltConfig == nil {
return nil, fmt.Errorf("nil salt config")
return nil, fmt.Errorf("%s: nil salt config", op)
}
if conf.SaltView == nil {
return nil, fmt.Errorf("nil salt view")
return nil, fmt.Errorf("%s: nil salt view", op)
}
path, ok := conf.Config["file_path"]
if !ok {
path, ok = conf.Config["path"]
if !ok {
return nil, fmt.Errorf("file_path is required")
}
// Get file path from config or fall back to the old option name ('path') for compatibility
// (see commit bac4fe0799a372ba1245db642f3f6cd1f1d02669).
var filePath string
if p, ok := conf.Config["file_path"]; ok {
filePath = p
} else if p, ok = conf.Config["path"]; ok {
filePath = p
} else {
return nil, fmt.Errorf("%s: file_path is required", op)
}
// normalize path if configured for stdout
if strings.EqualFold(path, stdout) {
path = stdout
// normalize file path if configured for stdout
if strings.EqualFold(filePath, stdout) {
filePath = stdout
}
if strings.EqualFold(path, discard) {
path = discard
}
var cfgOpts []audit.Option
if format, ok := conf.Config["format"]; ok {
cfgOpts = append(cfgOpts, audit.WithFormat(format))
}
// Check if hashing of accessor is disabled
if hmacAccessorRaw, ok := conf.Config["hmac_accessor"]; ok {
v, err := strconv.ParseBool(hmacAccessorRaw)
if err != nil {
return nil, err
}
cfgOpts = append(cfgOpts, audit.WithHMACAccessor(v))
}
// Check if raw logging is enabled
if raw, ok := conf.Config["log_raw"]; ok {
v, err := strconv.ParseBool(raw)
if err != nil {
return nil, err
}
cfgOpts = append(cfgOpts, audit.WithRaw(v))
}
if elideListResponsesRaw, ok := conf.Config["elide_list_responses"]; ok {
v, err := strconv.ParseBool(elideListResponsesRaw)
if err != nil {
return nil, err
}
cfgOpts = append(cfgOpts, audit.WithElision(v))
if strings.EqualFold(filePath, discard) {
filePath = discard
}
mode := os.FileMode(0o600)
if modeRaw, ok := conf.Config["mode"]; ok {
m, err := strconv.ParseUint(modeRaw, 8, 32)
if err != nil {
return nil, err
return nil, fmt.Errorf("%s: unable to parse 'mode': %w", op, err)
}
switch m {
case 0:
// if mode is 0000, then do not modify file mode
if path != stdout && path != discard {
fileInfo, err := os.Stat(path)
if filePath != stdout && filePath != discard {
fileInfo, err := os.Stat(filePath)
if err != nil {
return nil, err
return nil, fmt.Errorf("%s: unable to stat %q: %w", op, filePath, err)
}
mode = fileInfo.Mode()
}
@@ -104,18 +100,19 @@ func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool
}
}
cfg, err := audit.NewFormatterConfig(cfgOpts...)
cfg, err := formatterConfig(conf.Config)
if err != nil {
return nil, err
return nil, fmt.Errorf("%s: failed to create formatter config: %w", op, err)
}
b := &Backend{
path: path,
filePath: filePath,
formatConfig: cfg,
mode: mode,
name: conf.MountPath,
saltConfig: conf.SaltConfig,
saltView: conf.SaltView,
salt: new(atomic.Value),
formatConfig: cfg,
}
// Ensure we are working with the right type by explicitly storing a nil of
@@ -125,8 +122,9 @@ func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool
// Configure the formatter for either case.
f, err := audit.NewEntryFormatter(b.formatConfig, b, audit.WithHeaderFormatter(headersConfig), audit.WithPrefix(conf.Config["prefix"]))
if err != nil {
return nil, fmt.Errorf("error creating formatter: %w", err)
return nil, fmt.Errorf("%s: error creating formatter: %w", op, err)
}
var w audit.Writer
switch b.formatConfig.RequiredFormat {
case audit.JSONFormat:
@@ -134,63 +132,40 @@ func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool
case audit.JSONxFormat:
w = &audit.JSONxWriter{Prefix: conf.Config["prefix"]}
default:
return nil, fmt.Errorf("unknown format type %q", b.formatConfig.RequiredFormat)
return nil, fmt.Errorf("%s: unknown format type %q", op, b.formatConfig.RequiredFormat)
}
fw, err := audit.NewEntryFormatterWriter(b.formatConfig, f, w)
if err != nil {
return nil, fmt.Errorf("error creating formatter writer: %w", err)
return nil, fmt.Errorf("%s: error creating formatter writer: %w", op, err)
}
b.formatter = fw
if useEventLogger {
b.nodeIDList = make([]eventlogger.NodeID, 2)
b.nodeIDList = []eventlogger.NodeID{}
b.nodeMap = make(map[eventlogger.NodeID]eventlogger.Node)
formatterNodeID, err := event.GenerateNodeID()
err := b.configureFilterNode(conf.Config["filter"])
if err != nil {
return nil, fmt.Errorf("error generating random NodeID for formatter node: %w", err)
return nil, fmt.Errorf("%s: error configuring filter node: %w", op, err)
}
b.nodeIDList[0] = formatterNodeID
b.nodeMap[formatterNodeID] = f
var sinkNode eventlogger.Node
switch path {
case stdout:
sinkNode = &audit.SinkWrapper{Name: path, Sink: event.NewStdoutSinkNode(b.formatConfig.RequiredFormat.String())}
case discard:
sinkNode = &audit.SinkWrapper{Name: path, Sink: event.NewNoopSink()}
default:
var err error
var opts []event.Option
// Check if mode is provided
if modeRaw, ok := conf.Config["mode"]; ok {
opts = append(opts, event.WithFileMode(modeRaw))
formatterOpts := []audit.Option{
audit.WithHeaderFormatter(headersConfig),
audit.WithPrefix(conf.Config["prefix"]),
}
// The NewFileSink function attempts to open the file and will
// return an error if it can't.
n, err := event.NewFileSink(
b.path,
b.formatConfig.RequiredFormat.String(), opts...)
err = b.configureFormatterNode(cfg, formatterOpts...)
if err != nil {
return nil, fmt.Errorf("file sink creation failed for path %q: %w", path, err)
}
sinkNode = &audit.SinkWrapper{Name: conf.MountPath, Sink: n}
return nil, fmt.Errorf("%s: error configuring formatter node: %w", op, err)
}
sinkNodeID, err := event.GenerateNodeID()
err = b.configureSinkNode(conf.MountPath, filePath, conf.Config["mode"], cfg.RequiredFormat.String())
if err != nil {
return nil, fmt.Errorf("error generating random NodeID for sink node: %w", err)
return nil, fmt.Errorf("%s: error configuring sink node: %w", op, err)
}
b.nodeIDList[1] = sinkNodeID
b.nodeMap[sinkNodeID] = sinkNode
} else {
switch path {
switch filePath {
case stdout:
case discard:
default:
@@ -198,7 +173,7 @@ func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool
// otherwise it will be too late to catch later without problems
// (ref: https://github.com/hashicorp/vault/issues/550)
if err := b.open(); err != nil {
return nil, fmt.Errorf("sanity check failed; unable to open %q for writing: %w", path, err)
return nil, fmt.Errorf("%s: sanity check failed; unable to open %q for writing: %w", op, filePath, err)
}
}
}
@@ -206,32 +181,6 @@ func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool
return b, nil
}
// Backend is the audit backend for the file-based audit store.
//
// NOTE: This audit backend is currently very simple: it appends to a file.
// It doesn't do anything more at the moment to assist with rotation
// or reset the write cursor, this should be done in the future.
type Backend struct {
path string
formatter *audit.EntryFormatterWriter
formatConfig audit.FormatterConfig
fileLock sync.RWMutex
f *os.File
mode os.FileMode
saltMutex sync.RWMutex
salt *atomic.Value
saltConfig *salt.Config
saltView logical.Storage
nodeIDList []eventlogger.NodeID
nodeMap map[eventlogger.NodeID]eventlogger.Node
}
var _ audit.Backend = (*Backend)(nil)
func (b *Backend) Salt(ctx context.Context) (*salt.Salt, error) {
s := b.salt.Load().(*salt.Salt)
if s != nil {
@@ -256,9 +205,10 @@ func (b *Backend) Salt(ctx context.Context) (*salt.Salt, error) {
return newSalt, nil
}
// Deprecated: Use eventlogger.
func (b *Backend) LogRequest(ctx context.Context, in *logical.LogInput) error {
var writer io.Writer
switch b.path {
switch b.filePath {
case stdout:
writer = os.Stdout
case discard:
@@ -274,6 +224,7 @@ func (b *Backend) LogRequest(ctx context.Context, in *logical.LogInput) error {
return b.log(ctx, buf, writer)
}
// Deprecated: Use eventlogger.
func (b *Backend) log(_ context.Context, buf *bytes.Buffer, writer io.Writer) error {
reader := bytes.NewReader(buf.Bytes())
@@ -290,7 +241,7 @@ func (b *Backend) log(_ context.Context, buf *bytes.Buffer, writer io.Writer) er
if _, err := reader.WriteTo(writer); err == nil {
b.fileLock.Unlock()
return nil
} else if b.path == stdout {
} else if b.filePath == stdout {
b.fileLock.Unlock()
return err
}
@@ -312,9 +263,10 @@ func (b *Backend) log(_ context.Context, buf *bytes.Buffer, writer io.Writer) er
return err
}
// Deprecated: Use eventlogger.
func (b *Backend) LogResponse(ctx context.Context, in *logical.LogInput) error {
var writer io.Writer
switch b.path {
switch b.filePath {
case stdout:
writer = os.Stdout
case discard:
@@ -338,7 +290,7 @@ func (b *Backend) LogTestMessage(ctx context.Context, in *logical.LogInput, conf
// Old behavior
var writer io.Writer
switch b.path {
switch b.filePath {
case stdout:
writer = os.Stdout
case discard:
@@ -360,27 +312,28 @@ func (b *Backend) LogTestMessage(ctx context.Context, in *logical.LogInput, conf
}
// The file lock must be held before calling this
// Deprecated: Use eventlogger.
func (b *Backend) open() error {
if b.f != nil {
return nil
}
if err := os.MkdirAll(filepath.Dir(b.path), b.mode); err != nil {
if err := os.MkdirAll(filepath.Dir(b.filePath), b.mode); err != nil {
return err
}
var err error
b.f, err = os.OpenFile(b.path, os.O_APPEND|os.O_WRONLY|os.O_CREATE, b.mode)
b.f, err = os.OpenFile(b.filePath, os.O_APPEND|os.O_WRONLY|os.O_CREATE, b.mode)
if err != nil {
return err
}
// Change the file mode in case the log file already existed. We special
// case /dev/null since we can't chmod it and bypass if the mode is zero
switch b.path {
switch b.filePath {
case "/dev/null":
default:
if b.mode != 0 {
err = os.Chmod(b.path, b.mode)
err = os.Chmod(b.filePath, b.mode)
if err != nil {
return err
}
@@ -402,7 +355,7 @@ func (b *Backend) Reload(_ context.Context) error {
return nil
} else {
// old non-eventlogger behavior
switch b.path {
switch b.filePath {
case stdout, discard:
return nil
}
@@ -432,20 +385,168 @@ func (b *Backend) Invalidate(_ context.Context) {
b.salt.Store((*salt.Salt)(nil))
}
// RegisterNodesAndPipeline registers the nodes and a pipeline as required by
// the audit.Backend interface.
func (b *Backend) RegisterNodesAndPipeline(broker *eventlogger.Broker, name string) error {
for id, node := range b.nodeMap {
if err := broker.RegisterNode(id, node, eventlogger.WithNodeRegistrationPolicy(eventlogger.DenyOverwrite)); err != nil {
return err
}
// formatterConfig creates the configuration required by a formatter node using
// the config map supplied to the factory.
func formatterConfig(config map[string]string) (audit.FormatterConfig, error) {
const op = "file.formatterConfig"
var opts []audit.Option
if format, ok := config["format"]; ok {
opts = append(opts, audit.WithFormat(format))
}
pipeline := eventlogger.Pipeline{
PipelineID: eventlogger.PipelineID(name),
EventType: eventlogger.EventType(event.AuditType.String()),
NodeIDs: b.nodeIDList,
// Check if hashing of accessor is disabled
if hmacAccessorRaw, ok := config["hmac_accessor"]; ok {
v, err := strconv.ParseBool(hmacAccessorRaw)
if err != nil {
return audit.FormatterConfig{}, fmt.Errorf("%s: unable to parse 'hmac_accessor': %w", op, err)
}
opts = append(opts, audit.WithHMACAccessor(v))
}
return broker.RegisterPipeline(pipeline, eventlogger.WithPipelineRegistrationPolicy(eventlogger.DenyOverwrite))
// Check if raw logging is enabled
if raw, ok := config["log_raw"]; ok {
v, err := strconv.ParseBool(raw)
if err != nil {
return audit.FormatterConfig{}, fmt.Errorf("%s: unable to parse 'log_raw': %w", op, err)
}
opts = append(opts, audit.WithRaw(v))
}
if elideListResponsesRaw, ok := config["elide_list_responses"]; ok {
v, err := strconv.ParseBool(elideListResponsesRaw)
if err != nil {
return audit.FormatterConfig{}, fmt.Errorf("%s: unable to parse 'elide_list_responses': %w", op, err)
}
opts = append(opts, audit.WithElision(v))
}
return audit.NewFormatterConfig(opts...)
}
// configureFilterNode is used to configure a filter node and associated ID on the Backend.
func (b *Backend) configureFilterNode(filter string) error {
const op = "file.(Backend).configureFilterNode"
filter = strings.TrimSpace(filter)
if filter == "" {
return nil
}
filterNodeID, err := event.GenerateNodeID()
if err != nil {
return fmt.Errorf("%s: error generating random NodeID for filter node: %w", op, err)
}
filterNode, err := audit.NewEntryFilter(filter)
if err != nil {
return fmt.Errorf("%s: error creating filter node: %w", op, err)
}
b.nodeIDList = append(b.nodeIDList, filterNodeID)
b.nodeMap[filterNodeID] = filterNode
return nil
}
// configureFormatterNode is used to configure a formatter node and associated ID on the Backend.
func (b *Backend) configureFormatterNode(formatConfig audit.FormatterConfig, opts ...audit.Option) error {
const op = "file.(Backend).configureFormatterNode"
formatterNodeID, err := event.GenerateNodeID()
if err != nil {
return fmt.Errorf("%s: error generating random NodeID for formatter node: %w", op, err)
}
formatterNode, err := audit.NewEntryFormatter(formatConfig, b, opts...)
if err != nil {
return fmt.Errorf("%s: error creating formatter: %w", op, err)
}
b.nodeIDList = append(b.nodeIDList, formatterNodeID)
b.nodeMap[formatterNodeID] = formatterNode
return nil
}
// configureSinkNode is used to configure a sink node and associated ID on the Backend.
func (b *Backend) configureSinkNode(name string, filePath string, mode string, format string) error {
const op = "file.(Backend).configureSinkNode"
name = strings.TrimSpace(name)
if name == "" {
return fmt.Errorf("%s: name is required: %w", op, event.ErrInvalidParameter)
}
filePath = strings.TrimSpace(filePath)
if filePath == "" {
return fmt.Errorf("%s: file path is required: %w", op, event.ErrInvalidParameter)
}
format = strings.TrimSpace(format)
if format == "" {
return fmt.Errorf("%s: format is required: %w", op, event.ErrInvalidParameter)
}
sinkNodeID, err := event.GenerateNodeID()
if err != nil {
return fmt.Errorf("%s: error generating random NodeID for sink node: %w", op, err)
}
// normalize file path if configured for stdout or discard
if strings.EqualFold(filePath, stdout) {
filePath = stdout
} else if strings.EqualFold(filePath, discard) {
filePath = discard
}
var sinkNode eventlogger.Node
var sinkName string
switch filePath {
case stdout:
sinkName = stdout
sinkNode, err = event.NewStdoutSinkNode(format)
case discard:
sinkName = discard
sinkNode = event.NewNoopSink()
default:
// The NewFileSink function attempts to open the file and will return an error if it can't.
sinkName = name
sinkNode, err = event.NewFileSink(filePath, format, []event.Option{event.WithFileMode(mode)}...)
}
if err != nil {
return fmt.Errorf("%s: file sink creation failed for path %q: %w", op, filePath, err)
}
sinkNode = &audit.SinkWrapper{Name: sinkName, Sink: sinkNode}
b.nodeIDList = append(b.nodeIDList, sinkNodeID)
b.nodeMap[sinkNodeID] = sinkNode
return nil
}
// Name for this backend; this would ideally correspond to the mount path for the audit device.
func (b *Backend) Name() string {
return b.name
}
// Nodes returns the nodes which should be used by the event framework to process audit entries.
func (b *Backend) Nodes() map[eventlogger.NodeID]eventlogger.Node {
return b.nodeMap
}
// NodeIDs returns the IDs of the nodes, in the order they are required.
func (b *Backend) NodeIDs() []eventlogger.NodeID {
return b.nodeIDList
}
// EventType returns the event type for the backend.
func (b *Backend) EventType() eventlogger.EventType {
return eventlogger.EventType(event.AuditType.String())
}
// HasFiltering determines if the first node for the pipeline is an eventlogger.NodeTypeFilter.
func (b *Backend) HasFiltering() bool {
return len(b.nodeIDList) > 0 && b.nodeMap[b.nodeIDList[0]].Type() == eventlogger.NodeTypeFilter
}
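
Putting the factory changes together, enabling filtering on a file audit device amounts to supplying a "filter" entry in the backend's config map. A rough sketch, modelled on the BackendConfig used by the tests in the next file; the mount path, file path and filter expression are illustrative, and passing true for useEventLogger and nil for the header formatter is an assumption about how the factory would be invoked here.

package main

import (
	"context"
	"log"

	"github.com/hashicorp/vault/audit"
	auditfile "github.com/hashicorp/vault/builtin/audit/file"
	"github.com/hashicorp/vault/sdk/helper/salt"
	"github.com/hashicorp/vault/sdk/logical"
)

func main() {
	backend, err := auditfile.Factory(context.Background(), &audit.BackendConfig{
		MountPath:  "file/",
		SaltConfig: &salt.Config{},
		SaltView:   &logical.InmemStorage{},
		Config: map[string]string{
			"file_path": "/tmp/audit.log",
			// Only events matching this bexpr expression reach the sink.
			"filter": "mount_type == kv",
		},
	}, true, nil)
	if err != nil {
		log.Fatal(err)
	}

	// With a filter configured, the filter node sits first in the pipeline.
	if fb, ok := backend.(*auditfile.Backend); ok {
		log.Println("filtering enabled:", fb.HasFiltering())
	}
}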

@@ -12,10 +12,12 @@ import (
"testing"
"time"
"github.com/hashicorp/eventlogger"
"github.com/hashicorp/vault/audit"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/sdk/helper/salt"
"github.com/hashicorp/vault/sdk/logical"
"github.com/stretchr/testify/require"
)
func TestAuditFile_fileModeNew(t *testing.T) {
@@ -145,6 +147,7 @@ func TestAuditFile_EventLogger_fileModeNew(t *testing.T) {
}
_, err = Factory(context.Background(), &audit.BackendConfig{
MountPath: "foo/bar",
SaltConfig: &salt.Config{},
SaltView: &logical.InmemStorage{},
Config: config,
@@ -210,3 +213,366 @@ func BenchmarkAuditFile_request(b *testing.B) {
}
})
}
// TestBackend_formatterConfig ensures that all the configuration values are parsed correctly.
func TestBackend_formatterConfig(t *testing.T) {
t.Parallel()
tests := map[string]struct {
config map[string]string
want audit.FormatterConfig
wantErr bool
expectedMessage string
}{
"happy-path-json": {
config: map[string]string{
"format": audit.JSONFormat.String(),
"hmac_accessor": "true",
"log_raw": "true",
"elide_list_responses": "true",
},
want: audit.FormatterConfig{
Raw: true,
HMACAccessor: true,
ElideListResponses: true,
RequiredFormat: "json",
},
wantErr: false,
},
"happy-path-jsonx": {
config: map[string]string{
"format": audit.JSONxFormat.String(),
"hmac_accessor": "true",
"log_raw": "true",
"elide_list_responses": "true",
},
want: audit.FormatterConfig{
Raw: true,
HMACAccessor: true,
ElideListResponses: true,
RequiredFormat: "jsonx",
},
wantErr: false,
},
"invalid-format": {
config: map[string]string{
"format": " squiggly ",
"hmac_accessor": "true",
"log_raw": "true",
"elide_list_responses": "true",
},
want: audit.FormatterConfig{},
wantErr: true,
expectedMessage: "audit.NewFormatterConfig: error applying options: audit.(format).validate: 'squiggly' is not a valid format: invalid parameter",
},
"invalid-hmac-accessor": {
config: map[string]string{
"format": audit.JSONFormat.String(),
"hmac_accessor": "maybe",
},
want: audit.FormatterConfig{},
wantErr: true,
expectedMessage: "file.formatterConfig: unable to parse 'hmac_accessor': strconv.ParseBool: parsing \"maybe\": invalid syntax",
},
"invalid-log-raw": {
config: map[string]string{
"format": audit.JSONFormat.String(),
"hmac_accessor": "true",
"log_raw": "maybe",
},
want: audit.FormatterConfig{},
wantErr: true,
expectedMessage: "file.formatterConfig: unable to parse 'log_raw': strconv.ParseBool: parsing \"maybe\": invalid syntax",
},
"invalid-elide-bool": {
config: map[string]string{
"format": audit.JSONFormat.String(),
"hmac_accessor": "true",
"log_raw": "true",
"elide_list_responses": "maybe",
},
want: audit.FormatterConfig{},
wantErr: true,
expectedMessage: "file.formatterConfig: unable to parse 'elide_list_responses': strconv.ParseBool: parsing \"maybe\": invalid syntax",
},
}
for name, tc := range tests {
name := name
tc := tc
t.Run(name, func(t *testing.T) {
t.Parallel()
got, err := formatterConfig(tc.config)
if tc.wantErr {
require.Error(t, err)
require.EqualError(t, err, tc.expectedMessage)
} else {
require.NoError(t, err)
}
require.Equal(t, tc.want, got)
})
}
}
// TestBackend_configureFilterNode ensures that configureFilterNode handles various
// filter values as expected. Empty (including whitespace) strings should return
// no error but skip configuration of the node.
func TestBackend_configureFilterNode(t *testing.T) {
t.Parallel()
tests := map[string]struct {
filter string
shouldSkipNode bool
wantErr bool
expectedErrorMsg string
}{
"happy": {
filter: "foo == bar",
},
"empty": {
filter: "",
shouldSkipNode: true,
},
"spacey": {
filter: " ",
shouldSkipNode: true,
},
"bad": {
filter: "___qwerty",
wantErr: true,
expectedErrorMsg: "file.(Backend).configureFilterNode: error creating filter node: audit.NewEntryFilter: cannot create new audit filter",
},
}
for name, tc := range tests {
name := name
tc := tc
t.Run(name, func(t *testing.T) {
t.Parallel()
b := &Backend{
nodeIDList: []eventlogger.NodeID{},
nodeMap: map[eventlogger.NodeID]eventlogger.Node{},
}
err := b.configureFilterNode(tc.filter)
switch {
case tc.wantErr:
require.Error(t, err)
require.ErrorContains(t, err, tc.expectedErrorMsg)
require.Len(t, b.nodeIDList, 0)
require.Len(t, b.nodeMap, 0)
case tc.shouldSkipNode:
require.NoError(t, err)
require.Len(t, b.nodeIDList, 0)
require.Len(t, b.nodeMap, 0)
default:
require.NoError(t, err)
require.Len(t, b.nodeIDList, 1)
require.Len(t, b.nodeMap, 1)
id := b.nodeIDList[0]
node := b.nodeMap[id]
require.Equal(t, eventlogger.NodeTypeFilter, node.Type())
}
})
}
}
// TestBackend_configureFormatterNode ensures that configureFormatterNode
// populates the nodeIDList and nodeMap on Backend when given valid formatConfig.
func TestBackend_configureFormatterNode(t *testing.T) {
t.Parallel()
b := &Backend{
nodeIDList: []eventlogger.NodeID{},
nodeMap: map[eventlogger.NodeID]eventlogger.Node{},
}
formatConfig, err := audit.NewFormatterConfig()
require.NoError(t, err)
err = b.configureFormatterNode(formatConfig)
require.NoError(t, err)
require.Len(t, b.nodeIDList, 1)
require.Len(t, b.nodeMap, 1)
id := b.nodeIDList[0]
node := b.nodeMap[id]
require.Equal(t, eventlogger.NodeTypeFormatter, node.Type())
}
// TestBackend_configureSinkNode ensures that we can correctly configure the sink
// node on the Backend, and any incorrect parameters result in the relevant errors.
func TestBackend_configureSinkNode(t *testing.T) {
t.Parallel()
tests := map[string]struct {
name string
filePath string
mode string
format string
wantErr bool
expectedErrMsg string
expectedName string
}{
"name-empty": {
name: "",
wantErr: true,
expectedErrMsg: "file.(Backend).configureSinkNode: name is required: invalid parameter",
},
"name-whitespace": {
name: " ",
wantErr: true,
expectedErrMsg: "file.(Backend).configureSinkNode: name is required: invalid parameter",
},
"filePath-empty": {
name: "foo",
filePath: "",
wantErr: true,
expectedErrMsg: "file.(Backend).configureSinkNode: file path is required: invalid parameter",
},
"filePath-whitespace": {
name: "foo",
filePath: " ",
wantErr: true,
expectedErrMsg: "file.(Backend).configureSinkNode: file path is required: invalid parameter",
},
"filePath-stdout-lower": {
name: "foo",
expectedName: "stdout",
filePath: "stdout",
format: "json",
},
"filePath-stdout-upper": {
name: "foo",
expectedName: "stdout",
filePath: "STDOUT",
format: "json",
},
"filePath-stdout-mixed": {
name: "foo",
expectedName: "stdout",
filePath: "StdOut",
format: "json",
},
"filePath-discard-lower": {
name: "foo",
expectedName: "discard",
filePath: "discard",
format: "json",
},
"filePath-discard-upper": {
name: "foo",
expectedName: "discard",
filePath: "DISCARD",
format: "json",
},
"filePath-discard-mixed": {
name: "foo",
expectedName: "discard",
filePath: "DisCArd",
format: "json",
},
"format-empty": {
name: "foo",
filePath: "/tmp/",
format: "",
wantErr: true,
expectedErrMsg: "file.(Backend).configureSinkNode: format is required: invalid parameter",
},
"format-whitespace": {
name: "foo",
filePath: "/tmp/",
format: " ",
wantErr: true,
expectedErrMsg: "file.(Backend).configureSinkNode: format is required: invalid parameter",
},
"filePath-weird-with-mode-zero": {
name: "foo",
filePath: "/tmp/qwerty",
format: "json",
mode: "0",
wantErr: true,
expectedErrMsg: "file.(Backend).configureSinkNode: file sink creation failed for path \"/tmp/qwerty\": event.NewFileSink: unable to determine existing file mode: stat /tmp/qwerty: no such file or directory",
},
"happy": {
name: "foo",
filePath: "/tmp/audit.log",
mode: "",
format: "json",
wantErr: false,
expectedName: "foo",
},
}
for name, tc := range tests {
name := name
tc := tc
t.Run(name, func(t *testing.T) {
t.Parallel()
b := &Backend{
nodeIDList: []eventlogger.NodeID{},
nodeMap: map[eventlogger.NodeID]eventlogger.Node{},
}
err := b.configureSinkNode(tc.name, tc.filePath, tc.mode, tc.format)
if tc.wantErr {
require.Error(t, err)
require.EqualError(t, err, tc.expectedErrMsg)
require.Len(t, b.nodeIDList, 0)
require.Len(t, b.nodeMap, 0)
} else {
require.NoError(t, err)
require.Len(t, b.nodeIDList, 1)
require.Len(t, b.nodeMap, 1)
id := b.nodeIDList[0]
node := b.nodeMap[id]
require.Equal(t, eventlogger.NodeTypeSink, node.Type())
sw, ok := node.(*audit.SinkWrapper)
require.True(t, ok)
require.Equal(t, tc.expectedName, sw.Name)
}
})
}
}
// TestBackend_configureFilterFormatterSink ensures that configuring all three
// types of nodes on a Backend works as expected, i.e. we have all three nodes
// at the end and nothing gets overwritten. The order of calls influences the
// slice of IDs on the Backend.
func TestBackend_configureFilterFormatterSink(t *testing.T) {
t.Parallel()
b := &Backend{
nodeIDList: []eventlogger.NodeID{},
nodeMap: map[eventlogger.NodeID]eventlogger.Node{},
}
formatConfig, err := audit.NewFormatterConfig()
require.NoError(t, err)
err = b.configureFilterNode("foo == bar")
require.NoError(t, err)
err = b.configureFormatterNode(formatConfig)
require.NoError(t, err)
err = b.configureSinkNode("foo", "/tmp/foo", "0777", "json")
require.NoError(t, err)
require.Len(t, b.nodeIDList, 3)
require.Len(t, b.nodeMap, 3)
id := b.nodeIDList[0]
node := b.nodeMap[id]
require.Equal(t, eventlogger.NodeTypeFilter, node.Type())
id = b.nodeIDList[1]
node = b.nodeMap[id]
require.Equal(t, eventlogger.NodeTypeFormatter, node.Type())
id = b.nodeIDList[2]
node = b.nodeMap[id]
require.Equal(t, eventlogger.NodeTypeSink, node.Type())
}

@@ -9,6 +9,7 @@ import (
"fmt"
"net"
"strconv"
"strings"
"sync"
"time"
@@ -21,83 +22,76 @@ import (
"github.com/hashicorp/vault/sdk/logical"
)
func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool, headersConfig audit.HeaderFormatter) (audit.Backend, error) {
var _ audit.Backend = (*Backend)(nil)
// Backend is the audit backend for the socket audit transport.
type Backend struct {
sync.Mutex
address string
connection net.Conn
formatter *audit.EntryFormatterWriter
formatConfig audit.FormatterConfig
name string
nodeIDList []eventlogger.NodeID
nodeMap map[eventlogger.NodeID]eventlogger.Node
salt *salt.Salt
saltConfig *salt.Config
saltMutex sync.RWMutex
saltView logical.Storage
socketType string
writeDuration time.Duration
}
func Factory(_ context.Context, conf *audit.BackendConfig, useEventLogger bool, headersConfig audit.HeaderFormatter) (audit.Backend, error) {
const op = "socket.Factory"
if conf.SaltConfig == nil {
return nil, fmt.Errorf("nil salt config")
return nil, fmt.Errorf("%s: nil salt config", op)
}
if conf.SaltView == nil {
return nil, fmt.Errorf("nil salt view")
return nil, fmt.Errorf("%s: nil salt view", op)
}
address, ok := conf.Config["address"]
if !ok {
return nil, fmt.Errorf("address is required")
return nil, fmt.Errorf("%s: address is required", op)
}
socketType, ok := conf.Config["socket_type"]
if !ok {
socketType = "tcp"
}
writeDeadline, ok := conf.Config["write_timeout"]
if !ok {
writeDeadline = "2s"
}
writeDuration, err := parseutil.ParseDurationSecond(writeDeadline)
if err != nil {
return nil, err
return nil, fmt.Errorf("%s: failed to parse 'write_timeout': %w", op, err)
}
var cfgOpts []audit.Option
if format, ok := conf.Config["format"]; ok {
cfgOpts = append(cfgOpts, audit.WithFormat(format))
}
// Check if hashing of accessor is disabled
if hmacAccessorRaw, ok := conf.Config["hmac_accessor"]; ok {
v, err := strconv.ParseBool(hmacAccessorRaw)
cfg, err := formatterConfig(conf.Config)
if err != nil {
return nil, err
}
cfgOpts = append(cfgOpts, audit.WithHMACAccessor(v))
}
// Check if raw logging is enabled
if raw, ok := conf.Config["log_raw"]; ok {
v, err := strconv.ParseBool(raw)
if err != nil {
return nil, err
}
cfgOpts = append(cfgOpts, audit.WithRaw(v))
}
if elideListResponsesRaw, ok := conf.Config["elide_list_responses"]; ok {
v, err := strconv.ParseBool(elideListResponsesRaw)
if err != nil {
return nil, err
}
cfgOpts = append(cfgOpts, audit.WithElision(v))
}
cfg, err := audit.NewFormatterConfig(cfgOpts...)
if err != nil {
return nil, err
return nil, fmt.Errorf("%s: failed to create formatter config: %w", op, err)
}
b := &Backend{
address: address,
formatConfig: cfg,
name: conf.MountPath,
saltConfig: conf.SaltConfig,
saltView: conf.SaltView,
formatConfig: cfg,
writeDuration: writeDuration,
address: address,
socketType: socketType,
writeDuration: writeDuration,
}
// Configure the formatter for either case.
f, err := audit.NewEntryFormatter(b.formatConfig, b, audit.WithHeaderFormatter(headersConfig))
f, err := audit.NewEntryFormatter(cfg, b, audit.WithHeaderFormatter(headersConfig))
if err != nil {
return nil, fmt.Errorf("error creating formatter: %w", err)
return nil, fmt.Errorf("%s: error creating formatter: %w", op, err)
}
var w audit.Writer
switch b.formatConfig.RequiredFormat {
@@ -109,72 +103,44 @@ func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool
fw, err := audit.NewEntryFormatterWriter(b.formatConfig, f, w)
if err != nil {
return nil, fmt.Errorf("error creating formatter writer: %w", err)
return nil, fmt.Errorf("%s: error creating formatter writer: %w", op, err)
}
b.formatter = fw
if useEventLogger {
var opts []event.Option
if socketType, ok := conf.Config["socket_type"]; ok {
opts = append(opts, event.WithSocketType(socketType))
}
if writeDeadline, ok := conf.Config["write_timeout"]; ok {
opts = append(opts, event.WithMaxDuration(writeDeadline))
}
b.nodeIDList = make([]eventlogger.NodeID, 2)
b.nodeIDList = []eventlogger.NodeID{}
b.nodeMap = make(map[eventlogger.NodeID]eventlogger.Node)
formatterNodeID, err := event.GenerateNodeID()
err := b.configureFilterNode(conf.Config["filter"])
if err != nil {
return nil, fmt.Errorf("error generating random NodeID for formatter node: %w", err)
return nil, fmt.Errorf("%s: error configuring filter node: %w", op, err)
}
b.nodeIDList[0] = formatterNodeID
b.nodeMap[formatterNodeID] = f
n, err := event.NewSocketSink(b.formatConfig.RequiredFormat.String(), address, opts...)
if err != nil {
return nil, fmt.Errorf("error creating socket sink node: %w", err)
opts := []audit.Option{
audit.WithHeaderFormatter(headersConfig),
}
sinkNode := &audit.SinkWrapper{Name: conf.MountPath, Sink: n}
sinkNodeID, err := event.GenerateNodeID()
err = b.configureFormatterNode(cfg, opts...)
if err != nil {
return nil, fmt.Errorf("error generating random NodeID for sink node: %w", err)
return nil, fmt.Errorf("%s: error configuring formatter node: %w", op, err)
}
sinkOpts := []event.Option{
event.WithSocketType(socketType),
event.WithMaxDuration(writeDeadline),
}
err = b.configureSinkNode(conf.MountPath, address, cfg.RequiredFormat.String(), sinkOpts...)
if err != nil {
return nil, fmt.Errorf("%s: error configuring sink node: %w", op, err)
}
b.nodeIDList[1] = sinkNodeID
b.nodeMap[sinkNodeID] = sinkNode
}
return b, nil
}
// Backend is the audit backend for the socket audit transport.
type Backend struct {
connection net.Conn
formatter *audit.EntryFormatterWriter
formatConfig audit.FormatterConfig
writeDuration time.Duration
address string
socketType string
sync.Mutex
saltMutex sync.RWMutex
salt *salt.Salt
saltConfig *salt.Config
saltView logical.Storage
nodeIDList []eventlogger.NodeID
nodeMap map[eventlogger.NodeID]eventlogger.Node
}
var _ audit.Backend = (*Backend)(nil)
// Deprecated: Use eventlogger.
func (b *Backend) LogRequest(ctx context.Context, in *logical.LogInput) error {
var buf bytes.Buffer
if err := b.formatter.FormatAndWriteRequest(ctx, &buf, in); err != nil {
@@ -198,6 +164,7 @@ func (b *Backend) LogRequest(ctx context.Context, in *logical.LogInput) error {
return err
}
// Deprecated: Use eventlogger.
func (b *Backend) LogResponse(ctx context.Context, in *logical.LogInput) error {
var buf bytes.Buffer
if err := b.formatter.FormatAndWriteResponse(ctx, &buf, in); err != nil {
@@ -256,6 +223,7 @@ func (b *Backend) LogTestMessage(ctx context.Context, in *logical.LogInput, conf
return err
}
// Deprecated: Use eventlogger.
func (b *Backend) write(ctx context.Context, buf []byte) error {
if b.connection == nil {
if err := b.reconnect(ctx); err != nil {
@@ -276,6 +244,7 @@ func (b *Backend) write(ctx context.Context, buf []byte) error {
return nil
}
// Deprecated: Use eventlogger.
func (b *Backend) reconnect(ctx context.Context) error {
if b.connection != nil {
b.connection.Close()
@@ -317,12 +286,12 @@ func (b *Backend) Salt(ctx context.Context) (*salt.Salt, error) {
if b.salt != nil {
return b.salt, nil
}
salt, err := salt.NewSalt(ctx, b.saltView, b.saltConfig)
s, err := salt.NewSalt(ctx, b.saltView, b.saltConfig)
if err != nil {
return nil, err
}
b.salt = salt
return salt, nil
b.salt = s
return s, nil
}
func (b *Backend) Invalidate(_ context.Context) {
@@ -331,20 +300,146 @@ func (b *Backend) Invalidate(_ context.Context) {
b.salt = nil
}
// RegisterNodesAndPipeline registers the nodes and a pipeline as required by
// the audit.Backend interface.
func (b *Backend) RegisterNodesAndPipeline(broker *eventlogger.Broker, name string) error {
for id, node := range b.nodeMap {
if err := broker.RegisterNode(id, node, eventlogger.WithNodeRegistrationPolicy(eventlogger.DenyOverwrite)); err != nil {
return err
}
// formatterConfig creates the configuration required by a formatter node using
// the config map supplied to the factory.
func formatterConfig(config map[string]string) (audit.FormatterConfig, error) {
const op = "socket.formatterConfig"
var cfgOpts []audit.Option
if format, ok := config["format"]; ok {
cfgOpts = append(cfgOpts, audit.WithFormat(format))
}
pipeline := eventlogger.Pipeline{
PipelineID: eventlogger.PipelineID(name),
EventType: eventlogger.EventType(event.AuditType.String()),
NodeIDs: b.nodeIDList,
// Check if hashing of accessor is disabled
if hmacAccessorRaw, ok := config["hmac_accessor"]; ok {
v, err := strconv.ParseBool(hmacAccessorRaw)
if err != nil {
return audit.FormatterConfig{}, fmt.Errorf("%s: unable to parse 'hmac_accessor': %w", op, err)
}
cfgOpts = append(cfgOpts, audit.WithHMACAccessor(v))
}
return broker.RegisterPipeline(pipeline, eventlogger.WithPipelineRegistrationPolicy(eventlogger.DenyOverwrite))
// Check if raw logging is enabled
if raw, ok := config["log_raw"]; ok {
v, err := strconv.ParseBool(raw)
if err != nil {
return audit.FormatterConfig{}, fmt.Errorf("%s: unable to parse 'log_raw': %w", op, err)
}
cfgOpts = append(cfgOpts, audit.WithRaw(v))
}
if elideListResponsesRaw, ok := config["elide_list_responses"]; ok {
v, err := strconv.ParseBool(elideListResponsesRaw)
if err != nil {
return audit.FormatterConfig{}, fmt.Errorf("%s: unable to parse 'elide_list_responses': %w", op, err)
}
cfgOpts = append(cfgOpts, audit.WithElision(v))
}
return audit.NewFormatterConfig(cfgOpts...)
}
// configureFilterNode is used to configure a filter node and associated ID on the Backend.
func (b *Backend) configureFilterNode(filter string) error {
const op = "socket.(Backend).configureFilterNode"
filter = strings.TrimSpace(filter)
if filter == "" {
return nil
}
filterNodeID, err := event.GenerateNodeID()
if err != nil {
return fmt.Errorf("%s: error generating random NodeID for filter node: %w", op, err)
}
filterNode, err := audit.NewEntryFilter(filter)
if err != nil {
return fmt.Errorf("%s: error creating filter node: %w", op, err)
}
b.nodeIDList = append(b.nodeIDList, filterNodeID)
b.nodeMap[filterNodeID] = filterNode
return nil
}
// configureFormatterNode is used to configure a formatter node and associated ID on the Backend.
func (b *Backend) configureFormatterNode(formatConfig audit.FormatterConfig, opts ...audit.Option) error {
const op = "socket.(Backend).configureFormatterNode"
formatterNodeID, err := event.GenerateNodeID()
if err != nil {
return fmt.Errorf("%s: error generating random NodeID for formatter node: %w", op, err)
}
formatterNode, err := audit.NewEntryFormatter(formatConfig, b, opts...)
if err != nil {
return fmt.Errorf("%s: error creating formatter: %w", op, err)
}
b.nodeIDList = append(b.nodeIDList, formatterNodeID)
b.nodeMap[formatterNodeID] = formatterNode
return nil
}
// configureSinkNode is used to configure a sink node and associated ID on the Backend.
func (b *Backend) configureSinkNode(name string, address string, format string, opts ...event.Option) error {
const op = "socket.(Backend).configureSinkNode"
name = strings.TrimSpace(name)
if name == "" {
return fmt.Errorf("%s: name is required: %w", op, event.ErrInvalidParameter)
}
address = strings.TrimSpace(address)
if address == "" {
return fmt.Errorf("%s: address is required: %w", op, event.ErrInvalidParameter)
}
format = strings.TrimSpace(format)
if format == "" {
return fmt.Errorf("%s: format is required: %w", op, event.ErrInvalidParameter)
}
sinkNodeID, err := event.GenerateNodeID()
if err != nil {
return fmt.Errorf("%s: error generating random NodeID for sink node: %w", op, err)
}
n, err := event.NewSocketSink(address, format, opts...)
if err != nil {
return fmt.Errorf("%s: error creating socket sink node: %w", op, err)
}
sinkNode := &audit.SinkWrapper{Name: name, Sink: n}
b.nodeIDList = append(b.nodeIDList, sinkNodeID)
b.nodeMap[sinkNodeID] = sinkNode
return nil
}
// Name for this backend; this would ideally correspond to the mount path for the audit device.
func (b *Backend) Name() string {
return b.name
}
// Nodes returns the nodes which should be used by the event framework to process audit entries.
func (b *Backend) Nodes() map[eventlogger.NodeID]eventlogger.Node {
return b.nodeMap
}
// NodeIDs returns the IDs of the nodes, in the order they are required.
func (b *Backend) NodeIDs() []eventlogger.NodeID {
return b.nodeIDList
}
// EventType returns the event type for the backend.
func (b *Backend) EventType() eventlogger.EventType {
return eventlogger.EventType(event.AuditType.String())
}
// HasFiltering determines if the first node for the pipeline is an eventlogger.NodeTypeFilter.
func (b *Backend) HasFiltering() bool {
return len(b.nodeIDList) > 0 && b.nodeMap[b.nodeIDList[0]].Type() == eventlogger.NodeTypeFilter
}
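For context, the RegisterNodesAndPipeline logic removed above still describes how these accessor methods are consumed by the event framework. The following is a minimal sketch of that registration flow; the registerPipeline helper name is invented for illustration, and a configured *Backend plus an eventlogger.Broker are assumed.

```go
// registerPipeline is a hypothetical helper mirroring the removed
// RegisterNodesAndPipeline logic: every node is registered with the broker,
// then the ordered node IDs are registered together as a single pipeline.
func registerPipeline(broker *eventlogger.Broker, b *Backend) error {
	for id, node := range b.Nodes() {
		if err := broker.RegisterNode(id, node, eventlogger.WithNodeRegistrationPolicy(eventlogger.DenyOverwrite)); err != nil {
			return err
		}
	}

	pipeline := eventlogger.Pipeline{
		PipelineID: eventlogger.PipelineID(b.Name()),
		EventType:  b.EventType(),
		NodeIDs:    b.NodeIDs(),
	}

	return broker.RegisterPipeline(pipeline, eventlogger.WithPipelineRegistrationPolicy(eventlogger.DenyOverwrite))
}
```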

View File

@@ -0,0 +1,331 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package socket
import (
"testing"
"github.com/hashicorp/eventlogger"
"github.com/hashicorp/vault/audit"
"github.com/stretchr/testify/require"
)
// TestBackend_formatterConfig ensures that all the configuration values are parsed correctly.
func TestBackend_formatterConfig(t *testing.T) {
t.Parallel()
tests := map[string]struct {
config map[string]string
want audit.FormatterConfig
wantErr bool
expectedErrMsg string
}{
"happy-path-json": {
config: map[string]string{
"format": audit.JSONFormat.String(),
"hmac_accessor": "true",
"log_raw": "true",
"elide_list_responses": "true",
},
want: audit.FormatterConfig{
Raw: true,
HMACAccessor: true,
ElideListResponses: true,
RequiredFormat: "json",
}, wantErr: false,
},
"happy-path-jsonx": {
config: map[string]string{
"format": audit.JSONxFormat.String(),
"hmac_accessor": "true",
"log_raw": "true",
"elide_list_responses": "true",
},
want: audit.FormatterConfig{
Raw: true,
HMACAccessor: true,
ElideListResponses: true,
RequiredFormat: "jsonx",
},
wantErr: false,
},
"invalid-format": {
config: map[string]string{
"format": " squiggly ",
"hmac_accessor": "true",
"log_raw": "true",
"elide_list_responses": "true",
},
want: audit.FormatterConfig{},
wantErr: true,
expectedErrMsg: "audit.NewFormatterConfig: error applying options: audit.(format).validate: 'squiggly' is not a valid format: invalid parameter",
},
"invalid-hmac-accessor": {
config: map[string]string{
"format": audit.JSONFormat.String(),
"hmac_accessor": "maybe",
},
want: audit.FormatterConfig{},
wantErr: true,
expectedErrMsg: "socket.formatterConfig: unable to parse 'hmac_accessor': strconv.ParseBool: parsing \"maybe\": invalid syntax",
},
"invalid-log-raw": {
config: map[string]string{
"format": audit.JSONFormat.String(),
"hmac_accessor": "true",
"log_raw": "maybe",
},
want: audit.FormatterConfig{},
wantErr: true,
expectedErrMsg: "socket.formatterConfig: unable to parse 'log_raw': strconv.ParseBool: parsing \"maybe\": invalid syntax",
},
"invalid-elide-bool": {
config: map[string]string{
"format": audit.JSONFormat.String(),
"hmac_accessor": "true",
"log_raw": "true",
"elide_list_responses": "maybe",
},
want: audit.FormatterConfig{},
wantErr: true,
expectedErrMsg: "socket.formatterConfig: unable to parse 'elide_list_responses': strconv.ParseBool: parsing \"maybe\": invalid syntax",
},
}
for name, tc := range tests {
name := name
tc := tc
t.Run(name, func(t *testing.T) {
t.Parallel()
got, err := formatterConfig(tc.config)
if tc.wantErr {
require.Error(t, err)
require.EqualError(t, err, tc.expectedErrMsg)
} else {
require.NoError(t, err)
}
require.Equal(t, tc.want, got)
})
}
}
// TestBackend_configureFilterNode ensures that configureFilterNode handles various
// filter values as expected. Empty (including whitespace) strings should return
// no error but skip configuration of the node.
func TestBackend_configureFilterNode(t *testing.T) {
t.Parallel()
tests := map[string]struct {
filter string
shouldSkipNode bool
wantErr bool
expectedErrorMsg string
}{
"happy": {
filter: "foo == bar",
},
"empty": {
filter: "",
shouldSkipNode: true,
},
"spacey": {
filter: " ",
shouldSkipNode: true,
},
"bad": {
filter: "___qwerty",
wantErr: true,
expectedErrorMsg: "socket.(Backend).configureFilterNode: error creating filter node: audit.NewEntryFilter: cannot create new audit filter",
},
}
for name, tc := range tests {
name := name
tc := tc
t.Run(name, func(t *testing.T) {
t.Parallel()
b := &Backend{
nodeIDList: []eventlogger.NodeID{},
nodeMap: map[eventlogger.NodeID]eventlogger.Node{},
}
err := b.configureFilterNode(tc.filter)
switch {
case tc.wantErr:
require.Error(t, err)
require.ErrorContains(t, err, tc.expectedErrorMsg)
require.Len(t, b.nodeIDList, 0)
require.Len(t, b.nodeMap, 0)
case tc.shouldSkipNode:
require.NoError(t, err)
require.Len(t, b.nodeIDList, 0)
require.Len(t, b.nodeMap, 0)
default:
require.NoError(t, err)
require.Len(t, b.nodeIDList, 1)
require.Len(t, b.nodeMap, 1)
id := b.nodeIDList[0]
node := b.nodeMap[id]
require.Equal(t, eventlogger.NodeTypeFilter, node.Type())
}
})
}
}
// TestBackend_configureFormatterNode ensures that configureFormatterNode
// populates the nodeIDList and nodeMap on Backend when given valid formatConfig.
func TestBackend_configureFormatterNode(t *testing.T) {
t.Parallel()
b := &Backend{
nodeIDList: []eventlogger.NodeID{},
nodeMap: map[eventlogger.NodeID]eventlogger.Node{},
}
formatConfig, err := audit.NewFormatterConfig()
require.NoError(t, err)
err = b.configureFormatterNode(formatConfig)
require.NoError(t, err)
require.Len(t, b.nodeIDList, 1)
require.Len(t, b.nodeMap, 1)
id := b.nodeIDList[0]
node := b.nodeMap[id]
require.Equal(t, eventlogger.NodeTypeFormatter, node.Type())
}
// TestBackend_configureSinkNode ensures that we can correctly configure the sink
// node on the Backend, and that any incorrect parameters result in the relevant errors.
func TestBackend_configureSinkNode(t *testing.T) {
t.Parallel()
tests := map[string]struct {
name string
address string
format string
wantErr bool
expectedErrMsg string
expectedName string
}{
"name-empty": {
name: "",
address: "wss://foo",
wantErr: true,
expectedErrMsg: "socket.(Backend).configureSinkNode: name is required: invalid parameter",
},
"name-whitespace": {
name: " ",
address: "wss://foo",
wantErr: true,
expectedErrMsg: "socket.(Backend).configureSinkNode: name is required: invalid parameter",
},
"address-empty": {
name: "foo",
address: "",
wantErr: true,
expectedErrMsg: "socket.(Backend).configureSinkNode: address is required: invalid parameter",
},
"address-whitespace": {
name: "foo",
address: " ",
wantErr: true,
expectedErrMsg: "socket.(Backend).configureSinkNode: address is required: invalid parameter",
},
"format-empty": {
name: "foo",
address: "wss://foo",
format: "",
wantErr: true,
expectedErrMsg: "socket.(Backend).configureSinkNode: format is required: invalid parameter",
},
"format-whitespace": {
name: "foo",
address: "wss://foo",
format: " ",
wantErr: true,
expectedErrMsg: "socket.(Backend).configureSinkNode: format is required: invalid parameter",
},
"happy": {
name: "foo",
address: "wss://foo",
format: "json",
wantErr: false,
expectedName: "foo",
},
}
for name, tc := range tests {
name := name
tc := tc
t.Run(name, func(t *testing.T) {
t.Parallel()
b := &Backend{
nodeIDList: []eventlogger.NodeID{},
nodeMap: map[eventlogger.NodeID]eventlogger.Node{},
}
err := b.configureSinkNode(tc.name, tc.address, tc.format)
if tc.wantErr {
require.Error(t, err)
require.EqualError(t, err, tc.expectedErrMsg)
require.Len(t, b.nodeIDList, 0)
require.Len(t, b.nodeMap, 0)
} else {
require.NoError(t, err)
require.Len(t, b.nodeIDList, 1)
require.Len(t, b.nodeMap, 1)
id := b.nodeIDList[0]
node := b.nodeMap[id]
require.Equal(t, eventlogger.NodeTypeSink, node.Type())
sw, ok := node.(*audit.SinkWrapper)
require.True(t, ok)
require.Equal(t, tc.expectedName, sw.Name)
}
})
}
}
// TestBackend_configureFilterFormatterSink ensures that configuring all three
// types of nodes on a Backend works as expected, i.e. we have all three nodes
// at the end and nothing gets overwritten. The order of calls influences the
// slice of IDs on the Backend.
func TestBackend_configureFilterFormatterSink(t *testing.T) {
t.Parallel()
b := &Backend{
nodeIDList: []eventlogger.NodeID{},
nodeMap: map[eventlogger.NodeID]eventlogger.Node{},
}
formatConfig, err := audit.NewFormatterConfig()
require.NoError(t, err)
err = b.configureFilterNode("foo == bar")
require.NoError(t, err)
err = b.configureFormatterNode(formatConfig)
require.NoError(t, err)
err = b.configureSinkNode("foo", "https://hashicorp.com", "json")
require.NoError(t, err)
require.Len(t, b.nodeIDList, 3)
require.Len(t, b.nodeMap, 3)
id := b.nodeIDList[0]
node := b.nodeMap[id]
require.Equal(t, eventlogger.NodeTypeFilter, node.Type())
id = b.nodeIDList[1]
node = b.nodeMap[id]
require.Equal(t, eventlogger.NodeTypeFormatter, node.Type())
id = b.nodeIDList[2]
node = b.nodeMap[id]
require.Equal(t, eventlogger.NodeTypeSink, node.Type())
}

View File

@@ -8,6 +8,7 @@ import (
"context"
"fmt"
"strconv"
"strings"
"sync"
"github.com/hashicorp/eventlogger"
@@ -18,13 +19,31 @@ import (
"github.com/hashicorp/vault/sdk/logical"
)
func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool, headersConfig audit.HeaderFormatter) (audit.Backend, error) {
var _ audit.Backend = (*Backend)(nil)
// Backend is the audit backend for the syslog-based audit store.
type Backend struct {
formatter *audit.EntryFormatterWriter
formatConfig audit.FormatterConfig
logger gsyslog.Syslogger
name string
nodeIDList []eventlogger.NodeID
nodeMap map[eventlogger.NodeID]eventlogger.Node
salt *salt.Salt
saltConfig *salt.Config
saltMutex sync.RWMutex
saltView logical.Storage
}
func Factory(_ context.Context, conf *audit.BackendConfig, useEventLogger bool, headersConfig audit.HeaderFormatter) (audit.Backend, error) {
const op = "syslog.Factory"
if conf.SaltConfig == nil {
return nil, fmt.Errorf("nil salt config")
return nil, fmt.Errorf("%s: nil salt config", op)
}
if conf.SaltView == nil {
return nil, fmt.Errorf("nil salt view")
return nil, fmt.Errorf("%s: nil salt view", op)
}
// Get facility or default to AUTH
@@ -39,60 +58,29 @@ func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool
tag = "vault"
}
var cfgOpts []audit.Option
if format, ok := conf.Config["format"]; ok {
cfgOpts = append(cfgOpts, audit.WithFormat(format))
}
// Check if hashing of accessor is disabled
if hmacAccessorRaw, ok := conf.Config["hmac_accessor"]; ok {
v, err := strconv.ParseBool(hmacAccessorRaw)
cfg, err := formatterConfig(conf.Config)
if err != nil {
return nil, err
}
cfgOpts = append(cfgOpts, audit.WithHMACAccessor(v))
}
// Check if raw logging is enabled
if raw, ok := conf.Config["log_raw"]; ok {
v, err := strconv.ParseBool(raw)
if err != nil {
return nil, err
}
cfgOpts = append(cfgOpts, audit.WithRaw(v))
}
if elideListResponsesRaw, ok := conf.Config["elide_list_responses"]; ok {
v, err := strconv.ParseBool(elideListResponsesRaw)
if err != nil {
return nil, err
}
cfgOpts = append(cfgOpts, audit.WithElision(v))
}
cfg, err := audit.NewFormatterConfig(cfgOpts...)
if err != nil {
return nil, err
return nil, fmt.Errorf("%s: failed to create formatter config: %w", op, err)
}
// Get the logger
logger, err := gsyslog.NewLogger(gsyslog.LOG_INFO, facility, tag)
if err != nil {
return nil, err
return nil, fmt.Errorf("%s: cannot create logger: %w", op, err)
}
b := &Backend{
formatConfig: cfg,
logger: logger,
name: conf.MountPath,
saltConfig: conf.SaltConfig,
saltView: conf.SaltView,
formatConfig: cfg,
}
// Configure the formatter for either case.
f, err := audit.NewEntryFormatter(b.formatConfig, b, audit.WithHeaderFormatter(headersConfig), audit.WithPrefix(conf.Config["prefix"]))
if err != nil {
return nil, fmt.Errorf("error creating formatter: %w", err)
return nil, fmt.Errorf("%s: error creating formatter: %w", op, err)
}
var w audit.Writer
@@ -105,67 +93,45 @@ func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool
fw, err := audit.NewEntryFormatterWriter(b.formatConfig, f, w)
if err != nil {
return nil, fmt.Errorf("error creating formatter writer: %w", err)
return nil, fmt.Errorf("%s: error creating formatter writer: %w", op, err)
}
b.formatter = fw
if useEventLogger {
var opts []event.Option
// Get facility or default to AUTH
if facility, ok := conf.Config["facility"]; ok {
opts = append(opts, event.WithFacility(facility))
}
if tag, ok := conf.Config["tag"]; ok {
opts = append(opts, event.WithTag(tag))
}
b.nodeIDList = make([]eventlogger.NodeID, 2)
b.nodeIDList = []eventlogger.NodeID{}
b.nodeMap = make(map[eventlogger.NodeID]eventlogger.Node)
formatterNodeID, err := event.GenerateNodeID()
err := b.configureFilterNode(conf.Config["filter"])
if err != nil {
return nil, fmt.Errorf("error generating random NodeID for formatter node: %w", err)
return nil, fmt.Errorf("%s: error configuring filter node: %w", op, err)
}
b.nodeIDList[0] = formatterNodeID
b.nodeMap[formatterNodeID] = f
n, err := event.NewSyslogSink(b.formatConfig.RequiredFormat.String(), opts...)
if err != nil {
return nil, fmt.Errorf("error creating syslog sink node: %w", err)
formatterOpts := []audit.Option{
audit.WithHeaderFormatter(headersConfig),
audit.WithPrefix(conf.Config["prefix"]),
}
sinkNode := &audit.SinkWrapper{Name: conf.MountPath, Sink: n}
sinkNodeID, err := event.GenerateNodeID()
err = b.configureFormatterNode(cfg, formatterOpts...)
if err != nil {
return nil, fmt.Errorf("error generating random NodeID for sink node: %w", err)
return nil, fmt.Errorf("%s: error configuring formatter node: %w", op, err)
}
b.nodeIDList[1] = sinkNodeID
b.nodeMap[sinkNodeID] = sinkNode
sinkOpts := []event.Option{
event.WithFacility(facility),
event.WithTag(tag),
}
err = b.configureSinkNode(conf.MountPath, cfg.RequiredFormat.String(), sinkOpts...)
if err != nil {
return nil, fmt.Errorf("%s: error configuring sink node: %w", op, err)
}
}
return b, nil
}
// Backend is the audit backend for the syslog-based audit store.
type Backend struct {
logger gsyslog.Syslogger
formatter *audit.EntryFormatterWriter
formatConfig audit.FormatterConfig
saltMutex sync.RWMutex
salt *salt.Salt
saltConfig *salt.Config
saltView logical.Storage
nodeIDList []eventlogger.NodeID
nodeMap map[eventlogger.NodeID]eventlogger.Node
}
var _ audit.Backend = (*Backend)(nil)
// Deprecated: Use eventlogger.
func (b *Backend) LogRequest(ctx context.Context, in *logical.LogInput) error {
var buf bytes.Buffer
if err := b.formatter.FormatAndWriteRequest(ctx, &buf, in); err != nil {
@@ -177,6 +143,7 @@ func (b *Backend) LogRequest(ctx context.Context, in *logical.LogInput) error {
return err
}
// Deprecated: Use eventlogger.
func (b *Backend) LogResponse(ctx context.Context, in *logical.LogInput) error {
var buf bytes.Buffer
if err := b.formatter.FormatAndWriteResponse(ctx, &buf, in); err != nil {
@@ -227,12 +194,12 @@ func (b *Backend) Salt(ctx context.Context) (*salt.Salt, error) {
if b.salt != nil {
return b.salt, nil
}
salt, err := salt.NewSalt(ctx, b.saltView, b.saltConfig)
s, err := salt.NewSalt(ctx, b.saltView, b.saltConfig)
if err != nil {
return nil, err
}
b.salt = salt
return salt, nil
b.salt = s
return s, nil
}
func (b *Backend) Invalidate(_ context.Context) {
@@ -241,20 +208,142 @@ func (b *Backend) Invalidate(_ context.Context) {
b.salt = nil
}
// RegisterNodesAndPipeline registers the nodes and a pipeline as required by
// the audit.Backend interface.
func (b *Backend) RegisterNodesAndPipeline(broker *eventlogger.Broker, name string) error {
for id, node := range b.nodeMap {
if err := broker.RegisterNode(id, node, eventlogger.WithNodeRegistrationPolicy(eventlogger.DenyOverwrite)); err != nil {
return err
}
// formatterConfig creates the configuration required by a formatter node using
// the config map supplied to the factory.
func formatterConfig(config map[string]string) (audit.FormatterConfig, error) {
const op = "syslog.formatterConfig"
var opts []audit.Option
if format, ok := config["format"]; ok {
opts = append(opts, audit.WithFormat(format))
}
pipeline := eventlogger.Pipeline{
PipelineID: eventlogger.PipelineID(name),
EventType: eventlogger.EventType(event.AuditType.String()),
NodeIDs: b.nodeIDList,
// Check if hashing of accessor is disabled
if hmacAccessorRaw, ok := config["hmac_accessor"]; ok {
v, err := strconv.ParseBool(hmacAccessorRaw)
if err != nil {
return audit.FormatterConfig{}, fmt.Errorf("%s: unable to parse 'hmac_accessor': %w", op, err)
}
opts = append(opts, audit.WithHMACAccessor(v))
}
return broker.RegisterPipeline(pipeline, eventlogger.WithPipelineRegistrationPolicy(eventlogger.DenyOverwrite))
// Check if raw logging is enabled
if raw, ok := config["log_raw"]; ok {
v, err := strconv.ParseBool(raw)
if err != nil {
return audit.FormatterConfig{}, fmt.Errorf("%s: unable to parse 'log_raw': %w", op, err)
}
opts = append(opts, audit.WithRaw(v))
}
if elideListResponsesRaw, ok := config["elide_list_responses"]; ok {
v, err := strconv.ParseBool(elideListResponsesRaw)
if err != nil {
return audit.FormatterConfig{}, fmt.Errorf("%s: unable to parse 'elide_list_responses': %w", op, err)
}
opts = append(opts, audit.WithElision(v))
}
return audit.NewFormatterConfig(opts...)
}
// configureFilterNode is used to configure a filter node and associated ID on the Backend.
func (b *Backend) configureFilterNode(filter string) error {
const op = "syslog.(Backend).configureFilterNode"
filter = strings.TrimSpace(filter)
if filter == "" {
return nil
}
filterNodeID, err := event.GenerateNodeID()
if err != nil {
return fmt.Errorf("%s: error generating random NodeID for filter node: %w", op, err)
}
filterNode, err := audit.NewEntryFilter(filter)
if err != nil {
return fmt.Errorf("%s: error creating filter node: %w", op, err)
}
b.nodeIDList = append(b.nodeIDList, filterNodeID)
b.nodeMap[filterNodeID] = filterNode
return nil
}
// configureFormatterNode is used to configure a formatter node and associated ID on the Backend.
func (b *Backend) configureFormatterNode(formatConfig audit.FormatterConfig, opts ...audit.Option) error {
const op = "syslog.(Backend).configureFormatterNode"
formatterNodeID, err := event.GenerateNodeID()
if err != nil {
return fmt.Errorf("%s: error generating random NodeID for formatter node: %w", op, err)
}
formatterNode, err := audit.NewEntryFormatter(formatConfig, b, opts...)
if err != nil {
return fmt.Errorf("%s: error creating formatter: %w", op, err)
}
b.nodeIDList = append(b.nodeIDList, formatterNodeID)
b.nodeMap[formatterNodeID] = formatterNode
return nil
}
// configureSinkNode is used to configure a sink node and associated ID on the Backend.
func (b *Backend) configureSinkNode(name string, format string, opts ...event.Option) error {
const op = "syslog.(Backend).configureSinkNode"
name = strings.TrimSpace(name)
if name == "" {
return fmt.Errorf("%s: name is required: %w", op, event.ErrInvalidParameter)
}
format = strings.TrimSpace(format)
if format == "" {
return fmt.Errorf("%s: format is required: %w", op, event.ErrInvalidParameter)
}
sinkNodeID, err := event.GenerateNodeID()
if err != nil {
return fmt.Errorf("%s: error generating random NodeID for sink node: %w", op, err)
}
n, err := event.NewSyslogSink(format, opts...)
if err != nil {
return fmt.Errorf("%s: error creating syslog sink node: %w", op, err)
}
// Wrap the sink node with metrics middleware.
sinkNode := &audit.SinkWrapper{Name: name, Sink: n}
b.nodeIDList = append(b.nodeIDList, sinkNodeID)
b.nodeMap[sinkNodeID] = sinkNode
return nil
}
// Name for this backend; this would ideally correspond to the mount path for the audit device.
func (b *Backend) Name() string {
return b.name
}
// Nodes returns the nodes which should be used by the event framework to process audit entries.
func (b *Backend) Nodes() map[eventlogger.NodeID]eventlogger.Node {
return b.nodeMap
}
// NodeIDs returns the IDs of the nodes, in the order they are required.
func (b *Backend) NodeIDs() []eventlogger.NodeID {
return b.nodeIDList
}
// EventType returns the event type for the backend.
func (b *Backend) EventType() eventlogger.EventType {
return eventlogger.EventType(event.AuditType.String())
}
// HasFiltering determines if the first node for the pipeline is an eventlogger.NodeTypeFilter.
func (b *Backend) HasFiltering() bool {
return len(b.nodeIDList) > 0 && b.nodeMap[b.nodeIDList[0]].Type() == eventlogger.NodeTypeFilter
}

View File

@@ -0,0 +1,313 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package syslog
import (
"testing"
"github.com/hashicorp/eventlogger"
"github.com/hashicorp/vault/audit"
"github.com/stretchr/testify/require"
)
// TestBackend_formatterConfig ensures that all the configuration values are parsed correctly.
func TestBackend_formatterConfig(t *testing.T) {
t.Parallel()
tests := map[string]struct {
config map[string]string
want audit.FormatterConfig
wantErr bool
expectedErrMsg string
}{
"happy-path-json": {
config: map[string]string{
"format": audit.JSONFormat.String(),
"hmac_accessor": "true",
"log_raw": "true",
"elide_list_responses": "true",
},
want: audit.FormatterConfig{
Raw: true,
HMACAccessor: true,
ElideListResponses: true,
RequiredFormat: "json",
}, wantErr: false,
},
"happy-path-jsonx": {
config: map[string]string{
"format": audit.JSONxFormat.String(),
"hmac_accessor": "true",
"log_raw": "true",
"elide_list_responses": "true",
},
want: audit.FormatterConfig{
Raw: true,
HMACAccessor: true,
ElideListResponses: true,
RequiredFormat: "jsonx",
},
wantErr: false,
},
"invalid-format": {
config: map[string]string{
"format": " squiggly ",
"hmac_accessor": "true",
"log_raw": "true",
"elide_list_responses": "true",
},
want: audit.FormatterConfig{},
wantErr: true,
expectedErrMsg: "audit.NewFormatterConfig: error applying options: audit.(format).validate: 'squiggly' is not a valid format: invalid parameter",
},
"invalid-hmac-accessor": {
config: map[string]string{
"format": audit.JSONFormat.String(),
"hmac_accessor": "maybe",
},
want: audit.FormatterConfig{},
wantErr: true,
expectedErrMsg: "syslog.formatterConfig: unable to parse 'hmac_accessor': strconv.ParseBool: parsing \"maybe\": invalid syntax",
},
"invalid-log-raw": {
config: map[string]string{
"format": audit.JSONFormat.String(),
"hmac_accessor": "true",
"log_raw": "maybe",
},
want: audit.FormatterConfig{},
wantErr: true,
expectedErrMsg: "syslog.formatterConfig: unable to parse 'log_raw': strconv.ParseBool: parsing \"maybe\": invalid syntax",
},
"invalid-elide-bool": {
config: map[string]string{
"format": audit.JSONFormat.String(),
"hmac_accessor": "true",
"log_raw": "true",
"elide_list_responses": "maybe",
},
want: audit.FormatterConfig{},
wantErr: true,
expectedErrMsg: "syslog.formatterConfig: unable to parse 'elide_list_responses': strconv.ParseBool: parsing \"maybe\": invalid syntax",
},
}
for name, tc := range tests {
name := name
tc := tc
t.Run(name, func(t *testing.T) {
t.Parallel()
got, err := formatterConfig(tc.config)
if tc.wantErr {
require.Error(t, err)
require.EqualError(t, err, tc.expectedErrMsg)
} else {
require.NoError(t, err)
}
require.Equal(t, tc.want, got)
})
}
}
// TestBackend_configureFilterNode ensures that configureFilterNode handles various
// filter values as expected. Empty (including whitespace) strings should return
// no error but skip configuration of the node.
func TestBackend_configureFilterNode(t *testing.T) {
t.Parallel()
tests := map[string]struct {
filter string
shouldSkipNode bool
wantErr bool
expectedErrorMsg string
}{
"happy": {
filter: "foo == bar",
},
"empty": {
filter: "",
shouldSkipNode: true,
},
"spacey": {
filter: " ",
shouldSkipNode: true,
},
"bad": {
filter: "___qwerty",
wantErr: true,
expectedErrorMsg: "syslog.(Backend).configureFilterNode: error creating filter node: audit.NewEntryFilter: cannot create new audit filter",
},
}
for name, tc := range tests {
name := name
tc := tc
t.Run(name, func(t *testing.T) {
t.Parallel()
b := &Backend{
nodeIDList: []eventlogger.NodeID{},
nodeMap: map[eventlogger.NodeID]eventlogger.Node{},
}
err := b.configureFilterNode(tc.filter)
switch {
case tc.wantErr:
require.Error(t, err)
require.ErrorContains(t, err, tc.expectedErrorMsg)
require.Len(t, b.nodeIDList, 0)
require.Len(t, b.nodeMap, 0)
case tc.shouldSkipNode:
require.NoError(t, err)
require.Len(t, b.nodeIDList, 0)
require.Len(t, b.nodeMap, 0)
default:
require.NoError(t, err)
require.Len(t, b.nodeIDList, 1)
require.Len(t, b.nodeMap, 1)
id := b.nodeIDList[0]
node := b.nodeMap[id]
require.Equal(t, eventlogger.NodeTypeFilter, node.Type())
}
})
}
}
// TestBackend_configureFormatterNode ensures that configureFormatterNode
// populates the nodeIDList and nodeMap on Backend when given valid formatConfig.
func TestBackend_configureFormatterNode(t *testing.T) {
t.Parallel()
b := &Backend{
nodeIDList: []eventlogger.NodeID{},
nodeMap: map[eventlogger.NodeID]eventlogger.Node{},
}
formatConfig, err := audit.NewFormatterConfig()
require.NoError(t, err)
err = b.configureFormatterNode(formatConfig)
require.NoError(t, err)
require.Len(t, b.nodeIDList, 1)
require.Len(t, b.nodeMap, 1)
id := b.nodeIDList[0]
node := b.nodeMap[id]
require.Equal(t, eventlogger.NodeTypeFormatter, node.Type())
}
// TestBackend_configureSinkNode ensures that we can correctly configure the sink
// node on the Backend, and that any incorrect parameters result in the relevant errors.
func TestBackend_configureSinkNode(t *testing.T) {
t.Parallel()
tests := map[string]struct {
name string
format string
wantErr bool
expectedErrMsg string
expectedName string
}{
"name-empty": {
name: "",
wantErr: true,
expectedErrMsg: "syslog.(Backend).configureSinkNode: name is required: invalid parameter",
},
"name-whitespace": {
name: " ",
wantErr: true,
expectedErrMsg: "syslog.(Backend).configureSinkNode: name is required: invalid parameter",
},
"format-empty": {
name: "foo",
format: "",
wantErr: true,
expectedErrMsg: "syslog.(Backend).configureSinkNode: format is required: invalid parameter",
},
"format-whitespace": {
name: "foo",
format: " ",
wantErr: true,
expectedErrMsg: "syslog.(Backend).configureSinkNode: format is required: invalid parameter",
},
"happy": {
name: "foo",
format: "json",
wantErr: false,
expectedName: "foo",
},
}
for name, tc := range tests {
name := name
tc := tc
t.Run(name, func(t *testing.T) {
t.Parallel()
b := &Backend{
nodeIDList: []eventlogger.NodeID{},
nodeMap: map[eventlogger.NodeID]eventlogger.Node{},
}
err := b.configureSinkNode(tc.name, tc.format)
if tc.wantErr {
require.Error(t, err)
require.EqualError(t, err, tc.expectedErrMsg)
require.Len(t, b.nodeIDList, 0)
require.Len(t, b.nodeMap, 0)
} else {
require.NoError(t, err)
require.Len(t, b.nodeIDList, 1)
require.Len(t, b.nodeMap, 1)
id := b.nodeIDList[0]
node := b.nodeMap[id]
require.Equal(t, eventlogger.NodeTypeSink, node.Type())
sw, ok := node.(*audit.SinkWrapper)
require.True(t, ok)
require.Equal(t, tc.expectedName, sw.Name)
}
})
}
}
// TestBackend_configureFilterFormatterSink ensures that configuring all three
// types of nodes on a Backend works as expected, i.e. we have all three nodes
// at the end and nothing gets overwritten. The order of calls influences the
// slice of IDs on the Backend.
func TestBackend_configureFilterFormatterSink(t *testing.T) {
t.Parallel()
b := &Backend{
nodeIDList: []eventlogger.NodeID{},
nodeMap: map[eventlogger.NodeID]eventlogger.Node{},
}
formatConfig, err := audit.NewFormatterConfig()
require.NoError(t, err)
err = b.configureFilterNode("foo == bar")
require.NoError(t, err)
err = b.configureFormatterNode(formatConfig)
require.NoError(t, err)
err = b.configureSinkNode("foo", "json")
require.NoError(t, err)
require.Len(t, b.nodeIDList, 3)
require.Len(t, b.nodeMap, 3)
id := b.nodeIDList[0]
node := b.nodeMap[id]
require.Equal(t, eventlogger.NodeTypeFilter, node.Type())
id = b.nodeIDList[1]
node = b.nodeMap[id]
require.Equal(t, eventlogger.NodeTypeFormatter, node.Type())
id = b.nodeIDList[2]
node = b.nodeMap[id]
require.Equal(t, eventlogger.NodeTypeSink, node.Type())
}

View File

@@ -1,3 +0,0 @@
```release-note:improvement
fairshare/jobmanager: Add 'stopped' method
```

3
changelog/24476.txt Normal file
View File

@@ -0,0 +1,3 @@
```release-note:improvement
ui: improve accessibility - color contrast, labels, and automatic testing
```

3
changelog/24492.txt Normal file
View File

@@ -0,0 +1,3 @@
```release-note:bug
ui: fix navigation items shown to user when chroot_namespace configured
```

3
changelog/24513.txt Normal file
View File

@@ -0,0 +1,3 @@
```release-note:bug
ui: fix KV v2 details view defaulting to JSON view when secret value includes `{`
```

3
changelog/24529.txt Normal file
View File

@@ -0,0 +1,3 @@
```release-note:improvement
website/docs: Update references to Key Value secrets engine from 'K/V' to 'KV'
```

3
changelog/24530.txt Normal file
View File

@@ -0,0 +1,3 @@
```release-note:improvement
ui: obscure JSON values when KV v2 secret has nested objects
```

3
changelog/24549.txt Normal file
View File

@@ -0,0 +1,3 @@
```release-note:bug
api: sys/leader ActiveTime field no longer gets reset when we do an internal state change that doesn't change our active status.
```

3
changelog/24558.txt Normal file
View File

@@ -0,0 +1,3 @@
```release-note:feature
core/audit: add filter parameter when enabling an audit device, allowing filtering (using go-bexpr expressions) of audit entries written to the device's audit log
```
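For context on this entry: the device-level filter is backed by audit.NewEntryFilter (see configureFilterNode above), which rejects empty or unparsable go-bexpr expressions. Below is a minimal sketch of validating an expression up front, assuming the in-repo audit package and reusing the placeholder expression from the backend tests.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/audit"
)

func main() {
	// "foo == bar" is the placeholder expression used by the backend tests; real
	// expressions would reference fields exposed via logical.LogInputBexpr.
	filterNode, err := audit.NewEntryFilter("foo == bar")
	if err != nil {
		fmt.Println("invalid filter expression:", err)
		return
	}

	// A valid expression yields an eventlogger filter node (NodeTypeFilter).
	fmt.Println("node type:", filterNode.Type())
}
```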

View File

@@ -216,7 +216,7 @@ func (c *KVPatchCommand) Run(args []string) int {
}
if !v2 {
c.UI.Error("K/V engine mount must be version 2 for patch support")
c.UI.Error("KV engine mount must be version 2 for patch support")
return 2
}

View File

@@ -160,7 +160,7 @@ func (c *KVRollbackCommand) Run(args []string) int {
}
if !v2 {
c.UI.Error("K/V engine mount must be version 2 for rollback support")
c.UI.Error("KV engine mount must be version 2 for rollback support")
return 2
}

View File

@@ -2066,10 +2066,10 @@ func (c *ServerCommand) enableDev(core *vault.Core, coreConfig *vault.CoreConfig
}
resp, err := core.HandleRequest(ctx, req)
if err != nil {
return nil, fmt.Errorf("error creating default K/V store: %w", err)
return nil, fmt.Errorf("error creating default KV store: %w", err)
}
if resp.IsError() {
return nil, fmt.Errorf("failed to create default K/V store: %w", resp.Error())
return nil, fmt.Errorf("failed to create default KV store: %w", resp.Error())
}
return init, nil

View File

@@ -9,7 +9,6 @@ import (
"io/ioutil"
"math"
"sync"
"sync/atomic"
"time"
"github.com/armon/go-metrics"
@@ -47,7 +46,6 @@ type JobManager struct {
// track queues by index for round robin worker assignment
queuesIndex []string
lastQueueAccessed int
stopped atomic.Bool
}
// NewJobManager creates a job manager, with an optional name
@@ -100,14 +98,9 @@ func (j *JobManager) Stop() {
j.logger.Trace("terminating job manager...")
close(j.quit)
j.workerPool.stop()
j.stopped.Store(true)
})
}
func (j *JobManager) Stopped() bool {
return j.stopped.Load()
}
// AddJob adds a job to the given queue, creating the queue if it doesn't exist
func (j *JobManager) AddJob(job Job, queueID string) {
j.l.Lock()

View File

@@ -9,8 +9,6 @@ import (
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestJobManager_NewJobManager(t *testing.T) {
@@ -176,7 +174,6 @@ func TestJobManager_Stop(t *testing.T) {
j := NewJobManager("job-mgr-test", 5, newTestLogger("jobmanager-test"), nil)
j.Start()
assert.False(t, j.Stopped())
doneCh := make(chan struct{})
timeout := time.After(5 * time.Second)
@@ -188,7 +185,6 @@ func TestJobManager_Stop(t *testing.T) {
select {
case <-doneCh:
assert.True(t, j.Stopped())
break
case <-timeout:
t.Fatal("timed out")

View File

@@ -6,9 +6,10 @@
package corehelpers
import (
"bytes"
"context"
"crypto/sha256"
"encoding/json"
"errors"
"fmt"
"io"
"os"
@@ -29,6 +30,11 @@ import (
"github.com/mitchellh/go-testing-interface"
)
var (
_ audit.Backend = (*NoopAudit)(nil)
_ eventlogger.Node = (*noopWrapper)(nil)
)
var externalPlugins = []string{"transform", "kmip", "keymgmt"}
// RetryUntil runs f until it returns a nil result or the timeout is reached.
@@ -210,52 +216,51 @@ func (m *mockBuiltinRegistry) DeprecationStatus(name string, pluginType consts.P
return consts.Unknown, false
}
func TestNoopAudit(t testing.T, config map[string]string) *NoopAudit {
n, err := NewNoopAudit(config)
func TestNoopAudit(t testing.T, path string, config map[string]string, opts ...audit.Option) *NoopAudit {
cfg := &audit.BackendConfig{Config: config, MountPath: path}
n, err := NewNoopAudit(cfg, opts...)
if err != nil {
t.Fatal(err)
}
return n
}
func NewNoopAudit(config map[string]string) (*NoopAudit, error) {
// NewNoopAudit should be used to create a NoopAudit, as it handles creation of a
// predictable salt and wraps eventlogger nodes so that information can be
// retrieved about what they have seen or formatted.
func NewNoopAudit(config *audit.BackendConfig, opts ...audit.Option) (*NoopAudit, error) {
view := &logical.InmemStorage{}
err := view.Put(context.Background(), &logical.StorageEntry{
Key: "salt",
Value: []byte("foo"),
})
// Create the salt with a known key for predictable HMAC values.
se := &logical.StorageEntry{Key: "salt", Value: []byte("foo")}
err := view.Put(context.Background(), se)
if err != nil {
return nil, err
}
n := &NoopAudit{
Config: &audit.BackendConfig{
// Override the salt-related config settings.
backendConfig := &audit.BackendConfig{
SaltView: view,
SaltConfig: &salt.Config{
HMAC: sha256.New,
HMACType: "hmac-sha256",
},
Config: config,
},
Config: config.Config,
MountPath: config.MountPath,
}
n := &NoopAudit{Config: backendConfig}
cfg, err := audit.NewFormatterConfig()
if err != nil {
return nil, err
}
f, err := audit.NewEntryFormatter(cfg, n)
f, err := audit.NewEntryFormatter(cfg, n, opts...)
if err != nil {
return nil, fmt.Errorf("error creating formatter: %w", err)
}
fw, err := audit.NewEntryFormatterWriter(cfg, f, &audit.JSONWriter{})
if err != nil {
return nil, fmt.Errorf("error creating formatter writer: %w", err)
}
n.formatter = fw
n.nodeIDList = make([]eventlogger.NodeID, 2)
n.nodeMap = make(map[eventlogger.NodeID]eventlogger.Node, 2)
@@ -264,8 +269,11 @@ func NewNoopAudit(config map[string]string) (*NoopAudit, error) {
return nil, fmt.Errorf("error generating random NodeID for formatter node: %w", err)
}
// Wrap the formatting node, so we can get any bytes that were formatted etc.
wrappedFormatter := &noopWrapper{format: "json", node: f, backend: n}
n.nodeIDList[0] = formatterNodeID
n.nodeMap[formatterNodeID] = f
n.nodeMap[formatterNodeID] = wrappedFormatter
sinkNode := event.NewNoopSink()
sinkNodeID, err := event.GenerateNodeID()
@@ -279,9 +287,12 @@ func NewNoopAudit(config map[string]string) (*NoopAudit, error) {
return n, nil
}
// NoopAuditFactory should be used when the test needs a way to access bytes that
// have been formatted by the pipeline during audit requests.
// The records parameter will be repointed to the one used within the pipeline.
func NoopAuditFactory(records **[][]byte) audit.Factory {
return func(_ context.Context, config *audit.BackendConfig, _ bool, _ audit.HeaderFormatter) (audit.Backend, error) {
n, err := NewNoopAudit(config.Config)
return func(_ context.Context, config *audit.BackendConfig, _ bool, headerFormatter audit.HeaderFormatter) (audit.Backend, error) {
n, err := NewNoopAudit(config, audit.WithHeaderFormatter(headerFormatter))
if err != nil {
return nil, err
}
@@ -293,8 +304,19 @@ func NoopAuditFactory(records **[][]byte) audit.Factory {
}
}
// noopWrapper is designed to wrap a formatter node in order to allow access to
// the bytes formatted, the headers formatted, and parts of the logical.LogInput.
// Some older tests rely on being able to query this information, so while those
// tests remain we need to keep supporting it.
type noopWrapper struct {
format string
node eventlogger.Node
backend *NoopAudit
}
type NoopAudit struct {
Config *audit.BackendConfig
ReqErr error
ReqAuth []*logical.Auth
Req []*logical.Request
@@ -309,8 +331,6 @@ type NoopAudit struct {
RespNonHMACKeys [][]string
RespReqNonHMACKeys [][]string
RespErrs []error
formatter *audit.EntryFormatterWriter
records [][]byte
l sync.RWMutex
salt *salt.Salt
@@ -320,70 +340,155 @@ type NoopAudit struct {
nodeMap map[eventlogger.NodeID]eventlogger.Node
}
func (n *NoopAudit) LogRequest(ctx context.Context, in *logical.LogInput) error {
n.l.Lock()
defer n.l.Unlock()
// Process handles the contortions required by older test code to ensure the expected behavior.
// It attempts some pre/post-processing of the logical.LogInput that should form part of the
// event's payload data, capturing the headers that were formatted and tracking the overall
// bytes that a formatted event uses once it is ready to head down the pipeline to the sink
// node (a noop for us).
func (n *noopWrapper) Process(ctx context.Context, e *eventlogger.Event) (*eventlogger.Event, error) {
n.backend.l.Lock()
defer n.backend.l.Unlock()
if n.formatter != nil {
var w bytes.Buffer
err := n.formatter.FormatAndWriteRequest(ctx, &w, in)
if err != nil {
return err
}
n.records = append(n.records, w.Bytes())
var err error
// We're expecting audit events since this is an audit device.
a, ok := e.Payload.(*audit.AuditEvent)
if !ok {
return nil, errors.New("cannot parse payload as an audit event")
}
n.ReqAuth = append(n.ReqAuth, in.Auth)
n.Req = append(n.Req, in.Request)
n.ReqHeaders = append(n.ReqHeaders, in.Request.Headers)
n.ReqNonHMACKeys = in.NonHMACReqDataKeys
n.ReqErrs = append(n.ReqErrs, in.OuterErr)
in := a.Data
return n.ReqErr
}
// Depending on the type of the audit event (request or response) we need to
// track different things.
switch a.Subtype {
case audit.RequestType:
n.backend.ReqAuth = append(n.backend.ReqAuth, in.Auth)
n.backend.Req = append(n.backend.Req, in.Request)
n.backend.ReqNonHMACKeys = in.NonHMACReqDataKeys
n.backend.ReqErrs = append(n.backend.ReqErrs, in.OuterErr)
func (n *NoopAudit) LogResponse(ctx context.Context, in *logical.LogInput) error {
n.l.Lock()
defer n.l.Unlock()
if n.formatter != nil {
var w bytes.Buffer
err := n.formatter.FormatAndWriteResponse(ctx, &w, in)
if err != nil {
return err
if n.backend.ReqErr != nil {
return nil, n.backend.ReqErr
}
n.records = append(n.records, w.Bytes())
}
n.RespAuth = append(n.RespAuth, in.Auth)
n.RespReq = append(n.RespReq, in.Request)
n.Resp = append(n.Resp, in.Response)
n.RespErrs = append(n.RespErrs, in.OuterErr)
case audit.ResponseType:
n.backend.RespAuth = append(n.backend.RespAuth, in.Auth)
n.backend.RespReq = append(n.backend.RespReq, in.Request)
n.backend.Resp = append(n.backend.Resp, in.Response)
n.backend.RespErrs = append(n.backend.RespErrs, in.OuterErr)
if in.Response != nil {
n.RespNonHMACKeys = append(n.RespNonHMACKeys, in.NonHMACRespDataKeys)
n.RespReqNonHMACKeys = append(n.RespReqNonHMACKeys, in.NonHMACReqDataKeys)
n.backend.RespNonHMACKeys = append(n.backend.RespNonHMACKeys, in.NonHMACRespDataKeys)
n.backend.RespReqNonHMACKeys = append(n.backend.RespReqNonHMACKeys, in.NonHMACReqDataKeys)
}
return n.RespErr
if n.backend.RespErr != nil {
return nil, n.backend.RespErr
}
default:
return nil, fmt.Errorf("unknown audit event type: %q", a.Subtype)
}
// Once we've taken note of the relevant properties of the event, we get the
// underlying (wrapped) node to process it as normal.
e, err = n.node.Process(ctx, e)
if err != nil {
return nil, fmt.Errorf("error processing wrapped node: %w", err)
}
// Once processing has been carried out, the underlying node (a formatter node)
// should contain the output ready for the sink node. We'll get that in order
// to track how many bytes we formatted.
b, ok := e.Format(n.format)
if ok {
n.backend.records = append(n.backend.records, b)
}
// Finally, the last bit of post-processing is to make sure that we track the
// formatted headers that would have made it to the logs via the sink node.
// They only appear in requests.
if a.Subtype == audit.RequestType {
reqEntry := &audit.RequestEntry{}
err = json.Unmarshal(b, &reqEntry)
if err != nil {
return nil, fmt.Errorf("unable to parse formatted audit entry data: %w", err)
}
n.backend.ReqHeaders = append(n.backend.ReqHeaders, reqEntry.Request.Headers)
}
// Return the event and no error in order to let the pipeline continue on.
return e, nil
}
func (n *noopWrapper) Reopen() error {
return n.node.Reopen()
}
func (n *noopWrapper) Type() eventlogger.NodeType {
return n.node.Type()
}
// Deprecated: use eventlogger.
func (n *NoopAudit) LogRequest(ctx context.Context, in *logical.LogInput) error {
return nil
}
// Deprecated: use eventlogger.
func (n *NoopAudit) LogResponse(ctx context.Context, in *logical.LogInput) error {
return nil
}
// LogTestMessage will manually crank the handle on the nodes associated with this backend.
func (n *NoopAudit) LogTestMessage(ctx context.Context, in *logical.LogInput, config map[string]string) error {
n.l.Lock()
defer n.l.Unlock()
var w bytes.Buffer
tempFormatter, err := audit.NewTemporaryFormatter(config["format"], config["prefix"])
if err != nil {
return err
// Fake event for test purposes.
e := &eventlogger.Event{
Type: eventlogger.EventType(event.AuditType.String()),
CreatedAt: time.Now(),
Formatted: make(map[string][]byte),
Payload: in,
}
err = tempFormatter.FormatAndWriteResponse(ctx, &w, in)
// Try to get the required format from config and default to JSON.
format, ok := config["format"]
if !ok {
format = "json"
}
cfg, err := audit.NewFormatterConfig(audit.WithFormat(format))
if err != nil {
return err
return fmt.Errorf("cannot create config for formatter node: %w", err)
}
// Create a temporary formatter node for reuse.
f, err := audit.NewEntryFormatter(cfg, n, audit.WithPrefix(config["prefix"]))
// Go over each node in order from our list.
for _, id := range n.nodeIDList {
node, ok := n.nodeMap[id]
if !ok {
return fmt.Errorf("node not found: %v", id)
}
n.records = append(n.records, w.Bytes())
switch node.Type() {
case eventlogger.NodeTypeFormatter:
// Use a temporary formatter node which doesn't persist its salt anywhere.
if formatNode, ok := node.(*audit.EntryFormatter); ok && formatNode != nil {
e, err = f.Process(ctx, e)
// Housekeeping: record the bytes that were processed.
if e != nil {
b, ok := e.Format(format)
if ok {
n.records = append(n.records, b)
}
}
}
default:
e, err = node.Process(ctx, e)
}
}
return nil
}
@@ -507,3 +612,23 @@ func NewTestLogger(t testing.T) *TestLogger {
func (tl *TestLogger) StopLogging() {
tl.InterceptLogger.DeregisterSink(tl.sink)
}
func (n *NoopAudit) EventType() eventlogger.EventType {
return eventlogger.EventType(event.AuditType.String())
}
func (n *NoopAudit) HasFiltering() bool {
return false
}
func (n *NoopAudit) Name() string {
return n.Config.MountPath
}
func (n *NoopAudit) Nodes() map[eventlogger.NodeID]eventlogger.Node {
return n.nodeMap
}
func (n *NoopAudit) NodeIDs() []eventlogger.NodeID {
return n.nodeIDList
}
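As a short usage sketch of the factory and accessors above: the CoreConfig wiring follows the pattern used in the HTTP tests later in this diff, the test name is invented, and the usual imports (audit, corehelpers, vault) are assumed.

```go
func TestAuditCapture(t *testing.T) {
	// Capture formatted audit entries via NoopAuditFactory; the records pointer
	// is repointed to the slice used inside the pipeline.
	var records *[][]byte

	coreConfig := &vault.CoreConfig{
		AuditBackends: map[string]audit.Factory{
			"noop": corehelpers.NoopAuditFactory(&records),
		},
	}

	// Once a "noop" audit device is enabled and requests flow through the core,
	// *records holds the bytes formatted by the wrapped formatter node.
	_ = coreConfig
}
```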

View File

@@ -11,10 +11,8 @@ import (
"fmt"
"io/ioutil"
"math/rand"
"net/url"
"os"
"strings"
"sync/atomic"
"time"
"github.com/armon/go-metrics"
@@ -435,46 +433,9 @@ func RekeyCluster(t testing.T, cluster *vault.TestCluster, recovery bool) [][]by
return newKeys
}
// TestRaftServerAddressProvider is a ServerAddressProvider that uses the
// ClusterAddr() of each node to provide raft addresses.
//
// Note that TestRaftServerAddressProvider should only be used in cases where
// cores that are part of a raft configuration have already had
// startClusterListener() called (via either unsealing or raft joining).
type TestRaftServerAddressProvider struct {
Cluster *vault.TestCluster
}
func (p *TestRaftServerAddressProvider) ServerAddr(id raftlib.ServerID) (raftlib.ServerAddress, error) {
for _, core := range p.Cluster.Cores {
if core.NodeID == string(id) {
parsed, err := url.Parse(core.ClusterAddr())
if err != nil {
return "", err
}
return raftlib.ServerAddress(parsed.Host), nil
}
}
return "", errors.New("could not find cluster addr")
}
func RaftClusterJoinNodes(t testing.T, cluster *vault.TestCluster) {
addressProvider := &TestRaftServerAddressProvider{Cluster: cluster}
atomic.StoreUint32(&vault.TestingUpdateClusterAddr, 1)
leader := cluster.Cores[0]
// Seal the leader so we can install an address provider
{
EnsureCoreSealed(t, leader)
leader.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider)
cluster.UnsealCore(t, leader)
vault.TestWaitActive(t, leader.Core)
}
leaderInfos := []*raft.LeaderJoinInfo{
{
LeaderAPIAddr: leader.Client.Address(),
@@ -485,7 +446,6 @@ func RaftClusterJoinNodes(t testing.T, cluster *vault.TestCluster) {
// Join followers
for i := 1; i < len(cluster.Cores); i++ {
core := cluster.Cores[i]
core.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider)
_, err := core.JoinRaftCluster(namespace.RootContext(context.Background()), leaderInfos, false)
if err != nil {
t.Fatal(err)

View File

@@ -10,8 +10,6 @@ import (
"os"
"time"
"github.com/hashicorp/vault/internalshared/configutil"
"github.com/hashicorp/go-hclog"
logicalKv "github.com/hashicorp/vault-plugin-secrets-kv"
"github.com/hashicorp/vault/audit"
@@ -23,6 +21,7 @@ import (
"github.com/hashicorp/vault/helper/testhelpers"
"github.com/hashicorp/vault/helper/testhelpers/corehelpers"
vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/internalshared/configutil"
"github.com/hashicorp/vault/physical/raft"
"github.com/hashicorp/vault/sdk/logical"
"github.com/hashicorp/vault/sdk/physical"
@@ -105,7 +104,7 @@ func MakeFileBackend(t testing.T, logger hclog.Logger) *vault.PhysicalBackendBun
}
}
func MakeRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, extraConf map[string]interface{}) *vault.PhysicalBackendBundle {
func MakeRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, extraConf map[string]interface{}, bridge *raft.ClusterAddrBridge) *vault.PhysicalBackendBundle {
nodeID := fmt.Sprintf("core-%d", coreIdx)
raftDir, err := ioutil.TempDir("", "vault-raft-")
if err != nil {
@@ -118,10 +117,25 @@ func MakeRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, extraConf ma
logger.Info("raft dir", "dir", raftDir)
backend, err := makeRaftBackend(logger, nodeID, raftDir, extraConf, bridge)
if err != nil {
cleanupFunc()
t.Fatal(err)
}
return &vault.PhysicalBackendBundle{
Backend: backend,
Cleanup: cleanupFunc,
}
}
func makeRaftBackend(logger hclog.Logger, nodeID, raftDir string, extraConf map[string]interface{}, bridge *raft.ClusterAddrBridge) (physical.Backend, error) {
conf := map[string]string{
"path": raftDir,
"node_id": nodeID,
"performance_multiplier": "8",
"autopilot_reconcile_interval": "300ms",
"autopilot_update_interval": "100ms",
}
for k, v := range extraConf {
val, ok := v.(string)
@@ -132,14 +146,13 @@ func MakeRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, extraConf ma
backend, err := raft.NewRaftBackend(conf, logger.Named("raft"))
if err != nil {
cleanupFunc()
t.Fatal(err)
return nil, err
}
if bridge != nil {
backend.(*raft.RaftBackend).SetServerAddressProvider(bridge)
}
return &vault.PhysicalBackendBundle{
Backend: backend,
Cleanup: cleanupFunc,
}
return backend, nil
}
// RaftHAFactory returns a PhysicalBackendBundle with raft set as the HABackend
@@ -222,7 +235,14 @@ func FileBackendSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions) {
func RaftBackendSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions) {
opts.KeepStandbysSealed = true
opts.PhysicalFactory = MakeRaftBackend
var bridge *raft.ClusterAddrBridge
if !opts.InmemClusterLayers && opts.ClusterLayers == nil {
bridge = raft.NewClusterAddrBridge()
}
conf.ClusterAddrBridge = bridge
opts.PhysicalFactory = func(t testing.T, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle {
return MakeRaftBackend(t, coreIdx, logger, conf, bridge)
}
opts.SetupFunc = func(t testing.T, c *vault.TestCluster) {
if opts.NumCores != 1 {
testhelpers.RaftClusterJoinNodes(t, c)
@@ -232,7 +252,7 @@ func RaftBackendSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions) {
}
func RaftHASetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions, bundler PhysicalBackendBundler) {
opts.KeepStandbysSealed = true
opts.InmemClusterLayers = true
opts.PhysicalFactory = RaftHAFactory(bundler)
}

View File

@@ -9,7 +9,6 @@ import (
"os"
hclog "github.com/hashicorp/go-hclog"
raftlib "github.com/hashicorp/raft"
"github.com/hashicorp/vault/physical/raft"
"github.com/hashicorp/vault/sdk/physical"
"github.com/hashicorp/vault/vault"
@@ -74,7 +73,7 @@ func MakeReusableStorage(t testing.T, logger hclog.Logger, bundle *vault.Physica
// MakeReusableRaftStorage makes a physical raft backend that can be re-used
// across multiple test clusters in sequence.
func MakeReusableRaftStorage(t testing.T, logger hclog.Logger, numCores int, addressProvider raftlib.ServerAddressProvider) (ReusableStorage, StorageCleanup) {
func MakeReusableRaftStorage(t testing.T, logger hclog.Logger, numCores int) (ReusableStorage, StorageCleanup) {
raftDirs := make([]string, numCores)
for i := 0; i < numCores; i++ {
raftDirs[i] = makeRaftDir(t)
@@ -87,7 +86,7 @@ func MakeReusableRaftStorage(t testing.T, logger hclog.Logger, numCores int, add
conf.DisablePerformanceStandby = true
opts.KeepStandbysSealed = true
opts.PhysicalFactory = func(t testing.T, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle {
return makeReusableRaftBackend(t, coreIdx, logger, raftDirs[coreIdx], addressProvider, false)
return makeReusableRaftBackend(t, coreIdx, logger, raftDirs[coreIdx], false)
}
},
@@ -124,9 +123,10 @@ func MakeReusableRaftHAStorage(t testing.T, logger hclog.Logger, numCores int, b
storage := ReusableStorage{
Setup: func(conf *vault.CoreConfig, opts *vault.TestClusterOptions) {
opts.InmemClusterLayers = true
opts.KeepStandbysSealed = true
opts.PhysicalFactory = func(t testing.T, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle {
haBundle := makeReusableRaftBackend(t, coreIdx, logger, raftDirs[coreIdx], nil, true)
haBundle := makeReusableRaftBackend(t, coreIdx, logger, raftDirs[coreIdx], true)
return &vault.PhysicalBackendBundle{
Backend: bundle.Backend,
@@ -168,25 +168,13 @@ func makeRaftDir(t testing.T) string {
return raftDir
}
func makeReusableRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, raftDir string, addressProvider raftlib.ServerAddressProvider, ha bool) *vault.PhysicalBackendBundle {
func makeReusableRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, raftDir string, ha bool) *vault.PhysicalBackendBundle {
nodeID := fmt.Sprintf("core-%d", coreIdx)
conf := map[string]string{
"path": raftDir,
"node_id": nodeID,
"performance_multiplier": "8",
"autopilot_reconcile_interval": "300ms",
"autopilot_update_interval": "100ms",
}
backend, err := raft.NewRaftBackend(conf, logger)
backend, err := makeRaftBackend(logger, nodeID, raftDir, nil, nil)
if err != nil {
t.Fatal(err)
}
if addressProvider != nil {
backend.(*raft.RaftBackend).SetServerAddressProvider(addressProvider)
}
bundle := new(vault.PhysicalBackendBundle)
if ha {

View File

@@ -569,10 +569,8 @@ func TestLogical_RespondWithStatusCode(t *testing.T) {
}
func TestLogical_Audit_invalidWrappingToken(t *testing.T) {
t.Setenv("VAULT_AUDIT_DISABLE_EVENTLOGGER", "true")
// Create a noop audit backend
noop := corehelpers.TestNoopAudit(t, nil)
noop := corehelpers.TestNoopAudit(t, "noop/", nil)
c, _, root := vault.TestCoreUnsealedWithConfig(t, &vault.CoreConfig{
AuditBackends: map[string]audit.Factory{
"noop": func(ctx context.Context, config *audit.BackendConfig, _ bool, _ audit.HeaderFormatter) (audit.Backend, error) {
@@ -584,7 +582,6 @@ func TestLogical_Audit_invalidWrappingToken(t *testing.T) {
defer ln.Close()
// Enable the audit backend
resp := testHttpPost(t, root, addr+"/v1/sys/audit/noop", map[string]interface{}{
"type": "noop",
})

View File

@@ -247,8 +247,6 @@ func testServerWithAudit(t *testing.T, records **[][]byte) (net.Listener, string
}
func TestSysGenerateRoot_badKey(t *testing.T) {
t.Setenv("VAULT_AUDIT_DISABLE_EVENTLOGGER", "true")
var records *[][]byte
ln, addr, token, _ := testServerWithAudit(t, &records)
defer ln.Close()

View File

@@ -113,7 +113,11 @@ func WithNow(now time.Time) Option {
// WithFacility provides an Option to represent a 'facility' for a syslog sink.
func WithFacility(facility string) Option {
return func(o *options) error {
facility = strings.TrimSpace(facility)
if facility != "" {
o.withFacility = facility
}
return nil
}
@@ -122,7 +126,11 @@ func WithFacility(facility string) Option {
// WithTag provides an Option to represent a 'tag' for a syslog sink.
func WithTag(tag string) Option {
return func(o *options) error {
tag = strings.TrimSpace(tag)
if tag != "" {
o.withTag = tag
}
return nil
}

View File

@@ -205,7 +205,7 @@ func TestOptions_WithFacility(t *testing.T) {
},
"whitespace": {
Value: " ",
ExpectedValue: " ",
ExpectedValue: "",
},
"value": {
Value: "juan",
@@ -213,7 +213,7 @@ func TestOptions_WithFacility(t *testing.T) {
},
"spacey-value": {
Value: " juan ",
ExpectedValue: " juan ",
ExpectedValue: "juan",
},
}
@@ -243,7 +243,7 @@ func TestOptions_WithTag(t *testing.T) {
},
"whitespace": {
Value: " ",
ExpectedValue: " ",
ExpectedValue: "",
},
"value": {
Value: "juan",
@@ -251,7 +251,7 @@ func TestOptions_WithTag(t *testing.T) {
},
"spacey-value": {
Value: " juan ",
ExpectedValue: " juan ",
ExpectedValue: "juan",
},
}

View File

@@ -0,0 +1,24 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package event
import "github.com/hashicorp/eventlogger"
// PipelineReader surfaces information required for pipeline registration.
type PipelineReader interface {
// EventType should return the event type to be used for pipeline registration.
EventType() eventlogger.EventType
// HasFiltering should determine if filter nodes are used by this pipeline.
HasFiltering() bool
// Name for the pipeline which should be used for the eventlogger.PipelineID.
Name() string
// Nodes should return the nodes which should be used by the framework to process events.
Nodes() map[eventlogger.NodeID]eventlogger.Node
// NodeIDs should return the IDs of the nodes, in the order they are required.
NodeIDs() []eventlogger.NodeID
}
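As a rough illustration of the contract above, a hedged sketch of a type that satisfies PipelineReader (the type name, fields, and event type string below are invented for this example and are not part of the change):

// auditPipeline is a hypothetical implementer of PipelineReader.
type auditPipeline struct {
    name    string
    filter  bool
    nodes   map[eventlogger.NodeID]eventlogger.Node
    nodeIDs []eventlogger.NodeID
}

func (p *auditPipeline) EventType() eventlogger.EventType { return eventlogger.EventType("audit") }
func (p *auditPipeline) HasFiltering() bool               { return p.filter }
func (p *auditPipeline) Name() string                     { return p.name }
func (p *auditPipeline) Nodes() map[eventlogger.NodeID]eventlogger.Node { return p.nodes }

// NodeIDs returns the node IDs in the order they should process events,
// e.g. filter, then formatter, then sink.
func (p *auditPipeline) NodeIDs() []eventlogger.NodeID { return p.nodeIDs }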

View File

@@ -7,6 +7,7 @@ import (
"context"
"fmt"
"net"
"strings"
"sync"
"time"
@@ -29,9 +30,19 @@ type SocketSink struct {
// NewSocketSink should be used to create a new SocketSink.
// Accepted options: WithMaxDuration and WithSocketType.
func NewSocketSink(format string, address string, opt ...Option) (*SocketSink, error) {
func NewSocketSink(address string, format string, opt ...Option) (*SocketSink, error) {
const op = "event.NewSocketSink"
address = strings.TrimSpace(address)
if address == "" {
return nil, fmt.Errorf("%s: address is required: %w", op, ErrInvalidParameter)
}
format = strings.TrimSpace(format)
if format == "" {
return nil, fmt.Errorf("%s: format is required: %w", op, ErrInvalidParameter)
}
opts, err := getOpts(opt...)
if err != nil {
return nil, fmt.Errorf("%s: error applying options: %w", op, err)
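Note that the constructor now takes the address before the format. A hedged usage sketch (the address, format, and option values are placeholders, not values mandated by this change):

// Hypothetical caller: create a TCP socket sink for JSON-formatted entries,
// overriding the default 2s write deadline.
sink, err := NewSocketSink("127.0.0.1:9090", "json",
    WithSocketType("tcp"), WithMaxDuration("5s"))
if err != nil {
    return err
}
_ = sink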

View File

@@ -0,0 +1,85 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package event
import (
"testing"
"time"
"github.com/stretchr/testify/require"
)
// TestNewSocketSink ensures that we validate the input arguments and can create
// the SocketSink if everything goes to plan.
func TestNewSocketSink(t *testing.T) {
t.Parallel()
tests := map[string]struct {
address string
format string
opts []Option
want *SocketSink
wantErr bool
expectedErrMsg string
}{
"address-empty": {
address: "",
wantErr: true,
expectedErrMsg: "event.NewSocketSink: address is required: invalid parameter",
},
"address-whitespace": {
address: " ",
wantErr: true,
expectedErrMsg: "event.NewSocketSink: address is required: invalid parameter",
},
"format-empty": {
address: "addr",
format: "",
wantErr: true,
expectedErrMsg: "event.NewSocketSink: format is required: invalid parameter",
},
"format-whitespace": {
address: "addr",
format: " ",
wantErr: true,
expectedErrMsg: "event.NewSocketSink: format is required: invalid parameter",
},
"bad-max-duration": {
address: "addr",
format: "json",
opts: []Option{WithMaxDuration("bar")},
wantErr: true,
expectedErrMsg: "event.NewSocketSink: error applying options: time: invalid duration \"bar\"",
},
"happy": {
address: "wss://foo",
format: "json",
want: &SocketSink{
requiredFormat: "json",
address: "wss://foo",
socketType: "tcp", // defaults to tcp
maxDuration: 2 * time.Second, // defaults to 2 secs
},
},
}
for name, tc := range tests {
name := name
tc := tc
t.Run(name, func(t *testing.T) {
t.Parallel()
got, err := NewSocketSink(tc.address, tc.format, tc.opts...)
if tc.wantErr {
require.Error(t, err)
require.EqualError(t, err, tc.expectedErrMsg)
require.Nil(t, got)
} else {
require.NoError(t, err)
require.Equal(t, tc.want, got)
}
})
}
}

View File

@@ -7,6 +7,7 @@ import (
"context"
"fmt"
"os"
"strings"
"github.com/hashicorp/eventlogger"
)
@@ -21,10 +22,17 @@ type StdoutSink struct {
// NewStdoutSinkNode creates a new StdoutSink that will persist the events
// it processes using the specified expected format.
func NewStdoutSinkNode(format string) *StdoutSink {
func NewStdoutSinkNode(format string) (*StdoutSink, error) {
const op = "event.NewStdoutSinkNode"
format = strings.TrimSpace(format)
if format == "" {
return nil, fmt.Errorf("%s: format is required: %w", op, ErrInvalidParameter)
}
return &StdoutSink{
requiredFormat: format,
}
}, nil
}
// Process persists the provided eventlogger.Event to the standard output stream.
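Because the constructor can now fail, callers have to handle the returned error. A minimal hedged sketch (the surrounding error wrapping and format value are invented):

// Hypothetical caller: an empty or whitespace-only format is now rejected.
stdout, err := NewStdoutSinkNode("json")
if err != nil {
    return fmt.Errorf("failed to create stdout sink node: %w", err)
}
_ = stdout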

View File

@@ -6,6 +6,7 @@ package event
import (
"context"
"fmt"
"strings"
gsyslog "github.com/hashicorp/go-syslog"
@@ -25,6 +26,11 @@ type SyslogSink struct {
func NewSyslogSink(format string, opt ...Option) (*SyslogSink, error) {
const op = "event.NewSyslogSink"
format = strings.TrimSpace(format)
if format == "" {
return nil, fmt.Errorf("%s: format is required: %w", op, ErrInvalidParameter)
}
opts, err := getOpts(opt...)
if err != nil {
return nil, fmt.Errorf("%s: error applying options: %w", op, err)

View File

@@ -0,0 +1,57 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package event
import (
"testing"
"github.com/stretchr/testify/require"
)
// TestNewSyslogSink ensures that we validate the input arguments and can create
// the SyslogSink if everything goes to plan.
func TestNewSyslogSink(t *testing.T) {
t.Parallel()
tests := map[string]struct {
format string
opts []Option
want *SyslogSink
wantErr bool
expectedErrMsg string
}{
"format-empty": {
format: "",
wantErr: true,
expectedErrMsg: "event.NewSyslogSink: format is required: invalid parameter",
},
"format-whitespace": {
format: " ",
wantErr: true,
expectedErrMsg: "event.NewSyslogSink: format is required: invalid parameter",
},
"happy": {
format: "json",
},
}
for name, tc := range tests {
name := name
tc := tc
t.Run(name, func(t *testing.T) {
t.Parallel()
got, err := NewSyslogSink(tc.format, tc.opts...)
if tc.wantErr {
require.Error(t, err)
require.EqualError(t, err, tc.expectedErrMsg)
require.Nil(t, got)
} else {
require.NoError(t, err)
require.NotNil(t, got)
}
})
}
}

View File

@@ -11,6 +11,7 @@ import (
"io"
"io/ioutil"
"math/rand"
"net/url"
"os"
"path/filepath"
"strconv"
@@ -311,6 +312,33 @@ func EnsurePath(path string, dir bool) error {
return os.MkdirAll(path, 0o700)
}
func NewClusterAddrBridge() *ClusterAddrBridge {
return &ClusterAddrBridge{
clusterAddressByNodeID: make(map[string]string),
}
}
type ClusterAddrBridge struct {
l sync.RWMutex
clusterAddressByNodeID map[string]string
}
func (c *ClusterAddrBridge) UpdateClusterAddr(nodeId string, clusterAddr string) {
c.l.Lock()
defer c.l.Unlock()
cu, _ := url.Parse(clusterAddr)
c.clusterAddressByNodeID[nodeId] = cu.Host
}
func (c *ClusterAddrBridge) ServerAddr(id raft.ServerID) (raft.ServerAddress, error) {
c.l.RLock()
defer c.l.RUnlock()
if addr, ok := c.clusterAddressByNodeID[string(id)]; ok {
return raft.ServerAddress(addr), nil
}
return "", fmt.Errorf("could not find cluster addr for id=%s", id)
}
// NewRaftBackend constructs a RaftBackend using the given directory
func NewRaftBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
path := os.Getenv(EnvVaultRaftPath)
@@ -1344,7 +1372,7 @@ func (b *RaftBackend) AddPeer(ctx context.Context, peerID, clusterAddr string) e
if b.raft == nil {
return errors.New("raft storage is not initialized")
}
b.logger.Trace("adding server to raft", "id", peerID)
b.logger.Trace("adding server to raft", "id", peerID, "addr", clusterAddr)
future := b.raft.AddVoter(raft.ServerID(peerID), raft.ServerAddress(clusterAddr), 0, 0)
return future.Error()
}
@@ -1353,7 +1381,7 @@ func (b *RaftBackend) AddPeer(ctx context.Context, peerID, clusterAddr string) e
return errors.New("raft storage autopilot is not initialized")
}
b.logger.Trace("adding server to raft via autopilot", "id", peerID)
b.logger.Trace("adding server to raft via autopilot", "id", peerID, "addr", clusterAddr)
return b.autopilot.AddServer(&autopilot.Server{
ID: raft.ServerID(peerID),
Name: peerID,
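The ClusterAddrBridge added above acts as a raft server address provider, mapping a node ID to the host portion of its cluster address. A hedged sketch of how a test might wire it up (the node ID and address are made up):

// Hypothetical test wiring: record a core's cluster address, then resolve its
// raft server ID back to host:port.
bridge := NewClusterAddrBridge()
bridge.UpdateClusterAddr("core-0", "https://127.0.0.1:8201")
addr, err := bridge.ServerAddr(raft.ServerID("core-0"))
if err != nil {
    t.Fatal(err)
}
_ = addr // raft.ServerAddress("127.0.0.1:8201"); only the URL host is kept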

View File

@@ -174,7 +174,7 @@ func LeaderNode(ctx context.Context, cluster VaultCluster) (int, error) {
leaderActiveTimes := make(map[int]time.Time)
for i, node := range cluster.Nodes() {
client := node.APIClient()
ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
ctx, cancel := context.WithTimeout(ctx, 500*time.Millisecond)
resp, err := client.Sys().LeaderWithContext(ctx)
cancel()
if err != nil || resp == nil || !resp.IsSelf {

View File

@@ -20,3 +20,36 @@ type MarshalOptions struct {
type OptMarshaler interface {
MarshalJSONWithOptions(*MarshalOptions) ([]byte, error)
}
// LogInputBexpr is used for evaluating boolean expressions with go-bexpr.
type LogInputBexpr struct {
MountPoint string `bexpr:"mount_point"`
MountType string `bexpr:"mount_type"`
Namespace string `bexpr:"namespace"`
Operation string `bexpr:"operation"`
Path string `bexpr:"path"`
}
// BexprDatum returns values from a LogInput formatted for use in evaluating go-bexpr boolean expressions.
// The namespace should be supplied from the current request's context.
func (l *LogInput) BexprDatum(namespace string) *LogInputBexpr {
var mountPoint string
var mountType string
var operation string
var path string
if l.Request != nil {
mountPoint = l.Request.MountPoint
mountType = l.Request.MountType
operation = string(l.Request.Operation)
path = l.Request.Path
}
return &LogInputBexpr{
MountPoint: mountPoint,
MountType: mountType,
Namespace: namespace,
Operation: operation,
Path: path,
}
}
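To show how this datum plugs into filter evaluation, here is a hedged sketch using go-bexpr directly (the helper name, filter expression, and request values are invented; it assumes the github.com/hashicorp/go-bexpr import):

// matchesKVMount is a hypothetical helper that reports whether a LogInput
// refers to a "kv" mount, using the bexpr tags on LogInputBexpr.
func matchesKVMount(in *LogInput, ns string) (bool, error) {
    eval, err := bexpr.CreateEvaluator(`mount_type == "kv"`)
    if err != nil {
        return false, err
    }
    return eval.Evaluate(in.BexprDatum(ns))
}

Called with a request whose MountType is "kv", the evaluator returns true regardless of path, operation, or namespace.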

77
sdk/logical/audit_test.go Normal file
View File

@@ -0,0 +1,77 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package logical
import (
"testing"
"github.com/stretchr/testify/require"
)
// TestLogInput_BexprDatum ensures that we can transform a LogInput
// into a LogInputBexpr to be used in audit filtering.
func TestLogInput_BexprDatum(t *testing.T) {
t.Parallel()
tests := map[string]struct {
Request *Request
Namespace string
ExpectedPath string
ExpectedMountPoint string
ExpectedMountType string
ExpectedNamespace string
ExpectedOperation string
}{
"nil-no-namespace": {
Request: nil,
Namespace: "",
ExpectedPath: "",
ExpectedMountPoint: "",
ExpectedMountType: "",
ExpectedNamespace: "",
ExpectedOperation: "",
},
"nil-namespace": {
Request: nil,
Namespace: "juan",
ExpectedPath: "",
ExpectedMountPoint: "",
ExpectedMountType: "",
ExpectedNamespace: "juan",
ExpectedOperation: "",
},
"happy-path": {
Request: &Request{
MountPoint: "IAmAMountPoint",
MountType: "IAmAMountType",
Operation: CreateOperation,
Path: "IAmAPath",
},
Namespace: "juan",
ExpectedPath: "IAmAPath",
ExpectedMountPoint: "IAmAMountPoint",
ExpectedMountType: "IAmAMountType",
ExpectedNamespace: "juan",
ExpectedOperation: "create",
},
}
for name, tc := range tests {
name := name
tc := tc
t.Run(name, func(t *testing.T) {
t.Parallel()
l := &LogInput{Request: tc.Request}
d := l.BexprDatum(tc.Namespace)
require.Equal(t, tc.ExpectedPath, d.Path)
require.Equal(t, tc.ExpectedMountPoint, d.MountPoint)
require.Equal(t, tc.ExpectedMountType, d.MountType)
require.Equal(t, tc.ExpectedNamespace, d.Namespace)
require.Equal(t, tc.ExpectedOperation, d.Operation)
})
}
}

View File

@@ -46,10 +46,11 @@
Logging in with a SAML auth method requires a browser in a secure context.
</A.Description>
<A.Description class="has-top-margin-xs">
<ExternalLink @href="https://developer.mozilla.org/en-US/docs/Web/Security/Secure_Contexts">
Read more about secure contexts.
<Icon @name="external-link" />
</ExternalLink>
<Hds::Link::Standalone
@icon="external-link"
@text="Read more about secure contexts."
@href="https://developer.mozilla.org/en-US/docs/Web/Security/Secure_Contexts"
/>
</A.Description>
</Hds::Alert>
{{/if}}

View File

@@ -28,8 +28,6 @@ export default class CalendarWidget extends Component {
currentDate = timestamp.now();
@tracked calendarDisplayDate = this.currentDate; // init to current date, updates when user clicks on calendar chevrons
@tracked showCalendar = false;
@tracked tooltipTarget = null;
@tracked tooltipText = null;
// both date getters return a date object
get startDate() {
@@ -72,20 +70,6 @@ export default class CalendarWidget extends Component {
});
}
@action
addTooltip() {
if (this.disablePastYear) {
const previousYear = this.displayYear - 1;
this.tooltipText = `${previousYear} is unavailable because it is before your start date. Change your start month to a date in ${previousYear} to see data for this year.`;
this.tooltipTarget = '#previous-year';
}
}
@action
removeTooltip() {
this.tooltipTarget = null;
}
@action
addYear() {
this.calendarDisplayDate = addYears(this.calendarDisplayDate, 1);

View File

@@ -23,11 +23,11 @@
</div>
{{/if}}
<div class="field">
<Toolbar>
<Toolbar aria-label="toolbar for managing {{or @model.name 'new'}} policy">
<label class="has-text-weight-bold has-right-margin-xxs">Policy</label>
{{#if @renderPolicyExampleModal}}
{{! only true in policy create and edit routes }}
<ToolbarFilters>
<ToolbarFilters aria-label="help tools for managing {{or @model.name 'new'}} policy">
<Hds::Button
@text="How to write a policy"
@icon="bulb"
@@ -38,7 +38,7 @@
/>
</ToolbarFilters>
{{/if}}
<ToolbarActions>
<ToolbarActions aria-label="actions for managing {{or @model.name 'new'}} policy">
<div class="toolbar-separator"></div>
{{#if @model.isNew}}
<div class="control is-flex">

View File

@@ -13,7 +13,7 @@
@icon="vault"
@route="vault.cluster.dashboard"
@model={{this.currentCluster.cluster.name}}
@ariaLabel="home link"
@ariaLabel="Vault home"
data-test-sidebar-logo
/>
</:logo>
@@ -31,7 +31,7 @@
{{! this block is where the Hds::SideNav::Portal components render into }}
<:body>
<Hds::SideNav::Portal::Target aria-label="sidebar navigation links" />
<Hds::SideNav::Portal::Target />
</:body>
<:footer>

View File

@@ -50,9 +50,7 @@
{{#if
(or
(and
this.namespace.inRootNamespace (has-permission "status" routeParams=(array "replication" "raft" "license" "seal"))
)
(and this.isRootNamespace (has-permission "status" routeParams=(array "replication" "raft" "license" "seal")))
(has-permission "clients" routeParams="activity")
)
}}
@@ -61,7 +59,7 @@
{{#if
(and
this.version.isEnterprise
this.namespace.inRootNamespace
this.isRootNamespace
(not this.cluster.replicationRedacted)
(has-permission "status" routeParams="replication")
)
@@ -73,7 +71,7 @@
@hasSubItems={{true}}
/>
{{/if}}
{{#if (and this.cluster.usingRaft this.namespace.inRootNamespace (has-permission "status" routeParams="raft"))}}
{{#if (and this.cluster.usingRaft this.isRootNamespace (has-permission "status" routeParams="raft"))}}
<Nav.Link
@route="vault.cluster.storage"
@model={{this.cluster.name}}
@@ -87,7 +85,7 @@
{{#if
(and
this.version.features
this.namespace.inRootNamespace
this.isRootNamespace
(has-permission "status" routeParams="license")
(not this.cluster.dr.isSecondary)
)
@@ -99,7 +97,7 @@
data-test-sidebar-nav-link="License"
/>
{{/if}}
{{#if (and this.namespace.inRootNamespace (has-permission "status" routeParams="seal") (not this.cluster.dr.isSecondary))}}
{{#if (and this.isRootNamespace (has-permission "status" routeParams="seal") (not this.cluster.dr.isSecondary))}}
<Nav.Link
@route="vault.cluster.settings.seal"
@model={{this.cluster.name}}

View File

@@ -15,4 +15,9 @@ export default class SidebarNavClusterComponent extends Component {
get cluster() {
return this.currentCluster.cluster;
}
get isRootNamespace() {
// should only return true if we're in the true root namespace
return this.namespace.inRootNamespace && !this.currentCluster.hasChrootNamespace;
}
}

View File

@@ -10,6 +10,7 @@ import Component from '@ember/component';
import { setProperties, computed, set } from '@ember/object';
import { addSeconds, parseISO } from 'date-fns';
import { A } from '@ember/array';
import { capitalize } from '@ember/string';
const DEFAULTS = {
token: null,
@@ -30,6 +31,7 @@ const DEFAULTS = {
const WRAPPING_ENDPOINTS = ['lookup', 'wrap', 'unwrap', 'rewrap'];
export default Component.extend(DEFAULTS, {
flashMessages: service(),
store: service(),
// putting these attrs here so they don't get reset when you click back
//random
@@ -97,6 +99,7 @@ export default Component.extend(DEFAULTS, {
props = assign({}, props, { [keyName]: resp.wrap_info.token });
}
setProperties(this, props);
this.flashMessages.success(`${capitalize(action)} was successful.`);
},
getData() {

View File

@@ -51,7 +51,7 @@ const ModelExport = AuthMethodModel.extend({
}),
sealWrap: attr('boolean', {
helpText:
'When enabled - if a seal supporting seal wrapping is specified in the configuration, all critical security parameters (CSPs) in this backend will be seal wrapped. (For K/V mounts, all values will be seal wrapped.) This can only be specified at mount time.',
'When enabled - if a seal supporting seal wrapping is specified in the configuration, all critical security parameters (CSPs) in this backend will be seal wrapped. (For KV mounts, all values will be seal wrapped.) This can only be specified at mount time.',
}),
// used when the `auth` prefix is important,

View File

@@ -52,7 +52,7 @@ export default class SecretEngineModel extends Model {
local;
@attr('boolean', {
helpText:
'When enabled - if a seal supporting seal wrapping is specified in the configuration, all critical security parameters (CSPs) in this backend will be seal wrapped. (For K/V mounts, all values will be seal wrapped.) This can only be specified at mount time.',
'When enabled - if a seal supporting seal wrapping is specified in the configuration, all critical security parameters (CSPs) in this backend will be seal wrapped. (For KV mounts, all values will be seal wrapped.) This can only be specified at mount time.',
})
sealWrap;
@attr('boolean') externalEntropyAccess;

View File

@@ -4,6 +4,7 @@
*/
import Service, { inject as service } from '@ember/service';
import { sanitizePath, sanitizeStart } from 'core/utils/sanitize-path';
import { task } from 'ember-concurrency';
const API_PATHS = {
@@ -65,6 +66,7 @@ export default Service.extend({
globPaths: null,
canViewAll: null,
readFailed: false,
chrootNamespace: null,
store: service(),
auth: service(),
namespace: service(),
@@ -89,6 +91,7 @@ export default Service.extend({
this.set('exactPaths', resp.data.exact_paths);
this.set('globPaths', resp.data.glob_paths);
this.set('canViewAll', resp.data.root);
this.set('chrootNamespace', resp.data.chroot_namespace);
this.set('readFailed', false);
},
@@ -97,6 +100,7 @@ export default Service.extend({
this.set('globPaths', null);
this.set('canViewAll', null);
this.set('readFailed', false);
this.set('chrootNamespace', null);
},
hasNavPermission(navItem, routeParams, requireAll) {
@@ -124,20 +128,21 @@ export default Service.extend({
},
pathNameWithNamespace(pathName) {
const namespace = this.namespace.path;
const namespace = this.chrootNamespace
? `${sanitizePath(this.chrootNamespace)}/${sanitizePath(this.namespace.path)}`
: sanitizePath(this.namespace.path);
if (namespace) {
return `${namespace}/${pathName}`;
return `${sanitizePath(namespace)}/${sanitizeStart(pathName)}`;
} else {
return pathName;
}
},
hasPermission(pathName, capabilities = [null]) {
const path = this.pathNameWithNamespace(pathName);
if (this.canViewAll) {
return true;
}
const path = this.pathNameWithNamespace(pathName);
return capabilities.every(
(capability) =>

View File

@@ -37,6 +37,6 @@
}
.is-label {
color: $grey;
color: var(--token-color-foreground-faint);
}
}

View File

@@ -32,30 +32,6 @@ $dark-gray: #535f73;
.calendar-widget {
grid-area: calendar-widget;
> button {
&.is-month-list {
background-color: $white;
color: black;
text-align: center;
border: $light-border;
border-radius: $radius;
}
&.is-current-month {
border: 1px solid $ui-gray-900;
}
&:hover {
background-color: lighten($dark-gray, 30%);
color: $white;
text-align: center;
cursor: pointer;
}
&.is-readOnly {
background-color: $ui-gray-100;
color: lighten($dark-gray, 30%);
pointer-events: none;
}
}
}
.border-col {
@@ -84,15 +60,6 @@ $dark-gray: #535f73;
}
// for modal-dialog tooltips
.calendar-tooltip {
background-color: $ui-gray-700;
color: $white;
font-size: $size-8;
padding: $spacing-10;
border-radius: $radius-large;
width: 141px;
}
.ember-modal-dialog {
z-index: 1000;
}

View File

@@ -50,6 +50,10 @@ $console-close-height: 35px;
margin-left: $spacing-20;
padding: $spacing-12 $spacing-20;
}
.console-ui-panel-intro {
color: var(--token-color-palette-neutral-400);
}
}
.console-ui-input {
@@ -83,6 +87,7 @@ $console-close-height: 35px;
padding-right: $spacing-36;
position: relative;
background-color: rgba(#000, 0);
color: var(--token-color-palette-neutral-400);
&:hover {
background-color: rgba(#000, 0.5);
}
@@ -91,12 +96,24 @@ $console-close-height: 35px;
.console-ui-alert {
margin-left: calc(#{$spacing-20} - 0.33rem);
position: relative;
color: var(--token-color-palette-neutral-400);
svg {
position: absolute;
left: 0;
top: 0;
}
&.console-ui-alert--error {
// HDS tokens are not light enough on the dark background to pass a11y tests.
// hex value for --token-color-foreground-critical
color: lighten(#e52228, 20%);
}
&.console-ui-alert--success {
// HDS tokens are not light enough on the dark background to pass a11y tests.
// hex value for --token-color-foreground-success
color: lighten(#008a22, 20%);
}
}
.panel-open .console-ui-panel {

View File

@@ -5,7 +5,6 @@
.doc-link {
color: $blue;
text-decoration: none;
font-weight: $font-weight-semibold;
&:hover {
text-decoration: underline !important;

View File

@@ -5,8 +5,8 @@
.empty-state {
align-items: center;
color: $grey;
background: $ui-gray-010;
color: var(--token-color-foreground-faint);
background: var(--token-color-surface-faint);
display: flex;
justify-content: center;
padding: $spacing-48 $spacing-12;
@@ -15,7 +15,7 @@
.empty-state-transparent {
align-items: center;
color: $grey;
color: var(--token-color-foreground-faint);
background: transparent;
display: flex;
justify-content: center;
@@ -50,12 +50,6 @@
display: flex;
justify-content: space-between;
a,
.link,
a:not(.button):not(.file-delete-button):not(.tag) {
text-decoration: none;
}
> * + * {
margin-left: $spacing-12;
margin-right: $spacing-12;

View File

@@ -3,8 +3,9 @@
* SPDX-License-Identifier: BUSL-1.1
*/
.masked-font {
color: $ui-gray-300;
.masked-font,
pre.masked-font {
color: var(--token-color-foreground-faint);
}
.masked-input {
@@ -84,7 +85,7 @@
}
.masked-input.masked .masked-value {
color: $grey-light;
color: var(--token-color-foreground-faint);
}
.masked-input .input:focus + .masked-input-toggle {

View File

@@ -83,7 +83,7 @@
}
.radio-card-message-body {
line-height: 1.2;
color: $ui-gray-500;
color: var(--token-color-foreground-faint);
font-size: $size-8;
}

View File

@@ -113,7 +113,7 @@ div > .ember-power-select-options {
}
.search-select-list-key {
color: $grey;
color: var(--token-color-foreground-faint);
font-size: $size-8;
}

View File

@@ -7,21 +7,24 @@
font-size: $size-7;
text-transform: none;
margin: 8px 0px 0 -4px;
border: none;
border-radius: $radius-large;
.box {
position: relative;
color: $white;
max-width: 200px;
background: $grey;
background: $black;
padding: 0.5rem;
line-height: 1.4;
border-radius: $radius-large;
}
.fit-content {
max-width: fit-content;
}
@include css-top-arrow(8px, $grey, 1px, $grey-dark, 20px);
@include css-top-arrow(8px, $black, 1px, $black, 20px);
&.ember-basic-dropdown-content--below.ember-basic-dropdown--transitioning-in {
animation: drop-fade-above 0.15s;
}
@@ -53,17 +56,17 @@
}
.ember-basic-dropdown-content--below.ember-basic-dropdown-content--left.tool-tip {
@include css-top-arrow(8px, $grey, 1px, $grey-dark, calc(100% - 20px));
@include css-top-arrow(8px, $black, 1px, $black, calc(100% - 20px));
}
.ember-basic-dropdown-content--below.ember-basic-dropdown-content--right.tool-tip {
@include css-top-arrow(8px, $grey, 1px, $grey-dark, calc(100% - 20px));
@include css-top-arrow(8px, $black, 1px, $black, calc(100% - 20px));
}
.ember-basic-dropdown-content--above.tool-tip {
@include css-bottom-arrow(8px, $grey, 1px, $grey-dark);
@include css-bottom-arrow(8px, $black, 1px, $black);
margin-top: -8px;
}
.ember-basic-dropdown-content--above.ember-basic-dropdown-content--right.tool-tip {
@include css-bottom-arrow(8px, $grey, 1px, $grey-dark, calc(100% - 20px));
@include css-bottom-arrow(8px, $black, 1px, $black, calc(100% - 20px));
}
.b-checkbox .tool-tip-trigger {

View File

@@ -250,7 +250,7 @@ p.data-details {
}
.chart-tooltip {
background-color: $ui-gray-700;
background-color: $black;
color: white;
font-size: $size-9;
padding: 6px;
@@ -282,7 +282,7 @@ p.data-details {
height: 0;
border-left: 5px solid transparent;
border-right: 5px solid transparent;
border-top: 9px solid $ui-gray-700;
border-top: 9px solid $black;
position: absolute;
opacity: 0.8;
bottom: -9px;

View File

@@ -33,7 +33,8 @@ h6 {
a:hover,
body,
pre,
// default set here is too dark for HDS codeblock
pre:not(.hds-code-block__code),
strong,
table th {
color: $ui-gray-900;

View File

@@ -81,5 +81,5 @@
.input-hint {
padding: 0 $spacing-10;
font-size: $size-8;
color: $grey;
color: var(--token-color-foreground-faint);
}

View File

@@ -9,15 +9,23 @@
.jsondiffpatch-deleted pre,
.jsondiffpatch-modified .jsondiffpatch-left-value pre,
.jsondiffpatch-textdiff-deleted {
background: $red-500;
background: var(--token-color-foreground-critical-high-contrast);
}
.jsondiffpatch-added .jsondiffpatch-property-name,
.jsondiffpatch-added .jsondiffpatch-value pre,
.jsondiffpatch-modified .jsondiffpatch-right-value pre,
.jsondiffpatch-textdiff-added {
background: $green-500;
background: var(--token-color-foreground-success-high-contrast);
}
.jsondiffpatch-property-name {
color: $ui-gray-300;
}
.jsondiffpatch-added > .jsondiffpatch-property-name {
color: var(--token-color-surface-success);
}
.jsondiffpatch-deleted > .jsondiffpatch-property-name {
color: var(--token-color-surface-critical);
}

View File

@@ -21,7 +21,3 @@
cursor: default;
}
}
// NICE TO HAVE: replace all instances with helper "is-no-underline"
.link-plain {
text-decoration: none;
}

View File

@@ -7,9 +7,10 @@
.tag:not(body) {
align-items: center;
background-color: $ui-gray-100;
// same as HDS::Badge @color=neutral
background-color: var(--token-color-surface-strong);
color: var(--token-color-foreground-primary);
border-radius: $radius;
color: $grey;
display: inline-flex;
font-size: $size-8;
font-weight: $font-weight-normal;
@@ -22,7 +23,7 @@
vertical-align: middle;
code {
color: $grey;
color: var(--token-color-foreground-primary);
}
.icon {

View File

@@ -60,15 +60,11 @@ select.has-error-border,
}
.has-text-grey-light {
color: $ui-gray-300 !important;
}
.has-text-grey-400 {
color: $ui-gray-400;
color: var(--token-color-foreground-faint) !important;
}
.has-text-grey {
color: $ui-gray-500 !important;
color: var(--token-color-foreground-faint) !important;
}
.has-text-grey-dark {
@@ -92,7 +88,7 @@ select.has-error-border,
}
.has-text-success {
color: $green-500 !important;
color: var(--token-color-foreground-success) !important;
}
.has-text-highlight {

View File

@@ -101,7 +101,7 @@
}
.sub-text {
color: $grey;
color: var(--token-color-foreground-faint);
margin-bottom: $size-11;
font-size: $size-8;

View File

@@ -140,6 +140,7 @@
autocomplete="off"
spellcheck="false"
data-test-token={{true}}
id="token"
/>
</div>
</div>

View File

@@ -28,16 +28,17 @@
</div>
{{#unless this.isOIDC}}
<div class="field">
<label for="token" class="is-label">JWT Token</label>
<label for="jwt-token" class="is-label">JWT Token</label>
<div class="control">
<Input
@type="password"
@value={{this.jwt}}
id="jwt-token"
name="jwt"
class="input"
autocomplete="off"
spellcheck="false"
data-test-jwt={{true}}
data-test-jwt
/>
</div>
</div>

View File

@@ -11,16 +11,14 @@
<Chevron @direction="down" @isButton={{true}} />
</D.Trigger>
<D.Content @defaultClass={{concat "popup-menu-content calendar-content" (if this.showCalendar " calendar-open")}}>
<nav class="box menu">
<div class="calendar-title is-subtitle-gray">
DATE OPTIONS
</div>
<nav class="box menu" aria-label="calendar date selector navigation">
<Hds::Text::Body class="calendar-title" @color="faint">DATE OPTIONS</Hds::Text::Body>
{{! TODO Hds::Dropdown swapout }}
<ul class="menu-list">
<li class="action">
<button
data-test-current-month
class="link link-plain has-text-weight-semibold is-ghost"
class="link is-no-underline has-text-weight-semibold is-ghost"
type="button"
name="currentMonth"
{{on "click" (fn this.handleDateShortcut D.actions)}}
@@ -31,7 +29,7 @@
<li class="action">
<button
data-test-current-billing-period
class="link link-plain has-text-weight-semibold is-ghost"
class="link is-no-underline has-text-weight-semibold is-ghost"
type="button"
name="reset"
{{on "click" (fn this.handleDateShortcut D.actions)}}
@@ -42,7 +40,7 @@
<li class="action">
<button
data-test-show-calendar
class={{concat "link link-plain has-text-weight-semibold is-ghost" (if this.showCalendar " is-active")}}
class={{concat "link is-no-underline has-text-weight-semibold is-ghost" (if this.showCalendar " is-active")}}
type="button"
{{on "click" this.toggleShowCalendar}}
>
@@ -59,46 +57,72 @@
{{#if this.showCalendar}}
<div class="calendar-widget-container" data-test-calendar-widget-container>
<div class="select-year">
{{#if this.disablePastYear}}
<Hds::TooltipButton
@text={{concat
(sub this.displayYear 1)
" is unavailable because it is before your start date. Change your start month to a date in "
(sub this.displayYear 1)
" to see data for this year."
}}
aria-label="More information about why previous year is disabled"
>
<Hds::Button
data-test-previous-year
id="previous-year"
disabled={{this.disablePastYear}}
disabled={{true}}
{{on "click" this.subYear}}
@color="secondary"
@text="previous year"
@icon="chevron-left"
@isIconOnly={{true}}
/>
<p data-test-display-year>
</Hds::TooltipButton>
{{else}}
<Hds::Button
data-test-previous-year
id="previous-year"
{{on "click" this.subYear}}
@color="secondary"
@text="previous year"
@icon="chevron-left"
@isIconOnly={{true}}
/>
{{/if}}
<Hds::Text::Display data-test-display-year>
{{this.displayYear}}
</p>
</Hds::Text::Display>
{{#if this.disableFutureYear}}
<Hds::TooltipButton
@text={{concat
(add this.displayYear 1)
" is unavailable because data is only available up to the current month."
}}
aria-label="More information about why next year is disabled"
>
<Hds::Button
data-test-next-year
id="next-year"
disabled={{true}}
{{on "click" this.addYear}}
@color="secondary"
@text="next year"
@icon="chevron-right"
@isIconOnly={{true}}
/>
</Hds::TooltipButton>
{{else}}
<Hds::Button
data-test-next-year
id="next-year"
disabled={{this.disableFutureYear}}
{{on "click" this.addYear}}
@color="secondary"
@text="next year"
@icon="chevron-right"
@isIconOnly={{true}}
/>
{{#if this.tooltipTarget}}
{{! Component must be in curly bracket notation }}
{{! template-lint-disable no-curly-component-invocation }}
{{#modal-dialog
tagName="div"
tetherTarget=this.tooltipTarget
targetAttachment="top right"
attachment="top middle"
offset="150px 0"
}}
<div class={{"calendar-tooltip"}}>
<p>
{{this.tooltipText}}
</p>
</div>
<div class="chart-tooltip-arrow"></div>
{{/modal-dialog}}
{{/if}}
</div>
<div class="calendar-widget-grid calendar-widget">
@@ -107,7 +131,7 @@
@text={{month.name}}
@color="secondary"
@size="small"
class="is-month-list {{if month.readonly 'is-readOnly'}}"
disabled={{month.readonly}}
data-test-calendar-month={{month.name}}
id={{month.index}}
{{on "click" (fn this.selectMonth month D.actions)}}

View File

@@ -6,8 +6,8 @@
<div class="box is-sideless is-fullwidth is-marginless is-bottomless">
<p class="has-bottom-margin-xl">
This dashboard will surface Vault client usage over time. Clients represent a user or service that has authenticated to
Vault. Documentation is available
<DocLink @path="/vault/docs/concepts/client-count">here.</DocLink>
Vault.
<Hds::Link::Inline @href={{doc-link "/vault/docs/concepts/client-count"}}>Documentation is available here</Hds::Link::Inline>.
Date queries are sent in UTC.
</p>
<h2 class="title is-6 has-bottom-margin-xs">

View File

@@ -4,8 +4,9 @@
~}}
<div class="tabs-container box is-sideless is-fullwidth is-paddingless is-marginless">
<nav class="tabs">
<nav class="tabs" aria-label="navigation to configure A-W-S backend">
<ul>
<li>
<LinkTo
@route="vault.cluster.settings.configure-secret-backend"
@model={{@model.id}}
@@ -14,6 +15,8 @@
>
Dynamic IAM root credentials
</LinkTo>
</li>
<li>
<LinkTo
@route="vault.cluster.settings.configure-secret-backend"
@model={{@model.id}}
@@ -22,6 +25,7 @@
>
Leases
</LinkTo>
</li>
</ul>
</nav>
</div>

View File

@@ -9,27 +9,21 @@
{{else}}
<Chevron />
{{/if}}
<input onkeyup={{action "handleKeyUp"}} value={{this.value}} autocomplete="off" spellcheck="false" />
<ToolTip @horizontalPosition="auto-right" @verticalPosition="above" as |d|>
<d.Trigger>
<input
aria-label="command input"
onkeyup={{action "handleKeyUp"}}
value={{this.value}}
autocomplete="off"
spellcheck="false"
/>
<Hds::Button
class="hds-side-nav__icon-button"
{{on "click" (action "fullscreen")}}
{{hds-tooltip (if this.isFullscreen "minimize" "maximize")}}
data-test-tool-tip-trigger
@icon={{if this.isFullscreen "minimize" "maximize"}}
@text={{if this.isFullscreen "Minimize" "Maximize"}}
@isIconOnly={{true}}
/>
</d.Trigger>
<d.Content @defaultClass="tool-tip">
<div class="box">
{{#if this.isFullscreen}}
Minimize
{{else}}
Maximize
{{/if}}
</div>
</d.Content>
</ToolTip>
</div>
<NamespaceReminder @class="console-reminder" @mode="execute" @noun="command" />

View File

@@ -4,7 +4,7 @@
~}}
{{! template-lint-disable no-triple-curlies}}
<div class="console-ui-alert has-text-danger">
<div class="console-ui-alert console-ui-alert--error">
<Icon @name="x-circle-fill" />
<pre>{{{@content}}}</pre>
</div>

View File

@@ -3,7 +3,7 @@
SPDX-License-Identifier: BUSL-1.1
~}}
<div class="console-ui-alert has-text-danger">
<div class="console-ui-alert console-ui-alert--error">
<Icon @name="x-circle-fill" />
<pre>{{@content}}</pre>
</div>

View File

@@ -5,7 +5,7 @@
{{! template-lint-disable no-whitespace-for-layout }}
{{! prettier-ignore }}
<div class="console-ui-alert has-text-grey">
<div class="console-ui-alert">
<Icon @name="info" />
<pre>Usage: vault &lt;command&gt; [args]

View File

@@ -3,7 +3,7 @@
SPDX-License-Identifier: BUSL-1.1
~}}
<div class="console-ui-output has-text-grey">
<div class="console-ui-output">
<JsonEditor
@showToolbar={{false}}
@value={{stringify this.content}}

View File

@@ -3,7 +3,7 @@
SPDX-License-Identifier: BUSL-1.1
~}}
<div class="console-ui-output has-text-grey">
<div class="console-ui-output">
<pre>
Keys
{{#each this.list as |item|}}

View File

@@ -3,7 +3,7 @@
SPDX-License-Identifier: BUSL-1.1
~}}
<div class="console-ui-output has-text-grey">
<div class="console-ui-output">
<pre>{{this.columns}}</pre>
<Hds::Copy::Button
@text="Copy"

View File

@@ -3,7 +3,7 @@
SPDX-License-Identifier: BUSL-1.1
~}}
<div class="console-ui-alert has-text-success">
<div class="console-ui-alert console-ui-alert--success">
<Icon @name="check-circle-fill" />
<pre>{{@content}}</pre>
</div>

View File

@@ -3,7 +3,7 @@
SPDX-License-Identifier: BUSL-1.1
~}}
<div class="console-ui-output has-text-grey">
<div class="console-ui-output">
<pre>{{@content}}</pre>
<Hds::Copy::Button
@text="Copy"

View File

@@ -15,18 +15,19 @@
</div>
<div class="console-ui-panel-content">
<div class="content has-bottom-margin-l">
<p class="has-text-grey is-font-mono has-bottom-margin-s">
<p class="console-ui-panel-intro is-font-mono has-bottom-margin-s">
The Vault Browser CLI provides an easy way to execute common Vault CLI commands, such as write, read, delete, and list.
It does not include kv v2 write or put commands. For guidance, type `help`.
</p>
<p class="has-text-grey is-font-mono has-bottom-margin-s">Examples:</p>
<p class="has-text-grey is-font-mono">→ Write secrets to kv v1: write &lt;mount&gt;/my-secret foo=bar</p>
<p class="has-text-grey is-font-mono">→ List kv v1 secret keys: list &lt;mount&gt;/</p>
<p class="has-text-grey is-font-mono">→ Read a kv v1 secret: read &lt;mount&gt;/my-secret</p>
<p class="has-text-grey is-font-mono">→ Mount a kv v2 secret engine: write sys/mounts/&lt;mount&gt; type=kv
<p class="console-ui-panel-intro is-font-mono has-bottom-margin-s">Examples:</p>
<p class="console-ui-panel-intro is-font-mono">→ Write secrets to kv v1: write &lt;mount&gt;/my-secret foo=bar</p>
<p class="console-ui-panel-intro is-font-mono">→ List kv v1 secret keys: list &lt;mount&gt;/</p>
<p class="console-ui-panel-intro is-font-mono">→ Read a kv v1 secret: read &lt;mount&gt;/my-secret</p>
<p class="console-ui-panel-intro is-font-mono">→ Mount a kv v2 secret engine: write sys/mounts/&lt;mount&gt; type=kv
options=version=2</p>
<p class="has-text-grey is-font-mono">→ Read a kv v2 secret: kv-get &lt;mount&gt;/secret-path</p>
<p class="has-text-grey is-font-mono">→ Read a kv v2 secret's metadata: kv-get &lt;mount&gt;/secret-path -metadata</p>
<p class="console-ui-panel-intro is-font-mono">→ Read a kv v2 secret: kv-get &lt;mount&gt;/secret-path</p>
<p class="console-ui-panel-intro is-font-mono">→ Read a kv v2 secret's metadata: kv-get &lt;mount&gt;/secret-path
-metadata</p>
</div>
<Console::OutputLog @outputLog={{this.cliLog}} />
<Console::CommandInput

View File

@@ -60,6 +60,7 @@
autocomplete="off"
spellcheck="false"
name="token"
id="token"
@value={{this.token}}
/>
</div>

View File

@@ -9,7 +9,7 @@
Client count
</h3>
<LinkTo @route="vault.cluster.clients.dashboard" class="is-no-underline">Details</LinkTo>
<LinkTo @route="vault.cluster.clients.dashboard">Details</LinkTo>
</div>
<hr class="has-background-gray-100" />

View File

@@ -28,7 +28,7 @@
</div>
{{#if this.selectedEngine}}
<h4 class="title is-6" data-test-card-subtitle="secrets-engines">Action</h4>
<h4 id="action-select-title" class="title is-6" data-test-card-subtitle="secrets-engines">Action</h4>
<Select
@name="action-select"
@options={{this.actionOptions}}
@@ -36,6 +36,7 @@
@selectedValue={{this.selectedAction}}
@onChange={{this.setSelectedAction}}
@noDefault={{true}}
@ariaLabel="Action"
/>
{{#if this.searchSelectParams.model}}

View File

@@ -10,7 +10,7 @@
</h3>
{{#if (or @replication.dr.clusterId @replication.performance.clusterId)}}
<LinkTo class="is-no-underline" @route="vault.cluster.replication.index">
<LinkTo @route="vault.cluster.replication.index">
Details
</LinkTo>
{{/if}}

View File

@@ -12,7 +12,7 @@
<h3 class="title is-4 has-left-margin-xxs" data-test-dashboard-card-header="Secrets engines">Secrets engines</h3>
{{#if this.filteredSecretsEngines}}
<LinkTo class="is-no-underline has-right-margin-xxs" @route="vault.cluster.secrets.backends">
<LinkTo class="has-right-margin-xxs" @route="vault.cluster.secrets.backends">
Details
</LinkTo>
{{/if}}
@@ -27,16 +27,12 @@
<div>
<div class="is-flex-center">
{{#if backend.icon}}
<ToolTip @horizontalPosition="left" as |T|>
<T.Trigger>
<Icon @name={{backend.icon}} class={{unless backend.isSupportedBackend "has-text-grey"}} />
</T.Trigger>
<T.Content @defaultClass="tool-tip">
<div class="box">
{{or backend.engineType backend.path}}
</div>
</T.Content>
</ToolTip>
<Icon
@name={{backend.icon}}
@title="{{or backend.engineType backend.path}} type backend"
title={{or backend.engineType backend.path}}
class={{unless backend.isSupportedBackend "has-text-grey"}}
/>
{{/if}}
{{#if backend.path}}
{{#if backend.isSupportedBackend}}
@@ -65,12 +61,7 @@
{{/if}}
</div>
{{#if backend.isSupportedBackend}}
<LinkTo
@route={{backend.backendLink}}
@model={{backend.id}}
class="has-text-weight-semibold is-no-underline"
data-test-view
>
<LinkTo @route={{backend.backendLink}} @model={{backend.id}} class="has-text-weight-semibold" data-test-view>
View
</LinkTo>
{{/if}}

View File

@@ -19,7 +19,7 @@
{{#let (tabs-for-auth-section @methodModel "authConfig" @paths) as |tabs|}}
{{#if tabs.length}}
<div class="tabs-container box is-sideless is-fullwidth is-paddingless is-marginless">
<nav class="tabs">
<nav class="tabs" aria-label="navigation to manage {{@methodModel.id}}">
<ul>
{{#each tabs as |tab|}}
<li>

View File

@@ -11,14 +11,18 @@
</p.levelLeft>
</PageHeader>
<div class="tabs-container box is-sideless is-fullwidth is-paddingless is-marginless">
<nav class="tabs">
<nav class="tabs" aria-label="navigation for entities">
<ul>
<li>
<LinkTo @route="vault.cluster.access.identity.index" @model={{pluralize this.identityType}}>
{{capitalize (pluralize this.identityType)}}
</LinkTo>
</li>
<li>
<LinkTo @route="vault.cluster.access.identity.aliases.index" @model={{pluralize this.identityType}}>
Aliases
</LinkTo>
</li>
</ul>
</nav>
</div>

View File

@@ -5,7 +5,7 @@
<PopupMenu @name="alias-menu">
{{#let (get this.params "0") as |item|}}
<nav class="menu">
<nav class="menu" aria-label="navigation for managing aliases">
<ul class="menu-list">
<li class="action">
<LinkTo

View File

@@ -4,7 +4,7 @@
~}}
<PopupMenu @name="member-edit-menu">
<nav class="menu">
<nav class="menu" aria-label="navigation for managing identity members">
<ul class="menu-list">
<li class="action">
<ConfirmAction

View File

@@ -4,7 +4,7 @@
~}}
<PopupMenu @name="metadata-edit-menu">
<nav class="menu">
<nav class="menu" aria-label="navigation for managing identity metadata">
<ul class="menu-list">
<li class="action">
<ConfirmAction

View File

@@ -4,7 +4,7 @@
~}}
<PopupMenu @name="policy-menu">
<nav class="menu">
<nav class="menu" aria-label="navigation for managing identity policies">
<ul class="menu-list">
<li class="action">
<LinkTo @route="vault.cluster.policy.show" @models={{array "acl" this.policyName}}>

Some files were not shown because too many files have changed in this diff.