Auto Auth Healing for Proxy (#26307)

* Auto Auth Healing for Proxy

* Edited changelog

* Fix failing tests and small comment change

* Re-added check because the proxy cache is initialized with an inmem sink
This commit is contained in:
divyaac
2024-04-09 12:35:39 -07:00
committed by GitHub
parent d56f564362
commit fcef8adfb0
11 changed files with 1243 additions and 156 deletions

4
changelog/26307.txt Normal file
View File

@@ -0,0 +1,4 @@
```release-note:improvement
proxy: Proxy will re-trigger auto auth if the token used for requests has been revoked, has exceeded the number of uses,
or is an otherwise invalid value.
```

View File

@@ -16,6 +16,7 @@ import (
"sort"
"strings"
"sync"
"sync/atomic"
"time"
systemd "github.com/coreos/go-systemd/daemon"
@@ -540,6 +541,83 @@ func (c *AgentCommand) Run(args []string) int {
}
}
// Create the AuthHandler, SinkServer, TemplateServer, and ExecServer now so that we can pass AuthHandler struct
// values into the Proxy http.Handler. We will wait to actually start these servers
// once we have configured the handlers for each listener below
authInProgress := &atomic.Bool{}
invalidTokenErrCh := make(chan error)
var ah *auth.AuthHandler
var ss *sink.SinkServer
var ts *template.Server
var es *exec.Server
if method != nil {
enableTemplateTokenCh := len(config.Templates) > 0
enableEnvTemplateTokenCh := len(config.EnvTemplates) > 0
// Auth Handler is going to set its own retry values, so we want to
// work on a copy of the client to not affect other subsystems.
ahClient, err := c.client.CloneWithHeaders()
if err != nil {
c.UI.Error(fmt.Sprintf("Error cloning client for auth handler: %v", err))
return 1
}
// Override the set namespace with the auto-auth specific namespace
if !namespaceSetByEnvironmentVariable && config.AutoAuth.Method.Namespace != "" {
ahClient.SetNamespace(config.AutoAuth.Method.Namespace)
}
if config.DisableIdleConnsAutoAuth {
ahClient.SetMaxIdleConnections(-1)
}
if config.DisableKeepAlivesAutoAuth {
ahClient.SetDisableKeepAlives(true)
}
ah = auth.NewAuthHandler(&auth.AuthHandlerConfig{
Logger: c.logger.Named("auth.handler"),
Client: ahClient,
WrapTTL: config.AutoAuth.Method.WrapTTL,
MinBackoff: config.AutoAuth.Method.MinBackoff,
MaxBackoff: config.AutoAuth.Method.MaxBackoff,
EnableReauthOnNewCredentials: config.AutoAuth.EnableReauthOnNewCredentials,
EnableTemplateTokenCh: enableTemplateTokenCh,
EnableExecTokenCh: enableEnvTemplateTokenCh,
Token: previousToken,
ExitOnError: config.AutoAuth.Method.ExitOnError,
UserAgent: useragent.AgentAutoAuthString(),
MetricsSignifier: "agent",
})
ss = sink.NewSinkServer(&sink.SinkServerConfig{
Logger: c.logger.Named("sink.server"),
Client: ahClient,
ExitAfterAuth: config.ExitAfterAuth,
})
ts = template.NewServer(&template.ServerConfig{
Logger: c.logger.Named("template.server"),
LogLevel: c.logger.GetLevel(),
LogWriter: c.logWriter,
AgentConfig: c.config,
Namespace: templateNamespace,
ExitAfterAuth: config.ExitAfterAuth,
})
es, err = exec.NewServer(&exec.ServerConfig{
AgentConfig: c.config,
Namespace: templateNamespace,
Logger: c.logger.Named("exec.server"),
LogLevel: c.logger.GetLevel(),
LogWriter: c.logWriter,
})
if err != nil {
c.logger.Error("could not create exec server", "error", err)
return 1
}
}
var listeners []net.Listener
// If there are templates, add an in-process listener
@@ -578,31 +656,28 @@ func (c *AgentCommand) Run(args []string) int {
listeners = append(listeners, ln)
proxyVaultToken := true
var inmemSink sink.Sink
apiProxyLogger.Debug("auto-auth token is allowed to be used; configuring inmem sink")
inmemSink, err := inmem.New(&sink.SinkConfig{
Logger: apiProxyLogger,
}, leaseCache)
if err != nil {
c.UI.Error(fmt.Sprintf("Error creating inmem sink for cache: %v", err))
c.tlsReloadFuncsLock.Unlock()
return 1
}
sinks = append(sinks, &sink.SinkConfig{
Logger: apiProxyLogger,
Sink: inmemSink,
})
if config.APIProxy != nil {
if config.APIProxy.UseAutoAuthToken {
apiProxyLogger.Debug("auto-auth token is allowed to be used; configuring inmem sink")
inmemSink, err = inmem.New(&sink.SinkConfig{
Logger: apiProxyLogger,
}, leaseCache)
if err != nil {
c.UI.Error(fmt.Sprintf("Error creating inmem sink for cache: %v", err))
c.tlsReloadFuncsLock.Unlock()
return 1
}
sinks = append(sinks, &sink.SinkConfig{
Logger: apiProxyLogger,
Sink: inmemSink,
})
}
proxyVaultToken = !config.APIProxy.ForceAutoAuthToken
}
var muxHandler http.Handler
if leaseCache != nil {
muxHandler = cache.ProxyHandler(ctx, apiProxyLogger, leaseCache, inmemSink, proxyVaultToken)
muxHandler = cache.ProxyHandler(ctx, apiProxyLogger, leaseCache, inmemSink, proxyVaultToken, authInProgress, invalidTokenErrCh)
} else {
muxHandler = cache.ProxyHandler(ctx, apiProxyLogger, apiProxy, inmemSink, proxyVaultToken)
muxHandler = cache.ProxyHandler(ctx, apiProxyLogger, apiProxy, inmemSink, proxyVaultToken, authInProgress, invalidTokenErrCh)
}
// Parse 'require_request_header' listener config option, and wrap
@@ -708,71 +783,6 @@ func (c *AgentCommand) Run(args []string) int {
// Start auto-auth and sink servers
if method != nil {
enableTemplateTokenCh := len(config.Templates) > 0
enableEnvTemplateTokenCh := len(config.EnvTemplates) > 0
// Auth Handler is going to set its own retry values, so we want to
// work on a copy of the client to not affect other subsystems.
ahClient, err := c.client.CloneWithHeaders()
if err != nil {
c.UI.Error(fmt.Sprintf("Error cloning client for auth handler: %v", err))
return 1
}
// Override the set namespace with the auto-auth specific namespace
if !namespaceSetByEnvironmentVariable && config.AutoAuth.Method.Namespace != "" {
ahClient.SetNamespace(config.AutoAuth.Method.Namespace)
}
if config.DisableIdleConnsAutoAuth {
ahClient.SetMaxIdleConnections(-1)
}
if config.DisableKeepAlivesAutoAuth {
ahClient.SetDisableKeepAlives(true)
}
ah := auth.NewAuthHandler(&auth.AuthHandlerConfig{
Logger: c.logger.Named("auth.handler"),
Client: ahClient,
WrapTTL: config.AutoAuth.Method.WrapTTL,
MinBackoff: config.AutoAuth.Method.MinBackoff,
MaxBackoff: config.AutoAuth.Method.MaxBackoff,
EnableReauthOnNewCredentials: config.AutoAuth.EnableReauthOnNewCredentials,
EnableTemplateTokenCh: enableTemplateTokenCh,
EnableExecTokenCh: enableEnvTemplateTokenCh,
Token: previousToken,
ExitOnError: config.AutoAuth.Method.ExitOnError,
UserAgent: useragent.AgentAutoAuthString(),
MetricsSignifier: "agent",
})
ss := sink.NewSinkServer(&sink.SinkServerConfig{
Logger: c.logger.Named("sink.server"),
Client: ahClient,
ExitAfterAuth: config.ExitAfterAuth,
})
ts := template.NewServer(&template.ServerConfig{
Logger: c.logger.Named("template.server"),
LogLevel: c.logger.GetLevel(),
LogWriter: c.logWriter,
AgentConfig: c.config,
Namespace: templateNamespace,
ExitAfterAuth: config.ExitAfterAuth,
})
es, err := exec.NewServer(&exec.ServerConfig{
AgentConfig: c.config,
Namespace: templateNamespace,
Logger: c.logger.Named("exec.server"),
LogLevel: c.logger.GetLevel(),
LogWriter: c.logWriter,
})
if err != nil {
c.logger.Error("could not create exec server", "error", err)
return 1
}
g.Add(func() error {
return ah.Run(ctx, method)

View File

@@ -319,7 +319,7 @@ func TestCache_UsingAutoAuthToken(t *testing.T) {
mux.Handle(consts.AgentPathCacheClear, leaseCache.HandleCacheClear(ctx))
// Passing a non-nil inmemsink tells the agent to use the auto-auth token
mux.Handle("/", cache.ProxyHandler(ctx, cacheLogger, leaseCache, inmemSink, true))
mux.Handle("/", cache.ProxyHandler(ctx, cacheLogger, leaseCache, inmemSink, true, nil, nil))
server := &http.Server{
Handler: mux,
ReadHeaderTimeout: 10 * time.Second,

View File

@@ -9,7 +9,7 @@ import (
gohttp "net/http"
"sync"
hclog "github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-retryablehttp"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/helper/namespace"

View File

@@ -285,9 +285,9 @@ func setupClusterAndAgentCommon(ctx context.Context, t *testing.T, coreConfig *v
mux.Handle("/agent/v1/cache-clear", leaseCache.HandleCacheClear(ctx))
mux.Handle("/", ProxyHandler(ctx, cacheLogger, leaseCache, nil, true))
mux.Handle("/", ProxyHandler(ctx, cacheLogger, leaseCache, nil, true, nil, nil))
} else {
mux.Handle("/", ProxyHandler(ctx, apiProxyLogger, apiProxy, nil, true))
mux.Handle("/", ProxyHandler(ctx, apiProxyLogger, apiProxy, nil, true, nil, nil))
}
server := &http.Server{

View File

@@ -81,7 +81,7 @@ func TestCache_AutoAuthTokenStripping(t *testing.T) {
mux := http.NewServeMux()
mux.Handle(consts.AgentPathCacheClear, leaseCache.HandleCacheClear(ctx))
mux.Handle("/", ProxyHandler(ctx, cacheLogger, leaseCache, mock.NewSink("testid"), true))
mux.Handle("/", ProxyHandler(ctx, cacheLogger, leaseCache, mock.NewSink("testid"), true, nil, nil))
server := &http.Server{
Handler: mux,
ReadHeaderTimeout: 10 * time.Second,
@@ -170,7 +170,7 @@ func TestCache_AutoAuthClientTokenProxyStripping(t *testing.T) {
mux := http.NewServeMux()
// mux.Handle(consts.AgentPathCacheClear, leaseCache.HandleCacheClear(ctx))
mux.Handle("/", ProxyHandler(ctx, cacheLogger, leaseCache, mock.NewSink(realToken), false))
mux.Handle("/", ProxyHandler(ctx, cacheLogger, leaseCache, mock.NewSink(realToken), false, nil, nil))
server := &http.Server{
Handler: mux,
ReadHeaderTimeout: 10 * time.Second,

View File

@@ -13,6 +13,8 @@ import (
"io"
"io/ioutil"
"net/http"
"strings"
"sync/atomic"
"time"
"github.com/armon/go-metrics"
@@ -23,7 +25,7 @@ import (
"github.com/hashicorp/vault/sdk/logical"
)
func ProxyHandler(ctx context.Context, logger hclog.Logger, proxier Proxier, inmemSink sink.Sink, proxyVaultToken bool) http.Handler {
func ProxyHandler(ctx context.Context, logger hclog.Logger, proxier Proxier, inmemSink sink.Sink, proxyVaultToken bool, authInProgress *atomic.Bool, invalidTokenErrCh chan error) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
logger.Info("received request", "method", r.Method, "path", r.URL.Path)
@@ -33,9 +35,13 @@ func ProxyHandler(ctx context.Context, logger hclog.Logger, proxier Proxier, inm
token := r.Header.Get(consts.AuthHeaderName)
if token == "" && inmemSink != nil {
logger.Debug("using auto auth token", "method", r.Method, "path", r.URL.Path)
token = inmemSink.(sink.SinkReader).Token()
var autoAuthToken string
if inmemSink != nil {
autoAuthToken = inmemSink.(sink.SinkReader).Token()
if token == "" {
logger.Debug("using auto auth token", "method", r.Method, "path", r.URL.Path)
token = autoAuthToken
}
}
// Parse and reset body.
@@ -59,10 +65,22 @@ func ProxyHandler(ctx context.Context, logger hclog.Logger, proxier Proxier, inm
if err != nil {
// If this is an api.Response error, don't wrap the response.
if resp != nil && resp.Response.Error() != nil {
responseErrMessage := resp.Response.Error()
copyHeader(w.Header(), resp.Response.Header)
w.WriteHeader(resp.Response.StatusCode)
io.Copy(w, resp.Response.Body)
metrics.IncrCounter([]string{"agent", "proxy", "client_error"}, 1)
// Re-trigger auto auth if the token is the same as the auto auth token
if resp.Response.StatusCode == 403 && strings.Contains(responseErrMessage.Error(), logical.ErrInvalidToken.Error()) &&
autoAuthToken == token && !authInProgress.Load() {
// Drain the error channel first
logger.Info("proxy received an invalid token error")
select {
case <-invalidTokenErrCh:
default:
}
invalidTokenErrCh <- resp.Response.Error()
}
} else {
metrics.IncrCounter([]string{"agent", "proxy", "error"}, 1)
logical.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to get the response: %w", err))

View File

@@ -9,8 +9,11 @@ import (
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"sync/atomic"
"time"
"github.com/hashicorp/go-hclog"
@@ -20,6 +23,7 @@ import (
"github.com/hashicorp/vault/command/agentproxyshared/cache/cachememdb"
"github.com/hashicorp/vault/command/agentproxyshared/sink"
"github.com/hashicorp/vault/helper/useragent"
"github.com/hashicorp/vault/sdk/logical"
"golang.org/x/exp/maps"
"nhooyr.io/websocket"
)
@@ -359,13 +363,23 @@ func (updater *StaticSecretCacheUpdater) openWebSocketConnection(ctx context.Con
}
if err != nil {
errMessage := err.Error()
if resp != nil {
if resp.StatusCode == http.StatusNotFound {
return nil, fmt.Errorf("received 404 when opening web socket to %s, ensure Vault is Enterprise version 1.16 or above", wsURL)
}
if resp.StatusCode == http.StatusForbidden {
var errBytes []byte
errBytes, err = io.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
return nil, fmt.Errorf("error occured when attempting to read error response from Vault server")
}
errMessage = string(errBytes)
}
}
return nil, fmt.Errorf("error returned when opening event stream web socket to %s, ensure auto-auth token"+
" has correct permissions and Vault is Enterprise version 1.16 or above: %w", wsURL, err)
" has correct permissions and Vault is Enterprise version 1.16 or above: %s", wsURL, errMessage)
}
if conn == nil {
@@ -379,7 +393,7 @@ func (updater *StaticSecretCacheUpdater) openWebSocketConnection(ctx context.Con
// Once a token is provided to the sink, we will start the websocket and start consuming
// events and updating secrets.
// Run will shut down gracefully when the context is cancelled.
func (updater *StaticSecretCacheUpdater) Run(ctx context.Context) error {
func (updater *StaticSecretCacheUpdater) Run(ctx context.Context, authRenewalInProgress *atomic.Bool, invalidTokenErrCh chan error) error {
updater.logger.Info("starting static secret cache updater subsystem")
defer func() {
updater.logger.Info("static secret cache updater subsystem stopped")
@@ -415,6 +429,15 @@ tokenLoop:
if err != nil {
updater.logger.Error("error occurred during streaming static secret cache update events", "err", err)
shouldBackoff = true
if strings.Contains(err.Error(), logical.ErrInvalidToken.Error()) && !authRenewalInProgress.Load() {
// Drain the channel in case there is an error that has already been sent but not received
select {
case <-invalidTokenErrCh:
default:
}
updater.logger.Error("received invalid token error while opening websocket")
invalidTokenErrCh <- err
}
continue
}
}

View File

@@ -7,6 +7,7 @@ import (
"context"
"fmt"
"sync"
syncatomic "sync/atomic"
"testing"
"time"
@@ -156,6 +157,136 @@ func TestOpenWebSocketConnection(t *testing.T) {
}
}
// TestOpenWebSocketConnection_BadPolicyToken tests that attempting to open a
// websocket connection to the events system using a token that has incorrect
// policy access does not trigger auto auth.
func TestOpenWebSocketConnection_BadPolicyToken(t *testing.T) {
	// We need a valid cluster for the connection to succeed.
	cluster := minimal.NewTestSoloCluster(t, nil)
	client := cluster.Cores[0].Client
	updater := testNewStaticSecretCacheUpdater(t, client)

	// Deny access to the events subscription endpoint so the websocket open
	// fails with a permission error rather than an invalid-token error.
	eventPolicy := `path "sys/events/subscribe/*" {
capabilities = ["deny"]
}`
	// Previously the PutPolicy error was silently dropped; if policy creation
	// fails, the rest of the test would assert against the wrong state.
	err := client.Sys().PutPolicy("no_events_access", eventPolicy)
	require.NoError(t, err)

	// Create a new token with a bad policy
	token, err := client.Auth().Token().Create(&api.TokenCreateRequest{
		Policies: []string{"no_events_access"},
	})
	require.NoError(t, err)

	// Set the client token to one with an invalid policy
	err = updater.tokenSink.WriteToken(token.Auth.ClientToken)
	require.NoError(t, err)
	client.SetToken(token.Auth.ClientToken)

	ctx, cancelFunc := context.WithCancel(context.Background())
	authInProgress := &syncatomic.Bool{}
	renewalChannel := make(chan error)
	errCh := make(chan error)
	go func() {
		errCh <- updater.Run(ctx, authInProgress, renewalChannel)
	}()
	// Note: cancelFunc's defer (registered below) runs before this one, so the
	// ctx.Done() case prevents blocking forever on errCh if Run never returns.
	defer func() {
		select {
		case <-ctx.Done():
		case err := <-errCh:
			if err != nil {
				t.Fatal(err)
			}
		}
	}()
	defer cancelFunc()

	// Verify that the token has been written to the sink before checking auto auth
	// is not re-triggered
	err = updater.streamStaticSecretEvents(ctx)
	require.ErrorContains(t, err, logical.ErrPermissionDenied.Error())

	// Auto auth should not be retriggered: a permission-denied error (bad
	// policy) is not the same as an invalid token.
	timeout := time.After(2 * time.Second)
	select {
	case <-renewalChannel:
		t.Fatal("incorrectly triggered auto auth")
	case <-ctx.Done():
		t.Fatal("context was closed before auto auth could be re-triggered")
	case <-timeout:
	}
}
// TestOpenWebSocketConnection_AutoAuthSelfHeal tests that attempting to open a
// websocket connection to the events system using an invalid token will
// re-trigger auto auth.
func TestOpenWebSocketConnection_AutoAuthSelfHeal(t *testing.T) {
	// We need a valid cluster for the connection to succeed.
	cluster := minimal.NewTestSoloCluster(t, nil)
	client := cluster.Cores[0].Client
	updater := testNewStaticSecretCacheUpdater(t, client)

	// Revoke the token before it can be used to open a connection to the
	// events system. Previously these errors were silently ignored; if the
	// revocation fails, the test premise (invalid token) would be false.
	err := client.Auth().Token().RevokeOrphan(client.Token())
	require.NoError(t, err)
	err = updater.tokenSink.WriteToken(client.Token())
	require.NoError(t, err)
	// Give the revocation a moment to take effect server-side.
	time.Sleep(100 * time.Millisecond)

	ctx, cancelFunc := context.WithCancel(context.Background())
	authInProgress := &syncatomic.Bool{}
	renewalChannel := make(chan error)
	errCh := make(chan error)
	go func() {
		errCh <- updater.Run(ctx, authInProgress, renewalChannel)
	}()
	// cancelFunc's defer (registered below) runs first, so ctx.Done() keeps
	// this defer from blocking forever if Run never returns.
	defer func() {
		select {
		case <-ctx.Done():
		case err := <-errCh:
			if err != nil {
				t.Fatal(err)
			}
		}
	}()
	defer cancelFunc()

	// Wait for static secret updater to begin
	timeout := time.After(10 * time.Second)
	select {
	case <-renewalChannel:
	case <-ctx.Done():
		t.Fatal("context was closed before auto auth could be re-triggered")
	case <-timeout:
		t.Fatal("timed out before auto auth could be re-triggered")
	}

	authInProgress.Store(false)

	// Verify that auto auth is re-triggered again because another auth is "not in progress"
	timeout = time.After(15 * time.Second)
	select {
	case <-renewalChannel:
	case <-ctx.Done():
		t.Fatal("context was closed before auto auth could be re-triggered")
	case <-timeout:
		t.Fatal("timed out before auto auth could be re-triggered")
	}

	authInProgress.Store(true)

	// Verify that auto auth is NOT re-triggered again because another auth is in progress
	timeout = time.After(2 * time.Second)
	select {
	case <-renewalChannel:
		t.Fatal("auto auth was incorrectly re-triggered")
	case <-ctx.Done():
		t.Fatal("context was closed before auto auth could be re-triggered")
	case <-timeout:
	}
}
// TestOpenWebSocketConnectionReceivesEventsDefaultMount tests that the openWebSocketConnection function
// works as expected with the default KVV1 mount, and then the connection can be used to receive an event.
// This acts as more of an event system sanity check than a test of the updater

View File

@@ -15,6 +15,7 @@ import (
"sort"
"strings"
"sync"
"sync/atomic"
"time"
systemd "github.com/coreos/go-systemd/daemon"
@@ -529,6 +530,58 @@ func (c *ProxyCommand) Run(args []string) int {
}
}
// Create the AuthHandler and the Sink Server so that we can pass AuthHandler struct
// values into the Proxy http.Handler. We will wait to actually start these servers
// once we have configured handlers for each listener below
authInProgress := &atomic.Bool{}
invalidTokenErrCh := make(chan error)
var ah *auth.AuthHandler
var ss *sink.SinkServer
if method != nil {
// Auth Handler is going to set its own retry values, so we want to
// work on a copy of the client to not affect other subsystems.
ahClient, err := c.client.CloneWithHeaders()
if err != nil {
c.UI.Error(fmt.Sprintf("Error cloning client for auth handler: %v", err))
return 1
}
// Override the set namespace with the auto-auth specific namespace
if !namespaceSetByEnvironmentVariable && config.AutoAuth.Method.Namespace != "" {
ahClient.SetNamespace(config.AutoAuth.Method.Namespace)
}
if config.DisableIdleConnsAutoAuth {
ahClient.SetMaxIdleConnections(-1)
}
if config.DisableKeepAlivesAutoAuth {
ahClient.SetDisableKeepAlives(true)
}
ah = auth.NewAuthHandler(&auth.AuthHandlerConfig{
Logger: c.logger.Named("auth.handler"),
Client: ahClient,
WrapTTL: config.AutoAuth.Method.WrapTTL,
MinBackoff: config.AutoAuth.Method.MinBackoff,
MaxBackoff: config.AutoAuth.Method.MaxBackoff,
EnableReauthOnNewCredentials: config.AutoAuth.EnableReauthOnNewCredentials,
Token: previousToken,
ExitOnError: config.AutoAuth.Method.ExitOnError,
UserAgent: useragent.ProxyAutoAuthString(),
MetricsSignifier: "proxy",
})
authInProgress = ah.AuthInProgress
invalidTokenErrCh = ah.InvalidToken
ss = sink.NewSinkServer(&sink.SinkServerConfig{
Logger: c.logger.Named("sink.server"),
Client: ahClient,
ExitAfterAuth: config.ExitAfterAuth,
})
}
var listeners []net.Listener
// Ensure we've added all the reload funcs for TLS before anyone triggers a reload.
@@ -561,32 +614,29 @@ func (c *ProxyCommand) Run(args []string) int {
listeners = append(listeners, ln)
apiProxyLogger.Debug("configuring inmem auto-auth sink")
inmemSink, err := inmem.New(&sink.SinkConfig{
Logger: apiProxyLogger,
}, leaseCache)
if err != nil {
c.UI.Error(fmt.Sprintf("Error creating inmem sink for cache: %v", err))
c.tlsReloadFuncsLock.Unlock()
return 1
}
sinks = append(sinks, &sink.SinkConfig{
Logger: apiProxyLogger,
Sink: inmemSink,
})
proxyVaultToken := true
var inmemSink sink.Sink
if config.APIProxy != nil {
if config.APIProxy.UseAutoAuthToken {
apiProxyLogger.Debug("configuring inmem auto-auth sink")
inmemSink, err = inmem.New(&sink.SinkConfig{
Logger: apiProxyLogger,
}, leaseCache)
if err != nil {
c.UI.Error(fmt.Sprintf("Error creating inmem sink for cache: %v", err))
c.tlsReloadFuncsLock.Unlock()
return 1
}
sinks = append(sinks, &sink.SinkConfig{
Logger: apiProxyLogger,
Sink: inmemSink,
})
}
proxyVaultToken = !config.APIProxy.ForceAutoAuthToken
}
var muxHandler http.Handler
if leaseCache != nil {
muxHandler = cache.ProxyHandler(ctx, apiProxyLogger, leaseCache, inmemSink, proxyVaultToken)
muxHandler = cache.ProxyHandler(ctx, apiProxyLogger, leaseCache, inmemSink, proxyVaultToken, authInProgress, invalidTokenErrCh)
} else {
muxHandler = cache.ProxyHandler(ctx, apiProxyLogger, apiProxy, inmemSink, proxyVaultToken)
muxHandler = cache.ProxyHandler(ctx, apiProxyLogger, apiProxy, inmemSink, proxyVaultToken, authInProgress, invalidTokenErrCh)
}
// Parse 'require_request_header' listener config option, and wrap
@@ -692,46 +742,6 @@ func (c *ProxyCommand) Run(args []string) int {
// Start auto-auth and sink servers
if method != nil {
// Auth Handler is going to set its own retry values, so we want to
// work on a copy of the client to not affect other subsystems.
ahClient, err := c.client.CloneWithHeaders()
if err != nil {
c.UI.Error(fmt.Sprintf("Error cloning client for auth handler: %v", err))
return 1
}
// Override the set namespace with the auto-auth specific namespace
if !namespaceSetByEnvironmentVariable && config.AutoAuth.Method.Namespace != "" {
ahClient.SetNamespace(config.AutoAuth.Method.Namespace)
}
if config.DisableIdleConnsAutoAuth {
ahClient.SetMaxIdleConnections(-1)
}
if config.DisableKeepAlivesAutoAuth {
ahClient.SetDisableKeepAlives(true)
}
ah := auth.NewAuthHandler(&auth.AuthHandlerConfig{
Logger: c.logger.Named("auth.handler"),
Client: ahClient,
WrapTTL: config.AutoAuth.Method.WrapTTL,
MinBackoff: config.AutoAuth.Method.MinBackoff,
MaxBackoff: config.AutoAuth.Method.MaxBackoff,
EnableReauthOnNewCredentials: config.AutoAuth.EnableReauthOnNewCredentials,
Token: previousToken,
ExitOnError: config.AutoAuth.Method.ExitOnError,
UserAgent: useragent.ProxyAutoAuthString(),
MetricsSignifier: "proxy",
})
ss := sink.NewSinkServer(&sink.SinkServerConfig{
Logger: c.logger.Named("sink.server"),
Client: ahClient,
ExitAfterAuth: config.ExitAfterAuth,
})
g.Add(func() error {
return ah.Run(ctx, method)
}, func(error) {
@@ -773,7 +783,7 @@ func (c *ProxyCommand) Run(args []string) int {
// Add the static secret cache updater, if appropriate
if updater != nil {
g.Add(func() error {
err := updater.Run(ctx)
err := updater.Run(ctx, authInProgress, invalidTokenErrCh)
return err
}, func(error) {
cancelFunc()

View File

@@ -240,6 +240,896 @@ auto_auth {
}
}
// TestProxy_NoTriggerAutoAuth_BadPolicy tests that auto auth is not re-triggered
// if Proxy uses a token with incorrect policy access.
func TestProxy_NoTriggerAutoAuth_BadPolicy(t *testing.T) {
	proxyLogger := logging.NewVaultLogger(hclog.Trace)
	vaultLogger := logging.NewVaultLogger(hclog.Info)
	cluster := vault.NewTestCluster(t, &vault.CoreConfig{}, &vault.TestClusterOptions{
		NumCores:    1,
		HandlerFunc: vaulthttp.Handler,
		Logger:      vaultLogger,
	})
	cluster.Start()
	defer cluster.Cleanup()

	serverClient := cluster.Cores[0].Client

	// Add a secret to the KV engine
	_, err := serverClient.Logical().Write("secret/foo", map[string]interface{}{"user": "something"})
	require.NoError(t, err)

	// Create kv read policy
	noKvAccess := `path "secret/*" {
capabilities = ["deny"]
}`
	err = serverClient.Sys().PutPolicy("noKvAccess", noKvAccess)
	require.NoError(t, err)

	// Create a token with that policy
	opts := &api.TokenCreateRequest{Policies: []string{"noKvAccess"}}
	tokenResp, err := serverClient.Auth().Token().Create(opts)
	require.NoError(t, err)
	firstToken := tokenResp.Auth.ClientToken

	// Create token file
	tokenFileName := makeTempFile(t, "token-file", firstToken)
	defer os.Remove(tokenFileName)

	sinkf, err := os.CreateTemp("", "sink.test.")
	if err != nil {
		t.Fatal(err)
	}
	sink := sinkf.Name()
	sinkf.Close()
	os.Remove(sink)

	autoAuthConfig := fmt.Sprintf(`
auto_auth {
	method {
		type = "token_file"
		config = {
			token_file_path = "%s"
		}
	}

	sink "file" {
		config = {
			path = "%s"
		}
	}
}`, tokenFileName, sink)

	listenAddr := generateListenerAddress(t)
	listenConfig := fmt.Sprintf(`
listener "tcp" {
	address = "%s"
	tls_disable = true
}
`, listenAddr)

	config := fmt.Sprintf(`
vault {
	address = "%s"
	tls_skip_verify = true
}
api_proxy {
	use_auto_auth_token = "force"
}
%s
%s
`, serverClient.Address(), listenConfig, autoAuthConfig)
	configPath := makeTempFile(t, "config.hcl", config)
	defer os.Remove(configPath)

	// Unset the environment variable so that proxy picks up the right test
	// cluster address
	defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress))
	os.Unsetenv(api.EnvVaultAddress)

	// Start proxy
	_, cmd := testProxyCommand(t, proxyLogger)
	cmd.startedCh = make(chan struct{})

	wg := &sync.WaitGroup{}
	wg.Add(1)
	go func() {
		cmd.Run([]string{"-config", configPath})
		wg.Done()
	}()

	select {
	case <-cmd.startedCh:
	case <-time.After(5 * time.Second):
		t.Fatalf("timeout")
	}

	// Validate that the auto-auth token has been correctly attained
	// and works for LookupSelf
	conf := api.DefaultConfig()
	conf.Address = "http://" + listenAddr
	proxyClient, err := api.NewClient(conf)
	if err != nil {
		t.Fatal(err)
	}
	proxyClient.SetToken("")
	err = proxyClient.SetAddress("http://" + listenAddr)
	if err != nil {
		t.Fatal(err)
	}

	// waitForFile polls the sink file and returns its new mod time once it
	// changes, or prevModTime if it does not change before the timeout.
	waitForFile := func(prevModTime time.Time) time.Time {
		ticker := time.Tick(100 * time.Millisecond)
		timeout := time.After(15 * time.Second)
		for {
			select {
			case <-ticker:
			case <-timeout:
				return prevModTime
			}
			modTime, err := os.Stat(sink)
			if err != nil {
				// The sink file may not have been written yet; previously a
				// plain require.NoError here could fail the test spuriously
				// while the proxy was still starting up.
				if os.IsNotExist(err) {
					continue
				}
				t.Fatal(err)
			}
			if modTime.ModTime().After(prevModTime) {
				return modTime.ModTime()
			}
		}
	}

	// Wait for the token to be sent to sinks and be available to be used
	initialModTime := waitForFile(time.Time{})
	req := proxyClient.NewRequest("GET", "/v1/auth/token/lookup-self")
	_ = request(t, proxyClient, req, 200)

	// Write a new token to the token file
	newTokenResp, err := serverClient.Auth().Token().Create(&api.TokenCreateRequest{})
	require.NoError(t, err)
	secondToken := newTokenResp.Auth.ClientToken
	err = os.WriteFile(tokenFileName, []byte(secondToken), 0o600)
	require.NoError(t, err)

	// Make a request to a path that the token does not have access to.
	// Permission denied (bad policy) must NOT look like an invalid token.
	req = proxyClient.NewRequest("GET", "/v1/secret/foo")
	_, err = proxyClient.RawRequest(req)
	require.Error(t, err)
	require.ErrorContains(t, err, logical.ErrPermissionDenied.Error())
	require.NotContains(t, err.Error(), logical.ErrInvalidToken.Error())

	// Sleep for a bit to ensure that auto auth is not re-triggered
	newModTime := waitForFile(initialModTime)
	if newModTime.After(initialModTime) {
		t.Fatal("auto auth was incorrectly re-triggered")
	}

	// Read from the sink file and verify that the token has not changed.
	// Previously the ReadFile error was unchecked before the Equal assertion.
	newToken, err := os.ReadFile(sink)
	require.NoError(t, err)
	require.Equal(t, firstToken, string(newToken))

	close(cmd.ShutdownCh)
	wg.Wait()
}
// TestProxy_NoTriggerAutoAuth_ProxyTokenNotAutoAuth tests that auto auth is not re-triggered
// if Proxy uses a token that is not equal to the auto auth token
func TestProxy_NoTriggerAutoAuth_ProxyTokenNotAutoAuth(t *testing.T) {
	proxyLogger := logging.NewVaultLogger(hclog.Info)
	vaultLogger := logging.NewVaultLogger(hclog.Info)
	cluster := vault.NewTestCluster(t, &vault.CoreConfig{}, &vault.TestClusterOptions{
		NumCores:    1,
		HandlerFunc: vaulthttp.Handler,
		Logger:      vaultLogger,
	})
	cluster.Start()
	defer cluster.Cleanup()

	serverClient := cluster.Cores[0].Client

	// Create a token
	tokenResp, err := serverClient.Auth().Token().Create(&api.TokenCreateRequest{})
	require.NoError(t, err)
	firstToken := tokenResp.Auth.ClientToken

	// Create token file
	tokenFileName := makeTempFile(t, "token-file", firstToken)
	defer os.Remove(tokenFileName)

	sinkf, err := os.CreateTemp("", "sink.test.")
	if err != nil {
		t.Fatal(err)
	}
	sink := sinkf.Name()
	sinkf.Close()
	os.Remove(sink)

	autoAuthConfig := fmt.Sprintf(`
auto_auth {
	method {
		type = "token_file"
		config = {
			token_file_path = "%s"
		}
	}

	sink "file" {
		config = {
			path = "%s"
		}
	}
}`, tokenFileName, sink)

	listenAddr := generateListenerAddress(t)
	listenConfig := fmt.Sprintf(`
listener "tcp" {
	address = "%s"
	tls_disable = true
}
`, listenAddr)

	// Do not use the auto auth token if a token is provided with the proxy client
	config := fmt.Sprintf(`
vault {
	address = "%s"
	tls_skip_verify = true
}
api_proxy {
	use_auto_auth_token = true
}
%s
%s
`, serverClient.Address(), listenConfig, autoAuthConfig)
	configPath := makeTempFile(t, "config.hcl", config)
	defer os.Remove(configPath)

	// Unset the environment variable so that proxy picks up the right test
	// cluster address
	defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress))
	os.Unsetenv(api.EnvVaultAddress)

	// Start proxy
	_, cmd := testProxyCommand(t, proxyLogger)
	cmd.startedCh = make(chan struct{})

	wg := &sync.WaitGroup{}
	wg.Add(1)
	go func() {
		cmd.Run([]string{"-config", configPath})
		wg.Done()
	}()

	select {
	case <-cmd.startedCh:
	case <-time.After(5 * time.Second):
		t.Fatalf("timeout")
	}

	// Validate that the auto-auth token has been correctly attained
	// and works for LookupSelf
	conf := api.DefaultConfig()
	conf.Address = "http://" + listenAddr
	proxyClient, err := api.NewClient(conf)
	if err != nil {
		t.Fatal(err)
	}
	proxyClient.SetToken(firstToken)
	err = proxyClient.SetAddress("http://" + listenAddr)
	if err != nil {
		t.Fatal(err)
	}

	// waitForFile polls the sink file and returns its new mod time once it
	// changes, or prevModTime if it does not change before the timeout.
	waitForFile := func(prevModTime time.Time) time.Time {
		ticker := time.Tick(100 * time.Millisecond)
		timeout := time.After(15 * time.Second)
		for {
			select {
			case <-ticker:
			case <-timeout:
				return prevModTime
			}
			modTime, err := os.Stat(sink)
			if err != nil {
				// The sink file may not have been written yet; previously a
				// plain require.NoError here could fail the test spuriously
				// while the proxy was still starting up.
				if os.IsNotExist(err) {
					continue
				}
				t.Fatal(err)
			}
			if modTime.ModTime().After(prevModTime) {
				return modTime.ModTime()
			}
		}
	}

	// Wait for the token to be available to be used
	createTime := waitForFile(time.Time{})
	req := proxyClient.NewRequest("GET", "/v1/auth/token/lookup-self")
	_, err = proxyClient.RawRequest(req)
	require.NoError(t, err)

	// Revoke token
	req = serverClient.NewRequest("PUT", "/v1/auth/token/revoke")
	req.BodyBytes = []byte(fmt.Sprintf(`{
	"token": "%s"
}`, firstToken))
	_ = request(t, serverClient, req, 204)

	// Write a new token to the token file
	newTokenResp, err := serverClient.Auth().Token().Create(&api.TokenCreateRequest{})
	require.NoError(t, err)
	secondToken := newTokenResp.Auth.ClientToken
	err = os.WriteFile(tokenFileName, []byte(secondToken), 0o600)
	require.NoError(t, err)

	// Proxy uses a client-supplied (non-auto-auth) token to make a request,
	// which should result in an error but must not re-trigger auto auth.
	proxyClient.SetToken("random token")
	req = proxyClient.NewRequest("GET", "/v1/auth/token/lookup-self")
	_, err = proxyClient.RawRequest(req)
	require.Error(t, err)

	// Wait to see if the sink file is modified
	newModTime := waitForFile(createTime)
	if newModTime.After(createTime) {
		t.Fatal("auto auth was incorrectly re-triggered")
	}

	// Read from the sink and verify that the token has not changed.
	// Previously the ReadFile error was unchecked before the Equal assertion.
	newToken, err := os.ReadFile(sink)
	require.NoError(t, err)
	require.Equal(t, firstToken, string(newToken))

	close(cmd.ShutdownCh)
	wg.Wait()
}
// TestProxy_ReTriggerAutoAuth_ForceAutoAuthToken tests that auto auth is re-triggered
// if Proxy always forcibly uses the auto auth token
func TestProxy_ReTriggerAutoAuth_ForceAutoAuthToken(t *testing.T) {
	proxyLogger := logging.NewVaultLogger(hclog.Trace)
	vaultLogger := logging.NewVaultLogger(hclog.Info)
	cluster := vault.NewTestCluster(t, &vault.CoreConfig{}, &vault.TestClusterOptions{
		NumCores:    1,
		HandlerFunc: vaulthttp.Handler,
		Logger:      vaultLogger,
	})
	cluster.Start()
	defer cluster.Cleanup()

	serverClient := cluster.Cores[0].Client

	// Create a token
	tokenResp, err := serverClient.Auth().Token().Create(&api.TokenCreateRequest{})
	require.NoError(t, err)
	firstToken := tokenResp.Auth.ClientToken

	// Create token file
	tokenFileName := makeTempFile(t, "token-file", firstToken)
	defer os.Remove(tokenFileName)

	sinkf, err := os.CreateTemp("", "sink.test.")
	if err != nil {
		t.Fatal(err)
	}
	sink := sinkf.Name()
	sinkf.Close()
	os.Remove(sink)

	autoAuthConfig := fmt.Sprintf(`
auto_auth {
method {
type = "token_file"
config = {
token_file_path = "%s"
}
}
sink "file" {
config = {
path = "%s"
}
}
}`, tokenFileName, sink)

	listenAddr := generateListenerAddress(t)
	listenConfig := fmt.Sprintf(`
listener "tcp" {
address = "%s"
tls_disable = true
}
`, listenAddr)

	// Force Proxy to always use the auto auth token, even when the proxy
	// client supplies its own token
	config := fmt.Sprintf(`
vault {
address = "%s"
tls_skip_verify = true
}
api_proxy {
use_auto_auth_token = "force"
}
%s
%s
`, serverClient.Address(), listenConfig, autoAuthConfig)
	configPath := makeTempFile(t, "config.hcl", config)
	defer os.Remove(configPath)

	// Unset the environment variable so that proxy picks up the right test
	// cluster address
	defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress))
	os.Unsetenv(api.EnvVaultAddress)

	// Start proxy
	_, cmd := testProxyCommand(t, proxyLogger)
	cmd.startedCh = make(chan struct{})
	wg := &sync.WaitGroup{}
	wg.Add(1)
	go func() {
		cmd.Run([]string{"-config", configPath})
		wg.Done()
	}()
	select {
	case <-cmd.startedCh:
	case <-time.After(5 * time.Second):
		t.Fatalf("timeout")
	}

	// Validate that the auto-auth token has been correctly attained
	// and works for LookupSelf
	conf := api.DefaultConfig()
	conf.Address = "http://" + listenAddr
	proxyClient, err := api.NewClient(conf)
	if err != nil {
		t.Fatal(err)
	}
	proxyClient.SetToken(firstToken)
	err = proxyClient.SetAddress("http://" + listenAddr)
	if err != nil {
		t.Fatal(err)
	}

	// waitForFile polls the sink file until its modification time is newer than
	// prevModTime and returns the new modification time; if the file is not
	// updated within the timeout, prevModTime is returned unchanged.
	waitForFile := func(prevModTime time.Time) time.Time {
		ticker := time.NewTicker(100 * time.Millisecond)
		defer ticker.Stop()
		timeout := time.After(15 * time.Second)
		for {
			select {
			case <-ticker.C:
			case <-timeout:
				return prevModTime
			}
			fi, err := os.Stat(sink)
			if os.IsNotExist(err) {
				// The sink file has not been written yet; keep waiting.
				continue
			}
			require.NoError(t, err)
			if fi.ModTime().After(prevModTime) {
				return fi.ModTime()
			}
		}
	}

	// Wait for the token to be available to be used
	createTime := waitForFile(time.Time{})
	req := proxyClient.NewRequest("GET", "/v1/auth/token/lookup-self")
	_, err = proxyClient.RawRequest(req)
	require.NoError(t, err)

	// Revoke token
	req = serverClient.NewRequest("PUT", "/v1/auth/token/revoke")
	req.BodyBytes = []byte(fmt.Sprintf(`{
	  "token": "%s"
	}`, firstToken))
	_ = request(t, serverClient, req, 204)

	// Create new token
	newTokenResp, err := serverClient.Auth().Token().Create(&api.TokenCreateRequest{})
	require.NoError(t, err)
	secondToken := newTokenResp.Auth.ClientToken

	// Proxy uses the same token in the token file to make a request, which should result in error
	req = proxyClient.NewRequest("GET", "/v1/auth/token/lookup-self")
	_, err = proxyClient.RawRequest(req)
	require.Error(t, err)

	// Write a new token to the token file so that auto auth can write new token to sink
	err = os.WriteFile(tokenFileName, []byte(secondToken), 0o600)
	require.NoError(t, err)

	// Wait to see if that the sink file is modified
	waitForFile(createTime)

	// Read from the sink and verify that the sink contains the new token
	newToken, err := os.ReadFile(sink)
	require.NoError(t, err)
	require.Equal(t, secondToken, string(newToken))

	close(cmd.ShutdownCh)
	wg.Wait()
}
// TestProxy_ReTriggerAutoAuth_ProxyIsAutoAuthToken tests that auto auth is re-triggered
// when the proxy client uses a token that is equal to the auto auth token
func TestProxy_ReTriggerAutoAuth_ProxyIsAutoAuthToken(t *testing.T) {
	proxyLogger := logging.NewVaultLogger(hclog.Trace)
	vaultLogger := logging.NewVaultLogger(hclog.Info)
	cluster := vault.NewTestCluster(t, &vault.CoreConfig{
		CredentialBackends: map[string]logical.Factory{
			"approle": credAppRole.Factory,
		},
	}, &vault.TestClusterOptions{
		NumCores:    1,
		HandlerFunc: vaulthttp.Handler,
		Logger:      vaultLogger,
	})
	cluster.Start()
	defer cluster.Cleanup()

	serverClient := cluster.Cores[0].Client

	// Enable the approle auth method
	req := serverClient.NewRequest("POST", "/v1/sys/auth/approle")
	req.BodyBytes = []byte(`{
		"type": "approle"
	}`)
	request(t, serverClient, req, 204)

	// Create a named role
	req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role")
	req.BodyBytes = []byte(`{
	  "secret_id_num_uses": "10",
	  "secret_id_ttl": "1m",
	  "token_max_ttl": "4m",
	  "token_num_uses": "10",
	  "token_ttl": "4m",
	  "policies": "default"
	}`)
	request(t, serverClient, req, 204)

	// Fetch the RoleID of the named role
	req = serverClient.NewRequest("GET", "/v1/auth/approle/role/test-role/role-id")
	body := request(t, serverClient, req, 200)
	data := body["data"].(map[string]interface{})
	roleID := data["role_id"].(string)

	// Get a SecretID issued against the named role
	req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role/secret-id")
	body = request(t, serverClient, req, 200)
	data = body["data"].(map[string]interface{})
	secretID := data["secret_id"].(string)

	// Write the RoleID and SecretID to temp files
	roleIDPath := makeTempFile(t, "role_id.txt", roleID+"\n")
	secretIDPath := makeTempFile(t, "secret_id.txt", secretID+"\n")
	defer os.Remove(roleIDPath)
	defer os.Remove(secretIDPath)

	sinkf, err := os.CreateTemp("", "sink.test.")
	if err != nil {
		t.Fatal(err)
	}
	sink := sinkf.Name()
	sinkf.Close()
	os.Remove(sink)

	autoAuthConfig := fmt.Sprintf(`
auto_auth {
method "approle" {
mount_path = "auth/approle"
config = {
role_id_file_path = "%s"
secret_id_file_path = "%s"
}
}
sink "file" {
config = {
path = "%s"
}
}
}`, roleIDPath, secretIDPath, sink)

	listenAddr := generateListenerAddress(t)
	listenConfig := fmt.Sprintf(`
listener "tcp" {
address = "%s"
tls_disable = true
}
`, listenAddr)

	config := fmt.Sprintf(`
vault {
address = "%s"
tls_skip_verify = true
}
api_proxy {
use_auto_auth_token = true
}
%s
%s
`, serverClient.Address(), listenConfig, autoAuthConfig)
	configPath := makeTempFile(t, "config.hcl", config)
	defer os.Remove(configPath)

	// Unset the environment variable so that proxy picks up the right test
	// cluster address
	defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress))
	os.Unsetenv(api.EnvVaultAddress)

	// Start proxy
	_, cmd := testProxyCommand(t, proxyLogger)
	cmd.startedCh = make(chan struct{})
	wg := &sync.WaitGroup{}
	wg.Add(1)
	go func() {
		cmd.Run([]string{"-config", configPath})
		wg.Done()
	}()
	select {
	case <-cmd.startedCh:
	case <-time.After(5 * time.Second):
		// The proxy never came up; nothing below can succeed, so stop here.
		t.Fatalf("timeout")
	}

	// Validate that the auto-auth token has been correctly attained
	// and works for LookupSelf
	conf := api.DefaultConfig()
	conf.Address = "http://" + listenAddr
	proxyClient, err := api.NewClient(conf)
	if err != nil {
		t.Fatal(err)
	}
	err = proxyClient.SetAddress("http://" + listenAddr)
	if err != nil {
		t.Fatal(err)
	}

	// waitForFile polls the sink file until its modification time is newer
	// than prevModTime, fataling the test if that does not happen in time.
	waitForFile := func(prevModTime time.Time) {
		ticker := time.NewTicker(100 * time.Millisecond)
		defer ticker.Stop()
		timeout := time.After(15 * time.Second)
		for {
			select {
			case <-ticker.C:
			case <-timeout:
				t.Fatal("timed out waiting for re-triggered auto auth to complete")
			}
			fi, err := os.Stat(sink)
			if os.IsNotExist(err) {
				// The sink file has not been written yet; keep waiting.
				continue
			}
			require.NoError(t, err)
			if fi.ModTime().After(prevModTime) {
				return
			}
		}
	}

	// Wait for the token to be sent to syncs and be available to be used
	waitForFile(time.Time{})
	oldToken, err := os.ReadFile(sink)
	require.NoError(t, err)
	prevModTime, err := os.Stat(sink)
	require.NoError(t, err)

	// Set proxy token
	proxyClient.SetToken(string(oldToken))

	// Make request using proxy client to test that token is valid
	req = proxyClient.NewRequest("GET", "/v1/auth/token/lookup-self")
	body = request(t, proxyClient, req, 200)

	// Revoke token
	req = serverClient.NewRequest("PUT", "/v1/auth/token/revoke")
	req.BodyBytes = []byte(fmt.Sprintf(`{
	  "token": "%s"
	}`, oldToken))
	body = request(t, serverClient, req, 204)

	// Proxy uses revoked token to make request and should result in an error
	req = proxyClient.NewRequest("GET", "/v1/auth/token/lookup-self")
	_, err = proxyClient.RawRequest(req)
	require.Error(t, err)

	// Wait for new token to be written and available to use
	waitForFile(prevModTime.ModTime())

	// Verify new token is not equal to the old token
	newToken, err := os.ReadFile(sink)
	require.NoError(t, err)
	require.NotEqual(t, string(newToken), string(oldToken))

	// Verify that proxy no longer fails when making a request with the new token
	proxyClient.SetToken(string(newToken))
	req = proxyClient.NewRequest("GET", "/v1/auth/token/lookup-self")
	body = request(t, proxyClient, req, 200)

	close(cmd.ShutdownCh)
	wg.Wait()
}
// TestProxy_ReTriggerAutoAuth_RevokedToken tests that auto auth is re-triggered
// when Proxy uses a revoked auto auth token to make a request
func TestProxy_ReTriggerAutoAuth_RevokedToken(t *testing.T) {
	proxyLogger := logging.NewVaultLogger(hclog.Trace)
	vaultLogger := logging.NewVaultLogger(hclog.Info)
	cluster := vault.NewTestCluster(t, &vault.CoreConfig{
		CredentialBackends: map[string]logical.Factory{
			"approle": credAppRole.Factory,
		},
	}, &vault.TestClusterOptions{
		NumCores:    1,
		HandlerFunc: vaulthttp.Handler,
		Logger:      vaultLogger,
	})
	cluster.Start()
	defer cluster.Cleanup()

	serverClient := cluster.Cores[0].Client

	// Enable the approle auth method
	req := serverClient.NewRequest("POST", "/v1/sys/auth/approle")
	req.BodyBytes = []byte(`{
		"type": "approle"
	}`)
	request(t, serverClient, req, 204)

	// Create a named role
	req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role")
	req.BodyBytes = []byte(`{
	  "secret_id_num_uses": "10",
	  "secret_id_ttl": "1m",
	  "token_max_ttl": "4m",
	  "token_num_uses": "10",
	  "token_ttl": "4m",
	  "policies": "default"
	}`)
	request(t, serverClient, req, 204)

	// Fetch the RoleID of the named role
	req = serverClient.NewRequest("GET", "/v1/auth/approle/role/test-role/role-id")
	body := request(t, serverClient, req, 200)
	data := body["data"].(map[string]interface{})
	roleID := data["role_id"].(string)

	// Get a SecretID issued against the named role
	req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role/secret-id")
	body = request(t, serverClient, req, 200)
	data = body["data"].(map[string]interface{})
	secretID := data["secret_id"].(string)

	// Write the RoleID and SecretID to temp files
	roleIDPath := makeTempFile(t, "role_id.txt", roleID+"\n")
	secretIDPath := makeTempFile(t, "secret_id.txt", secretID+"\n")
	defer os.Remove(roleIDPath)
	defer os.Remove(secretIDPath)

	sinkf, err := os.CreateTemp("", "sink.test.")
	if err != nil {
		t.Fatal(err)
	}
	sink := sinkf.Name()
	sinkf.Close()
	os.Remove(sink)

	autoAuthConfig := fmt.Sprintf(`
auto_auth {
method "approle" {
mount_path = "auth/approle"
config = {
role_id_file_path = "%s"
secret_id_file_path = "%s"
}
}
sink "file" {
config = {
path = "%s"
}
}
}`, roleIDPath, secretIDPath, sink)

	listenAddr := generateListenerAddress(t)
	listenConfig := fmt.Sprintf(`
listener "tcp" {
address = "%s"
tls_disable = true
}
`, listenAddr)

	config := fmt.Sprintf(`
vault {
address = "%s"
tls_skip_verify = true
}
api_proxy {
use_auto_auth_token = "force"
}
%s
%s
`, serverClient.Address(), listenConfig, autoAuthConfig)
	configPath := makeTempFile(t, "config.hcl", config)
	defer os.Remove(configPath)

	// Unset the environment variable so that proxy picks up the right test
	// cluster address
	defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress))
	os.Unsetenv(api.EnvVaultAddress)

	// Start proxy
	_, cmd := testProxyCommand(t, proxyLogger)
	cmd.startedCh = make(chan struct{})
	wg := &sync.WaitGroup{}
	wg.Add(1)
	go func() {
		cmd.Run([]string{"-config", configPath})
		wg.Done()
	}()
	select {
	case <-cmd.startedCh:
	case <-time.After(5 * time.Second):
		// The proxy never came up; nothing below can succeed, so stop here.
		t.Fatalf("timeout")
	}

	// Validate that the auto-auth token has been correctly attained
	// and works for LookupSelf
	conf := api.DefaultConfig()
	conf.Address = "http://" + listenAddr
	proxyClient, err := api.NewClient(conf)
	if err != nil {
		t.Fatal(err)
	}
	proxyClient.SetToken("")
	err = proxyClient.SetAddress("http://" + listenAddr)
	if err != nil {
		t.Fatal(err)
	}

	// waitForFile polls the sink file until its modification time is newer
	// than prevModTime, fataling the test if that does not happen in time.
	waitForFile := func(prevModTime time.Time) {
		ticker := time.NewTicker(100 * time.Millisecond)
		defer ticker.Stop()
		timeout := time.After(15 * time.Second)
		for {
			select {
			case <-ticker.C:
			case <-timeout:
				t.Fatal("timed out waiting for re-triggered auto auth to complete")
			}
			fi, err := os.Stat(sink)
			if os.IsNotExist(err) {
				// The sink file has not been written yet; keep waiting.
				continue
			}
			require.NoError(t, err)
			if fi.ModTime().After(prevModTime) {
				return
			}
		}
	}

	// Wait for the token to be sent to syncs and be available to be used
	waitForFile(time.Time{})
	req = proxyClient.NewRequest("GET", "/v1/auth/token/lookup-self")
	body = request(t, proxyClient, req, 200)
	oldToken, err := os.ReadFile(sink)
	require.NoError(t, err)
	prevModTime, err := os.Stat(sink)
	require.NoError(t, err)

	// Revoke token
	req = serverClient.NewRequest("PUT", "/v1/auth/token/revoke")
	req.BodyBytes = []byte(fmt.Sprintf(`{
	  "token": "%s"
	}`, oldToken))
	body = request(t, serverClient, req, 204)

	// Proxy uses revoked token to make request and should result in an error
	req = proxyClient.NewRequest("GET", "/v1/auth/token/lookup-self")
	_, err = proxyClient.RawRequest(req)
	require.Error(t, err)

	// Wait for new token to be written and available to use
	waitForFile(prevModTime.ModTime())

	// Verify new token is not equal to the old token
	newToken, err := os.ReadFile(sink)
	require.NoError(t, err)
	require.NotEqual(t, string(newToken), string(oldToken))

	// Verify that proxy no longer fails when making a request
	req = proxyClient.NewRequest("GET", "/v1/auth/token/lookup-self")
	body = request(t, proxyClient, req, 200)

	close(cmd.ShutdownCh)
	wg.Wait()
}
// TestProxy_AutoAuth_UserAgent tests that the User-Agent sent
// to Vault by Vault Proxy is correct when performing Auto-Auth.
// Uses the custom handler userAgentHandler (defined above) so
@@ -687,9 +1577,9 @@ vault {
// TestProxy_ApiProxy_Retry Tests the retry functionalities of Vault Proxy's API Proxy
func TestProxy_ApiProxy_Retry(t *testing.T) {
//----------------------------------------------------
// ----------------------------------------------------
// Start the server and proxy
//----------------------------------------------------
// ----------------------------------------------------
logger := logging.NewVaultLogger(hclog.Trace)
var h handler
cluster := vault.NewTestCluster(t,
@@ -730,6 +1620,7 @@ func TestProxy_ApiProxy_Retry(t *testing.T) {
intRef := func(i int) *int {
return &i
}
// start test cases here
testCases := map[string]struct {
retries *int